diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..d5dbdf6fba39a3f6fd055ec3ea3a9d9c51ae3cc6 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "91adbd7d-d1f9-402a-a45a-3653c6199f86", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..19c2982705300a415282ab9e74a72be1675c4855 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6473552bdfe161be488a574e328a85bce8412158bcfaba2e89698a23e639d69 +size 342934 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..e4c52c41257f9f0bd9eafde95ac19c14ffa1f5c3 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33301382a9d36c67314781dce4acb9f16829b1537b0521f8655b47a2ad7ec212 +size 246050 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..56a069760ab8690bbc37ecec630070bf128b5900 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:34ea2279b3552cbfef63da9b631f5361b36958071f8d4eec5524c4b088b29c99 +size 100870 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..95b8b4dcf4755685b7a37728843826a50d3f12a6 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d5fcfb389d182d1434f9d372ac00c9ddde25ec4134686431f77bb6f5b4f2e43 +size 98507 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/training_log_91adbd7d-d1f9-402a-a45a-3653c6199f86.txt b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/training_log_91adbd7d-d1f9-402a-a45a-3653c6199f86.txt new file mode 100644 index 0000000000000000000000000000000000000000..072e23496335277b89536b01b854969d503738b8 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/training_log_91adbd7d-d1f9-402a-a45a-3653c6199f86.txt @@ -0,0 +1,5132 @@ +[2025-07-06 14:56:10] [Rank 0] PRINT: --- Script Start: Sun Jul 6 
14:56:10 2025 ---
+[2025-07-06 14:56:10] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001)
+[2025-07-06 14:56:10] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 14:56:10] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-06 14:56:10] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42
+[2025-07-06 14:56:10] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
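+# Shard format assumed by _load_data_shard above: each .bin file begins with a
+# 256-int32 header (header[0] = magic 20240520, header[1] = version 1,
+# header[2] = token count), followed by the tokens as uint16 -- hence the
+# 256 * 4 byte seek and the 2-bytes-per-token size check.
+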
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle over the shards so the loader also supports multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_GPT_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
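+# Logging convention: print0 writes to the logfile on the master process only,
+# and echoes a message to stdout when console=True or when it carries the
+# "PRINT:" prefix (which is stripped before printing).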
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # evaluate on the first num_samples cached samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            
print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
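+# Running total of per-step training losses (together with train_step_count
+# below); used to report the average training loss at each validation pass,
+# after which both counters are reset.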
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 14:56:10] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle wraps around the shards, so multi-epoch training simply re-reads them
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
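+# Each run gets its own directory whose name encodes the configuration; for the run in
+# this log that is logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42 (the config.json
+# and the curve PNGs referenced later in the log are saved there as well).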
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the log file, one write per call
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
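+        # Illustrative (hypothetical) example: a cleaned item such as
+        #   "What university did Alice Smith attend? MIT"
+        # yields prompt "What university did Alice Smith attend?" and answer "MIT";
+        # the expected first token is then the first GPT-2 token of " MIT".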
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
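+# train_step_count tracks how many training steps contributed to train_loss_sum,
+# so the averaged training loss can be reported (and all-reduced) at each validation step.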
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 14:56:10] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 14:56:10] [Rank 0] PRINT: Constructing model...
+[2025-07-06 14:56:13] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-06 14:56:13] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-06 14:56:13] [Rank 0] PRINT: Testing model forward function:
+[2025-07-06 14:56:13] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-06 14:56:13] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-06 14:56:13] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-06 14:56:13] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-06 14:56:14] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-06 14:56:14] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-06 14:56:14] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-06 14:56:14] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-06 14:56:14] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-06 14:56:14] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-06 14:56:14] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-06 14:56:14] [Rank 0] PRINT: Model compilation complete.
+[2025-07-06 14:56:14] [Rank 0] PRINT: Starting warmup...
+[2025-07-06 14:57:21] [Rank 0] PRINT: Warmup complete.
+[2025-07-06 14:57:21] [Rank 0] PRINT: Starting training...
+[2025-07-06 14:57:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 14:57:28] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 14:57:28] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 14:57:30] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.43ms +[2025-07-06 14:57:30] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.43ms +[2025-07-06 14:57:31] [Rank 0] step:41/10000 train_time:3208ms step_avg:78.25ms +[2025-07-06 14:57:31] [Rank 0] step:41/10000 train_time:3208ms step_avg:78.25ms +[2025-07-06 14:57:33] [Rank 0] step:61/10000 train_time:4664ms step_avg:76.46ms +[2025-07-06 14:57:33] [Rank 0] step:61/10000 train_time:4664ms step_avg:76.46ms +[2025-07-06 14:57:34] [Rank 0] step:81/10000 train_time:6119ms step_avg:75.54ms +[2025-07-06 14:57:34] [Rank 0] step:81/10000 train_time:6119ms step_avg:75.54ms +[2025-07-06 14:57:36] [Rank 0] step:101/10000 train_time:8237ms step_avg:81.56ms +[2025-07-06 14:57:36] [Rank 0] step:101/10000 train_time:8237ms step_avg:81.56ms +[2025-07-06 14:57:38] [Rank 0] step:121/10000 train_time:9692ms step_avg:80.10ms +[2025-07-06 14:57:38] [Rank 0] step:121/10000 train_time:9692ms step_avg:80.10ms +[2025-07-06 14:57:39] [Rank 0] step:141/10000 train_time:11149ms step_avg:79.07ms +[2025-07-06 14:57:39] [Rank 0] step:141/10000 train_time:11149ms step_avg:79.07ms +[2025-07-06 14:57:40] [Rank 0] step:161/10000 train_time:12607ms step_avg:78.30ms +[2025-07-06 14:57:40] [Rank 0] step:161/10000 train_time:12607ms step_avg:78.30ms +[2025-07-06 14:57:43] [Rank 0] step:181/10000 train_time:14729ms step_avg:81.37ms +[2025-07-06 14:57:43] [Rank 0] step:181/10000 train_time:14729ms step_avg:81.37ms +[2025-07-06 14:57:44] [Rank 0] step:201/10000 train_time:16166ms step_avg:80.43ms +[2025-07-06 14:57:44] [Rank 0] step:201/10000 train_time:16166ms step_avg:80.43ms +[2025-07-06 14:57:46] [Rank 0] step:221/10000 train_time:17625ms step_avg:79.75ms +[2025-07-06 14:57:46] [Rank 0] step:221/10000 train_time:17625ms step_avg:79.75ms +[2025-07-06 14:57:47] [Rank 0] step:241/10000 train_time:19084ms step_avg:79.19ms +[2025-07-06 14:57:47] [Rank 0] step:241/10000 train_time:19084ms step_avg:79.19ms +[2025-07-06 14:57:48] [Rank 0] step:261/10000 train_time:20542ms step_avg:78.71ms +[2025-07-06 14:57:48] [Rank 0] step:261/10000 train_time:20542ms step_avg:78.71ms +[2025-07-06 14:57:50] [Rank 0] step:281/10000 train_time:22338ms step_avg:79.50ms +[2025-07-06 14:57:50] [Rank 0] step:281/10000 train_time:22338ms step_avg:79.50ms +[2025-07-06 14:57:52] [Rank 0] step:301/10000 train_time:23797ms step_avg:79.06ms +[2025-07-06 14:57:52] [Rank 0] step:301/10000 train_time:23797ms step_avg:79.06ms +[2025-07-06 14:57:53] [Rank 0] step:321/10000 train_time:25257ms step_avg:78.68ms +[2025-07-06 14:57:53] [Rank 0] step:321/10000 train_time:25257ms step_avg:78.68ms +[2025-07-06 14:57:55] [Rank 0] step:341/10000 train_time:26883ms step_avg:78.84ms +[2025-07-06 14:57:55] [Rank 0] step:341/10000 train_time:26883ms step_avg:78.84ms +[2025-07-06 14:57:57] [Rank 0] step:361/10000 train_time:28476ms step_avg:78.88ms +[2025-07-06 14:57:57] [Rank 0] step:361/10000 train_time:28476ms step_avg:78.88ms +[2025-07-06 14:57:58] [Rank 0] step:381/10000 train_time:30176ms step_avg:79.20ms +[2025-07-06 14:57:58] [Rank 0] step:381/10000 train_time:30176ms step_avg:79.20ms +[2025-07-06 14:58:00] [Rank 0] step:401/10000 train_time:31637ms step_avg:78.89ms +[2025-07-06 14:58:00] [Rank 0] step:401/10000 train_time:31637ms step_avg:78.89ms +[2025-07-06 14:58:01] [Rank 0] step:421/10000 train_time:33094ms step_avg:78.61ms 
+[2025-07-06 14:58:01] [Rank 0] step:421/10000 train_time:33094ms step_avg:78.61ms
+[2025-07-06 14:58:02] [Rank 0] step:441/10000 train_time:34553ms step_avg:78.35ms
+[2025-07-06 14:58:05] [Rank 0] step:461/10000 train_time:36683ms step_avg:79.57ms
+[2025-07-06 14:58:06] [Rank 0] step:481/10000 train_time:38145ms step_avg:79.30ms
+[2025-07-06 14:58:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 14:58:08] [Rank 0] PRINT: step:500/10000 train_loss:9.6493 val_loss:8.5918 train_time:39604ms step_avg:79.21ms
+[2025-07-06 14:58:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 14:58:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 14:58:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 15:03:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 15:03:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 15:03:29] [Rank 0] Total Loss: 8.9099
+[2025-07-06 15:03:29] [Rank 0] Total FTA: 0.0004
+[2025-07-06 15:03:29] [Rank 0] Group 0 Loss: 8.9283
+[2025-07-06 15:03:29] [Rank 0] Group 1 Loss: 8.9079
+[2025-07-06 15:03:29] [Rank 0] Group 2 Loss: 8.9452
+[2025-07-06 15:03:29] [Rank 0] Group 3 Loss: 8.8900
+[2025-07-06 15:03:29] [Rank 0] Group 4 Loss: 8.9082
+[2025-07-06 15:03:29] [Rank 0] Group 5 Loss: 8.8918
+[2025-07-06 15:03:29] [Rank 0] Group 6 Loss: 8.9074
+[2025-07-06 15:03:29] [Rank 0] Group 7 Loss: 8.9080
+[2025-07-06 15:03:30] [Rank 0] Group 8 Loss: 8.9023
+[2025-07-06 15:03:30] [Rank 0] Group 9 Loss: 8.8946
+[2025-07-06 15:03:30] [Rank 0] Group 10 Loss: 8.9021
+[2025-07-06 15:03:30] [Rank 0] Group 11 Loss: 8.9108
+[2025-07-06 15:03:30] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-06 15:03:30] [Rank 0] Group 10 FTA: 0.0020
+[2025-07-06 15:03:30] [Rank 0] Group 11 FTA: 0.0010
+[2025-07-06 15:03:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 15:03:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 15:03:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 15:03:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 15:03:31] [Rank 0] step:501/10000 train_time:39625ms step_avg:79.09ms
+[2025-07-06 15:03:32] [Rank 0] step:521/10000 train_time:41104ms step_avg:78.89ms
+[2025-07-06 15:03:35] [Rank 0] step:541/10000 train_time:42615ms step_avg:78.77ms
+[2025-07-06 15:03:36] [Rank 0] step:561/10000 train_time:44666ms step_avg:79.62ms
+[2025-07-06 15:03:37] [Rank 0] step:581/10000 train_time:46121ms step_avg:79.38ms
+[2025-07-06 15:03:39] [Rank 0] step:601/10000 train_time:47578ms step_avg:79.17ms
+[2025-07-06 15:03:40] [Rank 0] step:621/10000 train_time:49037ms step_avg:78.96ms
+[2025-07-06 15:03:42] [Rank 0] step:641/10000 train_time:51136ms step_avg:79.77ms
+[2025-07-06 15:03:44] [Rank 0] step:661/10000 train_time:52595ms step_avg:79.57ms
+[2025-07-06 15:03:45] [Rank 0] step:681/10000 train_time:54051ms step_avg:79.37ms
+[2025-07-06 15:03:47] [Rank 0] step:701/10000 train_time:55509ms step_avg:79.18ms
+[2025-07-06 15:03:49] [Rank 0] step:721/10000 train_time:57630ms step_avg:79.93ms
+[2025-07-06 15:03:50] [Rank 0] step:741/10000 train_time:59071ms step_avg:79.72ms
+[2025-07-06 15:03:52] [Rank 0] step:761/10000 train_time:60537ms step_avg:79.55ms
+[2025-07-06 15:03:53] [Rank 0] step:781/10000 train_time:62003ms step_avg:79.39ms
+[2025-07-06 15:03:55] [Rank 0] step:801/10000 train_time:63474ms step_avg:79.24ms
+[2025-07-06 15:03:57] [Rank 0] step:821/10000 train_time:65607ms step_avg:79.91ms
+[2025-07-06 15:03:58] [Rank 0] step:841/10000 train_time:67076ms step_avg:79.76ms
+[2025-07-06 15:04:00] [Rank 0] step:861/10000 train_time:68548ms step_avg:79.61ms
+[2025-07-06 15:04:01] [Rank 0] step:881/10000 train_time:70020ms step_avg:79.48ms
+[2025-07-06 15:04:03] [Rank 0] step:901/10000 train_time:71543ms step_avg:79.40ms
+[2025-07-06 15:04:05] [Rank 0] step:921/10000 train_time:73609ms step_avg:79.92ms
+[2025-07-06 15:04:06] [Rank 0] step:941/10000 train_time:75080ms step_avg:79.79ms
+[2025-07-06 15:04:08] [Rank 0] step:961/10000 train_time:76552ms step_avg:79.66ms
+[2025-07-06 15:04:09] [Rank 0] step:981/10000 train_time:78020ms step_avg:79.53ms
+[2025-07-06 15:04:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
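The recurring warning above is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches fit and the remaining 131072 tokens are skipped on every validation pass. A minimal sketch of the kind of check that could emit this message; the variable names are assumptions, not the script's actual identifiers:

    val_tokens = 1966080
    val_batch_size = 262144

    num_val_batches = val_tokens // val_batch_size  # floor division -> 7 batches
    if val_tokens % val_batch_size != 0:
        missed = val_tokens - num_val_batches * val_batch_size  # 131072 tokens per pass
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")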
+[2025-07-06 15:04:12] [Rank 0] PRINT: step:1000/10000 train_loss:7.7949 val_loss:7.0963 train_time:80133ms step_avg:80.13ms
+[2025-07-06 15:04:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 15:04:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 15:04:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 15:09:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 15:09:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 15:09:33] [Rank 0] Total Loss: 7.6677
+[2025-07-06 15:09:33] [Rank 0] Total FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 0 Loss: 7.6758
+[2025-07-06 15:09:33] [Rank 0] Group 1 Loss: 7.6342
+[2025-07-06 15:09:33] [Rank 0] Group 2 Loss: 7.7793
+[2025-07-06 15:09:33] [Rank 0] Group 3 Loss: 7.6291
+[2025-07-06 15:09:33] [Rank 0] Group 4 Loss: 7.6880
+[2025-07-06 15:09:33] [Rank 0] Group 5 Loss: 7.6310
+[2025-07-06 15:09:33] [Rank 0] Group 6 Loss: 7.6789
+[2025-07-06 15:09:33] [Rank 0] Group 7 Loss: 7.6568
+[2025-07-06 15:09:33] [Rank 0] Group 8 Loss: 7.6421
+[2025-07-06 15:09:33] [Rank 0] Group 9 Loss: 7.6538
+[2025-07-06 15:09:33] [Rank 0] Group 10 Loss: 7.6639
+[2025-07-06 15:09:33] [Rank 0] Group 11 Loss: 7.6678
+[2025-07-06 15:09:33] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-06 15:09:33] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-06 15:09:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 15:09:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 15:09:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 15:09:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 15:09:35] [Rank 0] step:1001/10000 train_time:80154ms step_avg:80.07ms
+[2025-07-06 15:09:36] [Rank 0] step:1021/10000 train_time:81632ms step_avg:79.95ms
+[2025-07-06 15:09:38] [Rank 0] step:1041/10000 train_time:83098ms step_avg:79.82ms
+[2025-07-06 15:09:39] [Rank 0] step:1061/10000 train_time:84560ms step_avg:79.70ms
+[2025-07-06 15:09:41] [Rank 0] step:1081/10000 train_time:86083ms step_avg:79.63ms
+[2025-07-06 15:09:43] [Rank 0] step:1101/10000 train_time:88153ms step_avg:80.07ms
+[2025-07-06 15:09:44] [Rank 0] step:1121/10000 train_time:89621ms step_avg:79.95ms
+[2025-07-06 15:09:46] [Rank 0] step:1141/10000 train_time:91088ms step_avg:79.83ms
+[2025-07-06 15:09:47] [Rank 0] step:1161/10000 train_time:92557ms step_avg:79.72ms
+[2025-07-06 15:09:49] [Rank 0] step:1181/10000 train_time:94263ms step_avg:79.82ms
+[2025-07-06 15:09:50] [Rank 0] step:1201/10000 train_time:95733ms step_avg:79.71ms
+[2025-07-06 15:09:52] [Rank 0] step:1221/10000 train_time:97204ms step_avg:79.61ms
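After each detailed evaluation, the log reports four PNGs rewritten in place. The script's actual plotting code is not shown in this log; a plausible sketch of the save step for one of the four figures, seeded with values taken from the entries above:

    import matplotlib
    matplotlib.use("Agg")  # headless backend; the job runs on a training node
    import matplotlib.pyplot as plt

    def update_total_loss_curve(steps, losses, out_path):
        # Redraw the full curve from history and overwrite the PNG, which is
        # consistent with the "curve updated and saved" wording in the log.
        fig, ax = plt.subplots()
        ax.plot(steps, losses, marker="o")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval loss")
        fig.savefig(out_path, dpi=150)
        plt.close(fig)  # free the figure; this runs every 500 steps

    # First two evaluation points recorded in this log:
    update_total_loss_curve(
        [500, 1000], [8.9099, 7.6677],
        "logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png",
    )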
+[2025-07-06 15:09:53] [Rank 0] step:1241/10000 train_time:98673ms step_avg:79.51ms
+[2025-07-06 15:09:56] [Rank 0] step:1261/10000 train_time:100403ms step_avg:79.62ms
+[2025-07-06 15:09:57] [Rank 0] step:1281/10000 train_time:102275ms step_avg:79.84ms
+[2025-07-06 15:09:58] [Rank 0] step:1301/10000 train_time:103746ms step_avg:79.74ms
+[2025-07-06 15:10:00] [Rank 0] step:1321/10000 train_time:105218ms step_avg:79.65ms
+[2025-07-06 15:10:01] [Rank 0] step:1341/10000 train_time:106694ms step_avg:79.56ms
+[2025-07-06 15:10:04] [Rank 0] step:1361/10000 train_time:108829ms step_avg:79.96ms
+[2025-07-06 15:10:05] [Rank 0] step:1381/10000 train_time:110307ms step_avg:79.87ms
+[2025-07-06 15:10:07] [Rank 0] step:1401/10000 train_time:111781ms step_avg:79.79ms
+[2025-07-06 15:10:08] [Rank 0] step:1421/10000 train_time:113257ms step_avg:79.70ms
+[2025-07-06 15:10:10] [Rank 0] step:1441/10000 train_time:114886ms step_avg:79.73ms
+[2025-07-06 15:10:11] [Rank 0] step:1461/10000 train_time:116343ms step_avg:79.63ms
+[2025-07-06 15:10:13] [Rank 0] step:1481/10000 train_time:117820ms step_avg:79.55ms
+[2025-07-06 15:10:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 15:10:15] [Rank 0] PRINT: step:1500/10000 train_loss:6.5295 val_loss:6.0083 train_time:119295ms step_avg:79.53ms
+[2025-07-06 15:10:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 15:10:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
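The sampler consistently targets ~5000 samples yet reports 5633, which suggests per-group rounding or minimum quotas in the stratified selection. The real routine is not in this log and the exact rule that yields 5633 is not recoverable from it; this sketch only illustrates the group-then-quota idea, with all names assumed:

    import math
    import random
    from collections import defaultdict

    def stratified_sample(examples, group_ids, target_total=5000, seed=42):
        # Bucket samples by group (the 12 classes reported above), then draw a
        # proportional quota from each bucket, rounding up so no group is
        # dropped. Rounding up is one way the result can exceed the target.
        rng = random.Random(seed)
        by_group = defaultdict(list)
        for ex, g in zip(examples, group_ids):
            by_group[g].append(ex)
        total = sum(len(v) for v in by_group.values())
        sampled = []
        for items in by_group.values():
            k = min(len(items), math.ceil(len(items) * target_total / total))
            sampled.extend(rng.sample(items, k))
        return sampled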
+[2025-07-06 15:10:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 15:15:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 15:15:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 15:15:36] [Rank 0] Total Loss: 6.7942
+[2025-07-06 15:15:36] [Rank 0] Total FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 0 Loss: 6.7604
+[2025-07-06 15:15:36] [Rank 0] Group 1 Loss: 6.7797
+[2025-07-06 15:15:36] [Rank 0] Group 2 Loss: 6.8973
+[2025-07-06 15:15:36] [Rank 0] Group 3 Loss: 6.7500
+[2025-07-06 15:15:36] [Rank 0] Group 4 Loss: 6.8479
+[2025-07-06 15:15:36] [Rank 0] Group 5 Loss: 6.7603
+[2025-07-06 15:15:36] [Rank 0] Group 6 Loss: 6.8022
+[2025-07-06 15:15:36] [Rank 0] Group 7 Loss: 6.8086
+[2025-07-06 15:15:36] [Rank 0] Group 8 Loss: 6.7692
+[2025-07-06 15:15:36] [Rank 0] Group 9 Loss: 6.8069
+[2025-07-06 15:15:36] [Rank 0] Group 10 Loss: 6.8021
+[2025-07-06 15:15:36] [Rank 0] Group 11 Loss: 6.7894
+[2025-07-06 15:15:36] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-06 15:15:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 15:15:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 15:15:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 15:15:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 15:15:37] [Rank 0] step:1501/10000 train_time:119317ms step_avg:79.49ms
+[2025-07-06 15:15:39] [Rank 0] step:1521/10000 train_time:120786ms step_avg:79.41ms
+[2025-07-06 15:15:41] [Rank 0] step:1541/10000 train_time:122904ms step_avg:79.76ms
+[2025-07-06 15:15:42] [Rank 0] step:1561/10000 train_time:124374ms step_avg:79.68ms
+[2025-07-06 15:15:44] [Rank 0] step:1581/10000 train_time:125844ms step_avg:79.60ms
+[2025-07-06 15:15:45] [Rank 0] step:1601/10000 train_time:127315ms step_avg:79.52ms
+[2025-07-06 15:15:47] [Rank 0] step:1621/10000 train_time:128834ms step_avg:79.48ms
+[2025-07-06 15:15:49] [Rank 0] step:1641/10000 train_time:130491ms step_avg:79.52ms
+[2025-07-06 15:15:50] [Rank 0] step:1661/10000 train_time:131963ms step_avg:79.45ms
+[2025-07-06 15:15:52] [Rank 0] step:1681/10000 train_time:133724ms step_avg:79.55ms
+[2025-07-06 15:15:53] [Rank 0] step:1701/10000 train_time:135195ms step_avg:79.48ms
+[2025-07-06 15:15:55] [Rank 0] step:1721/10000 train_time:137340ms step_avg:79.80ms
+[2025-07-06 15:15:57] [Rank 0] step:1741/10000 train_time:138813ms step_avg:79.73ms
+[2025-07-06 15:15:58] [Rank 0] step:1761/10000 train_time:140287ms step_avg:79.66ms
+[2025-07-06 15:16:00] [Rank 0] step:1781/10000 train_time:141762ms step_avg:79.60ms
+[2025-07-06 15:16:02] [Rank 0] step:1801/10000 train_time:143596ms step_avg:79.73ms
+[2025-07-06 15:16:03] [Rank 0] step:1821/10000 train_time:145053ms step_avg:79.66ms
+[2025-07-06 15:16:05] [Rank 0] step:1841/10000 train_time:146531ms step_avg:79.59ms
+[2025-07-06 15:16:06] [Rank 0] step:1861/10000 train_time:148012ms step_avg:79.53ms
+[2025-07-06 15:16:08] [Rank 0] step:1881/10000 train_time:149491ms step_avg:79.47ms
+[2025-07-06 15:16:09] [Rank 0] step:1901/10000 train_time:151205ms step_avg:79.54ms
+[2025-07-06 15:16:11] [Rank 0] step:1921/10000 train_time:152682ms step_avg:79.48ms
+[2025-07-06 15:16:12] [Rank 0] step:1941/10000 train_time:154161ms step_avg:79.42ms
+[2025-07-06 15:16:14] [Rank 0] step:1961/10000 train_time:155642ms step_avg:79.37ms
+[2025-07-06 15:16:15] [Rank 0] step:1981/10000 train_time:157376ms step_avg:79.44ms
+[2025-07-06 15:16:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 15:16:18] [Rank 0] PRINT: step:2000/10000 train_loss:5.5536 val_loss:5.1370 train_time:158834ms step_avg:79.42ms
+[2025-07-06 15:16:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 15:16:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
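Throughout the log, step_avg is simply cumulative wall-clock training time divided by the step index; e.g. at step:2000 above, 158834 ms / 2000 = 79.42 ms after rounding. As a quick check (variable names assumed):

    train_time_ms = 158834   # cumulative, from the step:2000 entry above
    step = 2000
    step_avg_ms = train_time_ms / step
    print(f"step_avg:{step_avg_ms:.2f}ms")  # -> step_avg:79.42ms, matching the log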
+[2025-07-06 15:16:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 15:21:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 15:21:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 15:21:41] [Rank 0] Total Loss: 6.1524
+[2025-07-06 15:21:41] [Rank 0] Total FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 0 Loss: 6.1020
+[2025-07-06 15:21:41] [Rank 0] Group 1 Loss: 6.2181
+[2025-07-06 15:21:41] [Rank 0] Group 2 Loss: 6.2311
+[2025-07-06 15:21:41] [Rank 0] Group 3 Loss: 6.1138
+[2025-07-06 15:21:41] [Rank 0] Group 4 Loss: 6.1855
+[2025-07-06 15:21:41] [Rank 0] Group 5 Loss: 6.1093
+[2025-07-06 15:21:41] [Rank 0] Group 6 Loss: 6.1745
+[2025-07-06 15:21:41] [Rank 0] Group 7 Loss: 6.1818
+[2025-07-06 15:21:41] [Rank 0] Group 8 Loss: 6.1309
+[2025-07-06 15:21:41] [Rank 0] Group 9 Loss: 6.1481
+[2025-07-06 15:21:41] [Rank 0] Group 10 Loss: 6.1532
+[2025-07-06 15:21:41] [Rank 0] Group 11 Loss: 6.1436
+[2025-07-06 15:21:41] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-06 15:21:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 15:21:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 15:21:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 15:21:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 15:21:42] [Rank 0] step:2001/10000 train_time:158858ms step_avg:79.39ms
+[2025-07-06 15:21:44] [Rank 0] step:2021/10000 train_time:160341ms step_avg:79.34ms
+[2025-07-06 15:21:45] [Rank 0] step:2041/10000 train_time:161812ms step_avg:79.28ms
+[2025-07-06 15:21:47] [Rank 0] step:2061/10000 train_time:163286ms step_avg:79.23ms
+[2025-07-06 15:21:49] [Rank 0] step:2081/10000 train_time:165418ms step_avg:79.49ms
+[2025-07-06 15:21:50] [Rank 0] step:2101/10000 train_time:166889ms step_avg:79.43ms
+[2025-07-06 15:21:52] [Rank 0] step:2121/10000 train_time:168362ms step_avg:79.38ms
+[2025-07-06 15:21:53] [Rank 0] step:2141/10000 train_time:169836ms step_avg:79.33ms
+[2025-07-06 15:21:55] [Rank 0] step:2161/10000 train_time:171361ms step_avg:79.30ms
+[2025-07-06 15:21:57] [Rank 0] step:2181/10000 train_time:173019ms step_avg:79.33ms
+[2025-07-06 15:21:58] [Rank 0] step:2201/10000 train_time:174494ms step_avg:79.28ms
+[2025-07-06 15:22:00] [Rank 0] step:2221/10000 train_time:175970ms step_avg:79.23ms
+[2025-07-06 15:22:01] [Rank 0] step:2241/10000 train_time:177465ms step_avg:79.19ms
+[2025-07-06 15:22:03] [Rank 0] step:2261/10000 train_time:179607ms step_avg:79.44ms
+[2025-07-06 15:22:05] [Rank 0] step:2281/10000 train_time:181107ms step_avg:79.40ms
+[2025-07-06 15:22:06] [Rank 0] step:2301/10000 train_time:182608ms step_avg:79.36ms
+[2025-07-06 15:22:08] [Rank 0] step:2321/10000 train_time:184112ms step_avg:79.32ms
+[2025-07-06 15:22:09] [Rank 0] step:2341/10000 train_time:185873ms step_avg:79.40ms
+[2025-07-06 15:22:11] [Rank 0] step:2361/10000 train_time:187601ms step_avg:79.46ms
+[2025-07-06 15:22:13] [Rank 0] step:2381/10000 train_time:189104ms step_avg:79.42ms
+[2025-07-06 15:22:14] [Rank 0] step:2401/10000 train_time:190608ms step_avg:79.39ms
+[2025-07-06 15:22:16] [Rank 0] step:2421/10000 train_time:192114ms step_avg:79.35ms
+[2025-07-06 15:22:18] [Rank 0] step:2441/10000 train_time:194265ms step_avg:79.58ms
+[2025-07-06 15:22:19] [Rank 0] step:2461/10000 train_time:195771ms step_avg:79.55ms
+[2025-07-06 15:22:21] [Rank 0] step:2481/10000 train_time:197276ms step_avg:79.51ms
+[2025-07-06 15:22:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 15:22:23] [Rank 0] PRINT: step:2500/10000 train_loss:4.7795 val_loss:4.4401 train_time:198783ms step_avg:79.51ms
+[2025-07-06 15:22:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 15:22:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 15:22:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 15:27:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 15:27:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 15:27:45] [Rank 0] Total Loss: 5.6871
+[2025-07-06 15:27:45] [Rank 0] Total FTA: 0.0758
+[2025-07-06 15:27:45] [Rank 0] Group 0 Loss: 5.6376
+[2025-07-06 15:27:45] [Rank 0] Group 1 Loss: 5.8208
+[2025-07-06 15:27:45] [Rank 0] Group 2 Loss: 5.7087
+[2025-07-06 15:27:45] [Rank 0] Group 3 Loss: 5.6316
+[2025-07-06 15:27:45] [Rank 0] Group 4 Loss: 5.6903
+[2025-07-06 15:27:45] [Rank 0] Group 5 Loss: 5.6490
+[2025-07-06 15:27:45] [Rank 0] Group 6 Loss: 5.7006
+[2025-07-06 15:27:45] [Rank 0] Group 7 Loss: 5.6775
+[2025-07-06 15:27:45] [Rank 0] Group 8 Loss: 5.6972
+[2025-07-06 15:27:45] [Rank 0] Group 9 Loss: 5.6846
+[2025-07-06 15:27:45] [Rank 0] Group 10 Loss: 5.6816
+[2025-07-06 15:27:45] [Rank 0] Group 11 Loss: 5.6978
+[2025-07-06 15:27:45] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-06 15:27:45] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 15:27:45] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-06 15:27:45] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-06 15:27:45] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-06 15:27:45] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-06 15:27:45] [Rank 0] Group 6 FTA: 0.0755
+[2025-07-06 15:27:45] [Rank 0] Group 7 FTA: 0.0755
+[2025-07-06 15:27:45] [Rank 0] Group 8 FTA: 0.0729
+[2025-07-06 15:27:45] [Rank 0] Group 9 FTA: 0.0703
+[2025-07-06 15:27:45] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-06 15:27:45] [Rank 0] Group 11 FTA: 0.0693
+[2025-07-06 15:27:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 15:27:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 15:27:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 15:27:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 15:27:46] [Rank 0] step:2501/10000 train_time:198803ms step_avg:79.49ms
+[2025-07-06 15:27:48] [Rank 0] step:2521/10000 train_time:200364ms step_avg:79.48ms
+[2025-07-06 15:27:50] [Rank 0] step:2541/10000 train_time:202451ms step_avg:79.67ms
+[2025-07-06 15:27:51] [Rank 0] step:2561/10000 train_time:203946ms step_avg:79.64ms
+[2025-07-06 15:27:53] [Rank 0] step:2581/10000 train_time:205441ms step_avg:79.60ms
+[2025-07-06 15:27:54] [Rank 0] step:2601/10000 train_time:206937ms step_avg:79.56ms
+[2025-07-06 15:27:56] [Rank 0] step:2621/10000 train_time:208669ms step_avg:79.61ms
+[2025-07-06 15:27:57] [Rank 0] step:2641/10000 train_time:210168ms step_avg:79.58ms
+[2025-07-06 15:27:59] [Rank 0] step:2661/10000 train_time:211667ms step_avg:79.54ms
+[2025-07-06 15:28:00] [Rank 0] step:2681/10000 train_time:213166ms step_avg:79.51ms
+[2025-07-06 15:28:03] [Rank 0] step:2701/10000 train_time:215346ms step_avg:79.73ms
+[2025-07-06 15:28:04] [Rank 0] step:2721/10000 train_time:216827ms step_avg:79.69ms
+[2025-07-06 15:28:06] [Rank 0] step:2741/10000 train_time:218326ms step_avg:79.65ms
+[2025-07-06 15:28:07] [Rank 0] step:2761/10000 train_time:219827ms step_avg:79.62ms
+[2025-07-06 15:28:09] [Rank 0] step:2781/10000 train_time:221329ms step_avg:79.59ms
+[2025-07-06 15:28:11] [Rank 0] step:2801/10000 train_time:223499ms step_avg:79.79ms
+[2025-07-06 15:28:12] [Rank 0] step:2821/10000 train_time:224998ms step_avg:79.76ms
+[2025-07-06 15:28:14] [Rank 0] step:2841/10000 train_time:226500ms step_avg:79.73ms
+[2025-07-06 15:28:15] [Rank 0] step:2861/10000 train_time:228002ms step_avg:79.69ms
+[2025-07-06 15:28:17] [Rank 0] step:2881/10000 train_time:229557ms step_avg:79.68ms
+[2025-07-06 15:28:19] [Rank 0] step:2901/10000 train_time:231675ms step_avg:79.86ms
+[2025-07-06 15:28:20] [Rank 0] step:2921/10000 train_time:233179ms step_avg:79.83ms
+[2025-07-06 15:28:22] [Rank 0] step:2941/10000 train_time:234685ms step_avg:79.80ms
+[2025-07-06 15:28:24] [Rank 0] step:2961/10000 train_time:236193ms step_avg:79.77ms
+[2025-07-06 15:28:26] [Rank 0] step:2981/10000 train_time:238347ms step_avg:79.96ms
+[2025-07-06 15:28:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 15:28:28] [Rank 0] PRINT: step:3000/10000 train_loss:4.1194 val_loss:3.8119 train_time:239853ms step_avg:79.95ms
+[2025-07-06 15:28:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 15:28:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
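FTA is never expanded in this excerpt; reading it as first-token accuracy (does the argmax prediction at the answer position match the reference first answer token?) is an assumption that fits the 0-to-1 scale, the per-group breakdown, and its first nonzero values at step 2500 above. A hedged sketch of such a metric, with all names and shapes assumed:

    import torch

    @torch.no_grad()
    def first_token_accuracy(logits, first_answer_ids, group_ids, num_groups=12):
        # Assumed semantics only -- the log never defines FTA.
        # logits: (N, vocab_size) at each sample's first answer position
        # first_answer_ids: (N,) reference first answer tokens
        # group_ids: (N,) class labels in [0, num_groups)
        hits = (logits.argmax(dim=-1) == first_answer_ids).float()
        per_group = []
        for g in range(num_groups):
            mask = group_ids == g
            per_group.append(hits[mask].mean().item() if mask.any() else 0.0)
        # e.g. total 0.0758 with Group 0 at 0.1612 at step 2500 above
        return hits.mean().item(), per_group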
+[2025-07-06 15:28:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 15:33:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 15:33:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 15:33:54] [Rank 0] Total Loss: 5.3121
+[2025-07-06 15:33:54] [Rank 0] Total FTA: 0.0900
+[2025-07-06 15:33:54] [Rank 0] Group 0 Loss: 5.2831
+[2025-07-06 15:33:54] [Rank 0] Group 1 Loss: 5.4342
+[2025-07-06 15:33:54] [Rank 0] Group 2 Loss: 5.4346
+[2025-07-06 15:33:54] [Rank 0] Group 3 Loss: 5.2460
+[2025-07-06 15:33:54] [Rank 0] Group 4 Loss: 5.3424
+[2025-07-06 15:33:54] [Rank 0] Group 5 Loss: 5.2822
+[2025-07-06 15:33:54] [Rank 0] Group 6 Loss: 5.3094
+[2025-07-06 15:33:54] [Rank 0] Group 7 Loss: 5.3126
+[2025-07-06 15:33:54] [Rank 0] Group 8 Loss: 5.2503
+[2025-07-06 15:33:54] [Rank 0] Group 9 Loss: 5.3012
+[2025-07-06 15:33:54] [Rank 0] Group 10 Loss: 5.2918
+[2025-07-06 15:33:54] [Rank 0] Group 11 Loss: 5.3037
+[2025-07-06 15:33:54] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-06 15:33:54] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 15:33:54] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-06 15:33:54] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-06 15:33:54] [Rank 0] Group 4 FTA: 0.0417
+[2025-07-06 15:33:54] [Rank 0] Group 5 FTA: 0.0469
+[2025-07-06 15:33:54] [Rank 0] Group 6 FTA: 0.1016
+[2025-07-06 15:33:54] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-06 15:33:54] [Rank 0] Group 8 FTA: 0.0807
+[2025-07-06 15:33:54] [Rank 0] Group 9 FTA: 0.1055
+[2025-07-06 15:33:54] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-06 15:33:54] [Rank 0] Group 11 FTA: 0.1084
+[2025-07-06 15:33:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 15:33:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 15:33:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 15:33:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 15:33:55] [Rank 0] step:3001/10000 train_time:239873ms step_avg:79.93ms
+[2025-07-06 15:33:57] [Rank 0] step:3021/10000 train_time:241395ms step_avg:79.91ms
+[2025-07-06 15:33:58] [Rank 0] step:3041/10000 train_time:242890ms step_avg:79.87ms
+[2025-07-06 15:34:00] [Rank 0] step:3061/10000 train_time:244386ms step_avg:79.84ms
+[2025-07-06 15:34:02] [Rank 0] step:3081/10000 train_time:246551ms step_avg:80.02ms
+[2025-07-06 15:34:03] [Rank 0] step:3101/10000 train_time:248046ms step_avg:79.99ms
+[2025-07-06 15:34:05] [Rank 0] step:3121/10000 train_time:249546ms step_avg:79.96ms
+[2025-07-06 15:34:06] [Rank 0] step:3141/10000 train_time:251044ms step_avg:79.92ms
+[2025-07-06 15:34:08] [Rank 0] step:3161/10000 train_time:252777ms step_avg:79.97ms
+[2025-07-06 15:34:09] [Rank 0] step:3181/10000 train_time:254275ms step_avg:79.94ms
+[2025-07-06 15:34:11] [Rank 0] step:3201/10000 train_time:255775ms step_avg:79.90ms
+[2025-07-06 15:34:12] [Rank 0] step:3221/10000 train_time:257274ms step_avg:79.87ms
+[2025-07-06 15:34:15] [Rank 0] step:3241/10000 train_time:259442ms step_avg:80.05ms
+[2025-07-06 15:34:16] [Rank 0] step:3261/10000 train_time:260922ms step_avg:80.01ms
+[2025-07-06 15:34:18] [Rank 0] step:3281/10000 train_time:262420ms step_avg:79.98ms
+[2025-07-06 15:34:19] [Rank 0] step:3301/10000 train_time:263921ms step_avg:79.95ms
+[2025-07-06 15:34:21] [Rank 0] step:3321/10000 train_time:265422ms step_avg:79.92ms
+[2025-07-06 15:34:23] [Rank 0] step:3341/10000 train_time:267562ms step_avg:80.08ms
+[2025-07-06 15:34:24] [Rank 0] step:3361/10000 train_time:269063ms step_avg:80.05ms
+[2025-07-06 15:34:26] [Rank 0] step:3381/10000 train_time:270568ms step_avg:80.03ms
+[2025-07-06 15:34:27] [Rank 0] step:3401/10000 train_time:272074ms step_avg:80.00ms
+[2025-07-06 15:34:29] [Rank 0] step:3421/10000 train_time:273630ms step_avg:79.99ms
+[2025-07-06 15:34:31] [Rank 0] step:3441/10000 train_time:275743ms step_avg:80.13ms
+[2025-07-06 15:34:32] [Rank 0] step:3461/10000 train_time:277248ms step_avg:80.11ms
+[2025-07-06 15:34:34] [Rank 0] step:3481/10000 train_time:278753ms step_avg:80.08ms
+[2025-07-06 15:34:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 15:34:36] [Rank 0] PRINT: step:3500/10000 train_loss:3.5396 val_loss:3.2816 train_time:280260ms step_avg:80.07ms
+[2025-07-06 15:34:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 15:34:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 15:34:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 15:39:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 15:39:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 15:39:58] [Rank 0] Total Loss: 4.9859
+[2025-07-06 15:39:58] [Rank 0] Total FTA: 0.1191
+[2025-07-06 15:39:58] [Rank 0] Group 0 Loss: 5.0018
+[2025-07-06 15:39:58] [Rank 0] Group 1 Loss: 5.1513
+[2025-07-06 15:39:58] [Rank 0] Group 2 Loss: 4.8921
+[2025-07-06 15:39:58] [Rank 0] Group 3 Loss: 4.9289
+[2025-07-06 15:39:58] [Rank 0] Group 4 Loss: 5.0199
+[2025-07-06 15:39:58] [Rank 0] Group 5 Loss: 4.9299
+[2025-07-06 15:39:58] [Rank 0] Group 6 Loss: 4.9861
+[2025-07-06 15:39:58] [Rank 0] Group 7 Loss: 5.0084
+[2025-07-06 15:39:58] [Rank 0] Group 8 Loss: 4.9686
+[2025-07-06 15:39:58] [Rank 0] Group 9 Loss: 4.9564
+[2025-07-06 15:39:58] [Rank 0] Group 10 Loss: 4.9812
+[2025-07-06 15:39:58] [Rank 0] Group 11 Loss: 4.9845
+[2025-07-06 15:39:58] [Rank 0] Group 0 FTA: 0.1821
+[2025-07-06 15:39:58] [Rank 0] Group 1 FTA: 0.2031
+[2025-07-06 15:39:58] [Rank 0] Group 2 FTA: 0.1094
+[2025-07-06 15:39:58] [Rank 0] Group 3 FTA: 0.0573
+[2025-07-06 15:39:58] [Rank 0] Group 4 FTA: 0.0651
+[2025-07-06 15:39:58] [Rank 0] Group 5 FTA: 0.0755
+[2025-07-06 15:39:58] [Rank 0] Group 6 FTA: 0.1094
+[2025-07-06 15:39:58] [Rank 0] Group 7 FTA: 0.1354
+[2025-07-06 15:39:58] [Rank 0] Group 8 FTA: 0.1224
+[2025-07-06 15:39:58] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-06 15:39:58] [Rank 0] Group 10 FTA: 0.1094
+[2025-07-06 15:39:58] [Rank 0] Group 11 FTA: 0.1162
+[2025-07-06 15:39:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 15:39:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 15:39:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 15:39:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 15:40:00] [Rank 0] step:3501/10000 train_time:280281ms step_avg:80.06ms
+[2025-07-06 15:40:02] [Rank 0] step:3521/10000 train_time:282456ms step_avg:80.22ms
+[2025-07-06 15:40:03] [Rank 0] step:3541/10000 train_time:283949ms step_avg:80.19ms
+[2025-07-06 15:40:05] [Rank 0] step:3561/10000 train_time:285445ms step_avg:80.16ms
+[2025-07-06 15:40:06] [Rank 0] step:3581/10000 train_time:287150ms step_avg:80.19ms
+[2025-07-06 15:40:09] [Rank 0] step:3601/10000 train_time:288646ms step_avg:80.16ms
+[2025-07-06 15:40:10] [Rank 0] step:3621/10000 train_time:290792ms step_avg:80.31ms
+[2025-07-06 15:40:12] [Rank 0] step:3641/10000 train_time:292288ms step_avg:80.28ms
+[2025-07-06 15:40:13] [Rank 0] step:3661/10000 train_time:293786ms step_avg:80.25ms
+[2025-07-06 15:40:15] [Rank 0] step:3681/10000 train_time:295285ms step_avg:80.22ms
+[2025-07-06 15:40:17] [Rank 0] step:3701/10000 train_time:297432ms step_avg:80.37ms
+[2025-07-06 15:40:18] [Rank 0] step:3721/10000 train_time:298932ms step_avg:80.34ms
+[2025-07-06 15:40:20] [Rank 0] step:3741/10000 train_time:300430ms step_avg:80.31ms
+[2025-07-06 15:40:21] [Rank 0] step:3761/10000 train_time:301931ms step_avg:80.28ms
+[2025-07-06 15:40:23] [Rank 0] step:3781/10000 train_time:304098ms step_avg:80.43ms
+[2025-07-06 15:40:25] [Rank 0] step:3801/10000 train_time:305579ms step_avg:80.39ms
0] step:3801/10000 train_time:305579ms step_avg:80.39ms +[2025-07-06 15:40:26] [Rank 0] step:3821/10000 train_time:307081ms step_avg:80.37ms +[2025-07-06 15:40:26] [Rank 0] step:3821/10000 train_time:307081ms step_avg:80.37ms +[2025-07-06 15:40:28] [Rank 0] step:3841/10000 train_time:308582ms step_avg:80.34ms +[2025-07-06 15:40:28] [Rank 0] step:3841/10000 train_time:308582ms step_avg:80.34ms +[2025-07-06 15:40:29] [Rank 0] step:3861/10000 train_time:310088ms step_avg:80.31ms +[2025-07-06 15:40:29] [Rank 0] step:3861/10000 train_time:310088ms step_avg:80.31ms +[2025-07-06 15:40:32] [Rank 0] step:3881/10000 train_time:312252ms step_avg:80.46ms +[2025-07-06 15:40:32] [Rank 0] step:3881/10000 train_time:312252ms step_avg:80.46ms +[2025-07-06 15:40:33] [Rank 0] step:3901/10000 train_time:313755ms step_avg:80.43ms +[2025-07-06 15:40:33] [Rank 0] step:3901/10000 train_time:313755ms step_avg:80.43ms +[2025-07-06 15:40:35] [Rank 0] step:3921/10000 train_time:315260ms step_avg:80.40ms +[2025-07-06 15:40:35] [Rank 0] step:3921/10000 train_time:315260ms step_avg:80.40ms +[2025-07-06 15:40:36] [Rank 0] step:3941/10000 train_time:316766ms step_avg:80.38ms +[2025-07-06 15:40:36] [Rank 0] step:3941/10000 train_time:316766ms step_avg:80.38ms +[2025-07-06 15:40:38] [Rank 0] step:3961/10000 train_time:318322ms step_avg:80.36ms +[2025-07-06 15:40:38] [Rank 0] step:3961/10000 train_time:318322ms step_avg:80.36ms +[2025-07-06 15:40:39] [Rank 0] step:3981/10000 train_time:320015ms step_avg:80.39ms +[2025-07-06 15:40:39] [Rank 0] step:3981/10000 train_time:320015ms step_avg:80.39ms +[2025-07-06 15:40:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 15:40:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 15:40:42] [Rank 0] PRINT: step:4000/10000 train_loss:3.0584 val_loss:2.8505 train_time:321522ms step_avg:80.38ms +[2025-07-06 15:40:42] [Rank 0] PRINT: step:4000/10000 train_loss:3.0584 val_loss:2.8505 train_time:321522ms step_avg:80.38ms +[2025-07-06 15:40:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 15:40:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 15:40:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 15:40:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 15:40:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 15:40:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 15:46:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 15:46:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 15:46:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 15:46:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 15:46:03] [Rank 0] Total Loss: 4.6656 +[2025-07-06 15:46:03] [Rank 0] Total Loss: 4.6656 +[2025-07-06 15:46:03] [Rank 0] Total FTA: 0.1070 +[2025-07-06 15:46:03] [Rank 0] Total FTA: 0.1070 +[2025-07-06 15:46:03] [Rank 0] Group 0 Loss: 4.6662 +[2025-07-06 15:46:03] [Rank 0] Group 0 Loss: 4.6662 +[2025-07-06 15:46:03] [Rank 0] Group 1 Loss: 4.8161 +[2025-07-06 15:46:03] [Rank 0] Group 1 Loss: 4.8161 +[2025-07-06 15:46:03] [Rank 0] Group 2 Loss: 4.7159 +[2025-07-06 15:46:03] [Rank 0] Group 2 Loss: 4.7159 +[2025-07-06 15:46:03] [Rank 0] Group 3 Loss: 4.5952 +[2025-07-06 15:46:03] [Rank 0] Group 3 Loss: 4.5952 +[2025-07-06 15:46:03] [Rank 0] Group 4 Loss: 4.7445 +[2025-07-06 15:46:03] [Rank 0] Group 4 Loss: 4.7445 +[2025-07-06 15:46:03] [Rank 0] Group 5 Loss: 4.5873 +[2025-07-06 15:46:03] [Rank 0] Group 5 Loss: 4.5873 +[2025-07-06 15:46:03] [Rank 0] Group 6 Loss: 4.5991 +[2025-07-06 15:46:03] [Rank 0] Group 6 Loss: 4.5991 +[2025-07-06 15:46:03] [Rank 0] Group 7 Loss: 4.6504 +[2025-07-06 15:46:03] [Rank 0] Group 7 Loss: 4.6504 +[2025-07-06 15:46:03] [Rank 0] Group 8 Loss: 4.6512 +[2025-07-06 15:46:03] [Rank 0] Group 8 Loss: 4.6512 +[2025-07-06 15:46:03] [Rank 0] Group 9 Loss: 4.6578 +[2025-07-06 15:46:03] [Rank 0] Group 9 Loss: 4.6578 +[2025-07-06 15:46:03] [Rank 0] Group 10 Loss: 4.6739 +[2025-07-06 15:46:03] [Rank 0] Group 10 Loss: 4.6739 +[2025-07-06 15:46:03] [Rank 0] Group 11 Loss: 4.6498 +[2025-07-06 15:46:03] [Rank 0] Group 11 Loss: 4.6498 +[2025-07-06 15:46:03] [Rank 0] Group 0 FTA: 0.1469 +[2025-07-06 15:46:03] [Rank 0] Group 0 FTA: 0.1469 +[2025-07-06 15:46:03] [Rank 0] Group 1 FTA: 0.1641 +[2025-07-06 15:46:03] [Rank 0] Group 1 FTA: 0.1641 +[2025-07-06 15:46:03] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-06 15:46:03] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-06 15:46:03] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-06 15:46:03] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-06 15:46:03] [Rank 0] Group 4 FTA: 0.0443 +[2025-07-06 15:46:03] [Rank 0] Group 4 FTA: 0.0443 +[2025-07-06 15:46:03] [Rank 0] Group 5 FTA: 0.0859 +[2025-07-06 15:46:03] [Rank 0] Group 5 FTA: 0.0859 +[2025-07-06 15:46:03] [Rank 0] Group 6 FTA: 0.1094 +[2025-07-06 15:46:03] [Rank 0] Group 6 FTA: 0.1094 +[2025-07-06 15:46:03] [Rank 0] Group 7 FTA: 0.1224 +[2025-07-06 15:46:03] [Rank 0] Group 7 FTA: 0.1224 +[2025-07-06 15:46:03] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-06 15:46:03] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-06 15:46:03] [Rank 0] Group 9 FTA: 0.0938 +[2025-07-06 15:46:03] [Rank 0] Group 9 FTA: 0.0938 +[2025-07-06 15:46:03] [Rank 0] Group 10 FTA: 0.1367 +[2025-07-06 15:46:03] [Rank 0] Group 10 FTA: 0.1367 +[2025-07-06 15:46:03] [Rank 0] Group 11 FTA: 0.1016 +[2025-07-06 15:46:03] [Rank 0] Group 11 FTA: 0.1016 +[2025-07-06 15:46:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 15:46:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 15:46:04] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 15:46:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 15:46:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 15:46:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 15:46:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 15:46:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 15:46:05] [Rank 0] step:4001/10000 train_time:321543ms step_avg:80.37ms +[2025-07-06 15:46:05] [Rank 0] step:4001/10000 train_time:321543ms step_avg:80.37ms +[2025-07-06 15:46:06] [Rank 0] step:4021/10000 train_time:323062ms step_avg:80.34ms +[2025-07-06 15:46:06] [Rank 0] step:4021/10000 train_time:323062ms step_avg:80.34ms +[2025-07-06 15:46:08] [Rank 0] step:4041/10000 train_time:324555ms step_avg:80.32ms +[2025-07-06 15:46:08] [Rank 0] step:4041/10000 train_time:324555ms step_avg:80.32ms +[2025-07-06 15:46:10] [Rank 0] step:4061/10000 train_time:326701ms step_avg:80.45ms +[2025-07-06 15:46:10] [Rank 0] step:4061/10000 train_time:326701ms step_avg:80.45ms +[2025-07-06 15:46:11] [Rank 0] step:4081/10000 train_time:328195ms step_avg:80.42ms +[2025-07-06 15:46:11] [Rank 0] step:4081/10000 train_time:328195ms step_avg:80.42ms +[2025-07-06 15:46:13] [Rank 0] step:4101/10000 train_time:329693ms step_avg:80.39ms +[2025-07-06 15:46:13] [Rank 0] step:4101/10000 train_time:329693ms step_avg:80.39ms +[2025-07-06 15:46:14] [Rank 0] step:4121/10000 train_time:331191ms step_avg:80.37ms +[2025-07-06 15:46:14] [Rank 0] step:4121/10000 train_time:331191ms step_avg:80.37ms +[2025-07-06 15:46:17] [Rank 0] step:4141/10000 train_time:332945ms step_avg:80.40ms +[2025-07-06 15:46:17] [Rank 0] step:4141/10000 train_time:332945ms step_avg:80.40ms +[2025-07-06 15:46:18] [Rank 0] step:4161/10000 train_time:334848ms step_avg:80.47ms +[2025-07-06 15:46:18] [Rank 0] step:4161/10000 train_time:334848ms step_avg:80.47ms +[2025-07-06 15:46:20] [Rank 0] step:4181/10000 train_time:336346ms step_avg:80.45ms +[2025-07-06 15:46:20] [Rank 0] step:4181/10000 train_time:336346ms step_avg:80.45ms +[2025-07-06 15:46:21] [Rank 0] step:4201/10000 train_time:337845ms step_avg:80.42ms +[2025-07-06 15:46:21] [Rank 0] step:4201/10000 train_time:337845ms step_avg:80.42ms +[2025-07-06 15:46:23] [Rank 0] step:4221/10000 train_time:339347ms step_avg:80.39ms +[2025-07-06 15:46:23] [Rank 0] step:4221/10000 train_time:339347ms step_avg:80.39ms +[2025-07-06 15:46:25] [Rank 0] step:4241/10000 train_time:341695ms step_avg:80.57ms +[2025-07-06 15:46:25] [Rank 0] step:4241/10000 train_time:341695ms step_avg:80.57ms +[2025-07-06 15:46:27] [Rank 0] step:4261/10000 train_time:343280ms step_avg:80.56ms +[2025-07-06 15:46:27] [Rank 0] step:4261/10000 train_time:343280ms step_avg:80.56ms +[2025-07-06 15:46:28] [Rank 0] step:4281/10000 train_time:344780ms step_avg:80.54ms +[2025-07-06 15:46:28] [Rank 0] step:4281/10000 train_time:344780ms step_avg:80.54ms +[2025-07-06 15:46:30] [Rank 0] step:4301/10000 train_time:346280ms step_avg:80.51ms +[2025-07-06 15:46:30] [Rank 
0] step:4301/10000 train_time:346280ms step_avg:80.51ms +[2025-07-06 15:46:32] [Rank 0] step:4321/10000 train_time:348031ms step_avg:80.54ms +[2025-07-06 15:46:32] [Rank 0] step:4321/10000 train_time:348031ms step_avg:80.54ms +[2025-07-06 15:46:33] [Rank 0] step:4341/10000 train_time:349918ms step_avg:80.61ms +[2025-07-06 15:46:33] [Rank 0] step:4341/10000 train_time:349918ms step_avg:80.61ms +[2025-07-06 15:46:35] [Rank 0] step:4361/10000 train_time:351418ms step_avg:80.58ms +[2025-07-06 15:46:35] [Rank 0] step:4361/10000 train_time:351418ms step_avg:80.58ms +[2025-07-06 15:46:36] [Rank 0] step:4381/10000 train_time:352919ms step_avg:80.56ms +[2025-07-06 15:46:36] [Rank 0] step:4381/10000 train_time:352919ms step_avg:80.56ms +[2025-07-06 15:46:38] [Rank 0] step:4401/10000 train_time:354422ms step_avg:80.53ms +[2025-07-06 15:46:38] [Rank 0] step:4401/10000 train_time:354422ms step_avg:80.53ms +[2025-07-06 15:46:40] [Rank 0] step:4421/10000 train_time:356592ms step_avg:80.66ms +[2025-07-06 15:46:40] [Rank 0] step:4421/10000 train_time:356592ms step_avg:80.66ms +[2025-07-06 15:46:41] [Rank 0] step:4441/10000 train_time:358092ms step_avg:80.63ms +[2025-07-06 15:46:41] [Rank 0] step:4441/10000 train_time:358092ms step_avg:80.63ms +[2025-07-06 15:46:43] [Rank 0] step:4461/10000 train_time:359594ms step_avg:80.61ms +[2025-07-06 15:46:43] [Rank 0] step:4461/10000 train_time:359594ms step_avg:80.61ms +[2025-07-06 15:46:44] [Rank 0] step:4481/10000 train_time:361099ms step_avg:80.58ms +[2025-07-06 15:46:44] [Rank 0] step:4481/10000 train_time:361099ms step_avg:80.58ms +[2025-07-06 15:46:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 15:46:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 15:46:47] [Rank 0] PRINT: step:4500/10000 train_loss:2.6747 val_loss:2.5148 train_time:362605ms step_avg:80.58ms +[2025-07-06 15:46:47] [Rank 0] PRINT: step:4500/10000 train_loss:2.6747 val_loss:2.5148 train_time:362605ms step_avg:80.58ms +[2025-07-06 15:46:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 15:46:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 15:46:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 15:46:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 15:46:47] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 15:46:47] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 15:52:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 15:52:09] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 15:52:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 15:52:09] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 15:52:09] [Rank 0] Total Loss: 4.4798 +[2025-07-06 15:52:09] [Rank 0] Total Loss: 4.4798 +[2025-07-06 15:52:09] [Rank 0] Total FTA: 0.1259 +[2025-07-06 15:52:09] [Rank 0] Total FTA: 0.1259 +[2025-07-06 15:52:09] [Rank 0] Group 0 Loss: 4.6060 +[2025-07-06 15:52:09] [Rank 0] Group 0 Loss: 4.6060 +[2025-07-06 15:52:09] [Rank 0] Group 1 Loss: 4.4348 +[2025-07-06 15:52:09] [Rank 0] Group 1 Loss: 4.4348 +[2025-07-06 15:52:09] [Rank 0] Group 2 Loss: 4.3670 +[2025-07-06 15:52:09] [Rank 0] Group 2 Loss: 4.3670 +[2025-07-06 15:52:09] [Rank 0] Group 3 Loss: 4.3893 +[2025-07-06 15:52:09] [Rank 0] Group 3 Loss: 4.3893 +[2025-07-06 15:52:09] [Rank 0] Group 4 Loss: 4.5297 +[2025-07-06 15:52:09] [Rank 0] Group 4 Loss: 4.5297 +[2025-07-06 15:52:09] [Rank 0] Group 5 Loss: 4.4319 +[2025-07-06 15:52:09] [Rank 0] Group 5 Loss: 4.4319 +[2025-07-06 15:52:09] [Rank 0] Group 6 Loss: 4.4552 +[2025-07-06 15:52:09] [Rank 0] Group 6 Loss: 4.4552 +[2025-07-06 15:52:09] [Rank 0] Group 7 Loss: 4.4989 +[2025-07-06 15:52:09] [Rank 0] Group 7 Loss: 4.4989 +[2025-07-06 15:52:09] [Rank 0] Group 8 Loss: 4.4609 +[2025-07-06 15:52:09] [Rank 0] Group 8 Loss: 4.4609 +[2025-07-06 15:52:09] [Rank 0] Group 9 Loss: 4.4867 +[2025-07-06 15:52:09] [Rank 0] Group 9 Loss: 4.4867 +[2025-07-06 15:52:09] [Rank 0] Group 10 Loss: 4.4553 +[2025-07-06 15:52:09] [Rank 0] Group 10 Loss: 4.4553 +[2025-07-06 15:52:09] [Rank 0] Group 11 Loss: 4.4971 +[2025-07-06 15:52:09] [Rank 0] Group 11 Loss: 4.4971 +[2025-07-06 15:52:09] [Rank 0] Group 0 FTA: 0.1625 +[2025-07-06 15:52:09] [Rank 0] Group 0 FTA: 0.1625 +[2025-07-06 15:52:09] [Rank 0] Group 1 FTA: 0.2005 +[2025-07-06 15:52:09] [Rank 0] Group 1 FTA: 0.2005 +[2025-07-06 15:52:09] [Rank 0] Group 2 FTA: 0.1615 +[2025-07-06 15:52:09] [Rank 0] Group 2 FTA: 0.1615 +[2025-07-06 15:52:09] [Rank 0] Group 3 FTA: 0.0365 +[2025-07-06 15:52:09] [Rank 0] Group 3 FTA: 0.0365 +[2025-07-06 15:52:09] [Rank 0] Group 4 FTA: 0.0391 +[2025-07-06 15:52:09] [Rank 0] Group 4 FTA: 0.0391 +[2025-07-06 15:52:09] [Rank 0] Group 5 FTA: 0.0964 +[2025-07-06 15:52:09] [Rank 0] Group 5 FTA: 0.0964 +[2025-07-06 15:52:09] [Rank 0] Group 6 FTA: 0.1406 +[2025-07-06 15:52:09] [Rank 0] Group 6 FTA: 0.1406 +[2025-07-06 15:52:09] [Rank 0] Group 7 FTA: 0.1354 +[2025-07-06 15:52:09] [Rank 0] Group 7 FTA: 0.1354 +[2025-07-06 15:52:09] [Rank 0] Group 8 FTA: 0.1510 +[2025-07-06 15:52:09] [Rank 0] Group 8 FTA: 0.1510 +[2025-07-06 15:52:09] [Rank 0] Group 9 FTA: 0.1133 +[2025-07-06 15:52:09] [Rank 0] Group 9 FTA: 0.1133 +[2025-07-06 15:52:09] [Rank 0] Group 10 FTA: 0.1289 +[2025-07-06 15:52:09] [Rank 0] Group 10 FTA: 0.1289 +[2025-07-06 15:52:09] [Rank 0] Group 11 FTA: 0.1172 +[2025-07-06 15:52:09] [Rank 0] Group 11 FTA: 0.1172 +[2025-07-06 15:52:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 15:52:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 15:52:09] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 15:52:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 15:52:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 15:52:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 15:52:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 15:52:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 15:52:11] [Rank 0] step:4501/10000 train_time:363363ms step_avg:80.73ms +[2025-07-06 15:52:11] [Rank 0] step:4501/10000 train_time:363363ms step_avg:80.73ms +[2025-07-06 15:52:12] [Rank 0] step:4521/10000 train_time:364886ms step_avg:80.71ms +[2025-07-06 15:52:12] [Rank 0] step:4521/10000 train_time:364886ms step_avg:80.71ms +[2025-07-06 15:52:14] [Rank 0] step:4541/10000 train_time:366381ms step_avg:80.68ms +[2025-07-06 15:52:14] [Rank 0] step:4541/10000 train_time:366381ms step_avg:80.68ms +[2025-07-06 15:52:15] [Rank 0] step:4561/10000 train_time:367877ms step_avg:80.66ms +[2025-07-06 15:52:15] [Rank 0] step:4561/10000 train_time:367877ms step_avg:80.66ms +[2025-07-06 15:52:17] [Rank 0] step:4581/10000 train_time:369373ms step_avg:80.63ms +[2025-07-06 15:52:17] [Rank 0] step:4581/10000 train_time:369373ms step_avg:80.63ms +[2025-07-06 15:52:19] [Rank 0] step:4601/10000 train_time:371511ms step_avg:80.75ms +[2025-07-06 15:52:19] [Rank 0] step:4601/10000 train_time:371511ms step_avg:80.75ms +[2025-07-06 15:52:21] [Rank 0] step:4621/10000 train_time:373008ms step_avg:80.72ms +[2025-07-06 15:52:21] [Rank 0] step:4621/10000 train_time:373008ms step_avg:80.72ms +[2025-07-06 15:52:22] [Rank 0] step:4641/10000 train_time:374506ms step_avg:80.70ms +[2025-07-06 15:52:22] [Rank 0] step:4641/10000 train_time:374506ms step_avg:80.70ms +[2025-07-06 15:52:24] [Rank 0] step:4661/10000 train_time:376005ms step_avg:80.67ms +[2025-07-06 15:52:24] [Rank 0] step:4661/10000 train_time:376005ms step_avg:80.67ms +[2025-07-06 15:52:26] [Rank 0] step:4681/10000 train_time:378177ms step_avg:80.79ms +[2025-07-06 15:52:26] [Rank 0] step:4681/10000 train_time:378177ms step_avg:80.79ms +[2025-07-06 15:52:27] [Rank 0] step:4701/10000 train_time:379656ms step_avg:80.76ms +[2025-07-06 15:52:27] [Rank 0] step:4701/10000 train_time:379656ms step_avg:80.76ms +[2025-07-06 15:52:29] [Rank 0] step:4721/10000 train_time:381157ms step_avg:80.74ms +[2025-07-06 15:52:29] [Rank 0] step:4721/10000 train_time:381157ms step_avg:80.74ms +[2025-07-06 15:52:30] [Rank 0] step:4741/10000 train_time:382658ms step_avg:80.71ms +[2025-07-06 15:52:30] [Rank 0] step:4741/10000 train_time:382658ms step_avg:80.71ms +[2025-07-06 15:52:32] [Rank 0] step:4761/10000 train_time:384158ms step_avg:80.69ms +[2025-07-06 15:52:32] [Rank 0] step:4761/10000 train_time:384158ms step_avg:80.69ms +[2025-07-06 15:52:33] [Rank 0] step:4781/10000 train_time:385899ms step_avg:80.72ms +[2025-07-06 15:52:33] [Rank 0] step:4781/10000 train_time:385899ms step_avg:80.72ms +[2025-07-06 15:52:35] [Rank 0] step:4801/10000 train_time:387400ms step_avg:80.69ms +[2025-07-06 15:52:35] [Rank 
0] step:4801/10000 train_time:387400ms step_avg:80.69ms +[2025-07-06 15:52:36] [Rank 0] step:4821/10000 train_time:388902ms step_avg:80.67ms +[2025-07-06 15:52:36] [Rank 0] step:4821/10000 train_time:388902ms step_avg:80.67ms +[2025-07-06 15:52:38] [Rank 0] step:4841/10000 train_time:390402ms step_avg:80.64ms +[2025-07-06 15:52:38] [Rank 0] step:4841/10000 train_time:390402ms step_avg:80.64ms +[2025-07-06 15:52:40] [Rank 0] step:4861/10000 train_time:392577ms step_avg:80.76ms +[2025-07-06 15:52:40] [Rank 0] step:4861/10000 train_time:392577ms step_avg:80.76ms +[2025-07-06 15:52:42] [Rank 0] step:4881/10000 train_time:394059ms step_avg:80.73ms +[2025-07-06 15:52:42] [Rank 0] step:4881/10000 train_time:394059ms step_avg:80.73ms +[2025-07-06 15:52:43] [Rank 0] step:4901/10000 train_time:395560ms step_avg:80.71ms +[2025-07-06 15:52:43] [Rank 0] step:4901/10000 train_time:395560ms step_avg:80.71ms +[2025-07-06 15:52:45] [Rank 0] step:4921/10000 train_time:397314ms step_avg:80.74ms +[2025-07-06 15:52:45] [Rank 0] step:4921/10000 train_time:397314ms step_avg:80.74ms +[2025-07-06 15:52:46] [Rank 0] step:4941/10000 train_time:398816ms step_avg:80.72ms +[2025-07-06 15:52:46] [Rank 0] step:4941/10000 train_time:398816ms step_avg:80.72ms +[2025-07-06 15:52:49] [Rank 0] step:4961/10000 train_time:400985ms step_avg:80.83ms +[2025-07-06 15:52:49] [Rank 0] step:4961/10000 train_time:400985ms step_avg:80.83ms +[2025-07-06 15:52:50] [Rank 0] step:4981/10000 train_time:402487ms step_avg:80.80ms +[2025-07-06 15:52:50] [Rank 0] step:4981/10000 train_time:402487ms step_avg:80.80ms +[2025-07-06 15:52:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 15:52:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 15:52:52] [Rank 0] PRINT: step:5000/10000 train_loss:2.3837 val_loss:2.2642 train_time:403991ms step_avg:80.80ms +[2025-07-06 15:52:52] [Rank 0] PRINT: step:5000/10000 train_loss:2.3837 val_loss:2.2642 train_time:403991ms step_avg:80.80ms +[2025-07-06 15:52:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 15:52:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 15:52:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 15:52:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 15:52:53] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 15:52:53] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 15:58:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 15:58:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 15:58:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 15:58:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 15:58:15] [Rank 0] Total Loss: 4.3820 +[2025-07-06 15:58:15] [Rank 0] Total Loss: 4.3820 +[2025-07-06 15:58:15] [Rank 0] Total FTA: 0.1676 +[2025-07-06 15:58:15] [Rank 0] Total FTA: 0.1676 +[2025-07-06 15:58:15] [Rank 0] Group 0 Loss: 4.5238 +[2025-07-06 15:58:15] [Rank 0] Group 0 Loss: 4.5238 +[2025-07-06 15:58:15] [Rank 0] Group 1 Loss: 4.4341 +[2025-07-06 15:58:15] [Rank 0] Group 1 Loss: 4.4341 +[2025-07-06 15:58:15] [Rank 0] Group 2 Loss: 4.2273 +[2025-07-06 15:58:15] [Rank 0] Group 2 Loss: 4.2273 +[2025-07-06 15:58:15] [Rank 0] Group 3 Loss: 4.3558 +[2025-07-06 15:58:15] [Rank 0] Group 3 Loss: 4.3558 +[2025-07-06 15:58:15] [Rank 0] Group 4 Loss: 4.4362 +[2025-07-06 15:58:15] [Rank 0] Group 4 Loss: 4.4362 +[2025-07-06 15:58:15] [Rank 0] Group 5 Loss: 4.3377 +[2025-07-06 15:58:15] [Rank 0] Group 5 Loss: 4.3377 +[2025-07-06 15:58:15] [Rank 0] Group 6 Loss: 4.3112 +[2025-07-06 15:58:15] [Rank 0] Group 6 Loss: 4.3112 +[2025-07-06 15:58:15] [Rank 0] Group 7 Loss: 4.3908 +[2025-07-06 15:58:15] [Rank 0] Group 7 Loss: 4.3908 +[2025-07-06 15:58:15] [Rank 0] Group 8 Loss: 4.3657 +[2025-07-06 15:58:15] [Rank 0] Group 8 Loss: 4.3657 +[2025-07-06 15:58:15] [Rank 0] Group 9 Loss: 4.3690 +[2025-07-06 15:58:15] [Rank 0] Group 9 Loss: 4.3690 +[2025-07-06 15:58:15] [Rank 0] Group 10 Loss: 4.3848 +[2025-07-06 15:58:15] [Rank 0] Group 10 Loss: 4.3848 +[2025-07-06 15:58:15] [Rank 0] Group 11 Loss: 4.3511 +[2025-07-06 15:58:15] [Rank 0] Group 11 Loss: 4.3511 +[2025-07-06 15:58:15] [Rank 0] Group 0 FTA: 0.3199 +[2025-07-06 15:58:15] [Rank 0] Group 0 FTA: 0.3199 +[2025-07-06 15:58:15] [Rank 0] Group 1 FTA: 0.1432 +[2025-07-06 15:58:15] [Rank 0] Group 1 FTA: 0.1432 +[2025-07-06 15:58:15] [Rank 0] Group 2 FTA: 0.1693 +[2025-07-06 15:58:15] [Rank 0] Group 2 FTA: 0.1693 +[2025-07-06 15:58:15] [Rank 0] Group 3 FTA: 0.1198 +[2025-07-06 15:58:15] [Rank 0] Group 3 FTA: 0.1198 +[2025-07-06 15:58:15] [Rank 0] Group 4 FTA: 0.1641 +[2025-07-06 15:58:15] [Rank 0] Group 4 FTA: 0.1641 +[2025-07-06 15:58:15] [Rank 0] Group 5 FTA: 0.1328 +[2025-07-06 15:58:15] [Rank 0] Group 5 FTA: 0.1328 +[2025-07-06 15:58:15] [Rank 0] Group 6 FTA: 0.1641 +[2025-07-06 15:58:15] [Rank 0] Group 6 FTA: 0.1641 +[2025-07-06 15:58:15] [Rank 0] Group 7 FTA: 0.1432 +[2025-07-06 15:58:15] [Rank 0] Group 7 FTA: 0.1432 +[2025-07-06 15:58:15] [Rank 0] Group 8 FTA: 0.1380 +[2025-07-06 15:58:15] [Rank 0] Group 8 FTA: 0.1380 +[2025-07-06 15:58:15] [Rank 0] Group 9 FTA: 0.1289 +[2025-07-06 15:58:15] [Rank 0] Group 9 FTA: 0.1289 +[2025-07-06 15:58:15] [Rank 0] Group 10 FTA: 0.1445 +[2025-07-06 15:58:15] [Rank 0] Group 10 FTA: 0.1445 +[2025-07-06 15:58:15] [Rank 0] Group 11 FTA: 0.1367 +[2025-07-06 15:58:15] [Rank 0] Group 11 FTA: 0.1367 +[2025-07-06 15:58:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 15:58:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 15:58:16] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 15:58:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 15:58:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 15:58:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 15:58:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 15:58:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 15:58:16] [Rank 0] step:5001/10000 train_time:404013ms step_avg:80.79ms +[2025-07-06 15:58:16] [Rank 0] step:5001/10000 train_time:404013ms step_avg:80.79ms +[2025-07-06 15:58:18] [Rank 0] step:5021/10000 train_time:405516ms step_avg:80.76ms +[2025-07-06 15:58:18] [Rank 0] step:5021/10000 train_time:405516ms step_avg:80.76ms +[2025-07-06 15:58:20] [Rank 0] step:5041/10000 train_time:407011ms step_avg:80.74ms +[2025-07-06 15:58:20] [Rank 0] step:5041/10000 train_time:407011ms step_avg:80.74ms +[2025-07-06 15:58:21] [Rank 0] step:5061/10000 train_time:409277ms step_avg:80.87ms +[2025-07-06 15:58:21] [Rank 0] step:5061/10000 train_time:409277ms step_avg:80.87ms +[2025-07-06 15:58:23] [Rank 0] step:5081/10000 train_time:410773ms step_avg:80.84ms +[2025-07-06 15:58:23] [Rank 0] step:5081/10000 train_time:410773ms step_avg:80.84ms +[2025-07-06 15:58:24] [Rank 0] step:5101/10000 train_time:412271ms step_avg:80.82ms +[2025-07-06 15:58:24] [Rank 0] step:5101/10000 train_time:412271ms step_avg:80.82ms +[2025-07-06 15:58:26] [Rank 0] step:5121/10000 train_time:413770ms step_avg:80.80ms +[2025-07-06 15:58:26] [Rank 0] step:5121/10000 train_time:413770ms step_avg:80.80ms +[2025-07-06 15:58:28] [Rank 0] step:5141/10000 train_time:415914ms step_avg:80.90ms +[2025-07-06 15:58:28] [Rank 0] step:5141/10000 train_time:415914ms step_avg:80.90ms +[2025-07-06 15:58:30] [Rank 0] step:5161/10000 train_time:417412ms step_avg:80.88ms +[2025-07-06 15:58:30] [Rank 0] step:5161/10000 train_time:417412ms step_avg:80.88ms +[2025-07-06 15:58:31] [Rank 0] step:5181/10000 train_time:418912ms step_avg:80.86ms +[2025-07-06 15:58:31] [Rank 0] step:5181/10000 train_time:418912ms step_avg:80.86ms +[2025-07-06 15:58:33] [Rank 0] step:5201/10000 train_time:420411ms step_avg:80.83ms +[2025-07-06 15:58:33] [Rank 0] step:5201/10000 train_time:420411ms step_avg:80.83ms +[2025-07-06 15:58:35] [Rank 0] step:5221/10000 train_time:421965ms step_avg:80.82ms +[2025-07-06 15:58:35] [Rank 0] step:5221/10000 train_time:421965ms step_avg:80.82ms +[2025-07-06 15:58:36] [Rank 0] step:5241/10000 train_time:424062ms step_avg:80.91ms +[2025-07-06 15:58:36] [Rank 0] step:5241/10000 train_time:424062ms step_avg:80.91ms +[2025-07-06 15:58:38] [Rank 0] step:5261/10000 train_time:425561ms step_avg:80.89ms +[2025-07-06 15:58:38] [Rank 0] step:5261/10000 train_time:425561ms step_avg:80.89ms +[2025-07-06 15:58:39] [Rank 0] step:5281/10000 train_time:427061ms step_avg:80.87ms +[2025-07-06 15:58:39] [Rank 0] step:5281/10000 train_time:427061ms step_avg:80.87ms +[2025-07-06 15:58:41] [Rank 0] step:5301/10000 train_time:428561ms step_avg:80.85ms +[2025-07-06 15:58:41] [Rank 
0] step:5301/10000 train_time:428561ms step_avg:80.85ms +[2025-07-06 15:58:43] [Rank 0] step:5321/10000 train_time:430733ms step_avg:80.95ms +[2025-07-06 15:58:43] [Rank 0] step:5321/10000 train_time:430733ms step_avg:80.95ms +[2025-07-06 15:58:44] [Rank 0] step:5341/10000 train_time:432235ms step_avg:80.93ms +[2025-07-06 15:58:44] [Rank 0] step:5341/10000 train_time:432235ms step_avg:80.93ms +[2025-07-06 15:58:46] [Rank 0] step:5361/10000 train_time:433740ms step_avg:80.91ms +[2025-07-06 15:58:46] [Rank 0] step:5361/10000 train_time:433740ms step_avg:80.91ms +[2025-07-06 15:58:47] [Rank 0] step:5381/10000 train_time:435241ms step_avg:80.88ms +[2025-07-06 15:58:47] [Rank 0] step:5381/10000 train_time:435241ms step_avg:80.88ms +[2025-07-06 15:58:50] [Rank 0] step:5401/10000 train_time:436793ms step_avg:80.87ms +[2025-07-06 15:58:50] [Rank 0] step:5401/10000 train_time:436793ms step_avg:80.87ms +[2025-07-06 15:58:51] [Rank 0] step:5421/10000 train_time:438913ms step_avg:80.97ms +[2025-07-06 15:58:51] [Rank 0] step:5421/10000 train_time:438913ms step_avg:80.97ms +[2025-07-06 15:58:53] [Rank 0] step:5441/10000 train_time:440417ms step_avg:80.94ms +[2025-07-06 15:58:53] [Rank 0] step:5441/10000 train_time:440417ms step_avg:80.94ms +[2025-07-06 15:58:54] [Rank 0] step:5461/10000 train_time:441921ms step_avg:80.92ms +[2025-07-06 15:58:54] [Rank 0] step:5461/10000 train_time:441921ms step_avg:80.92ms +[2025-07-06 15:58:56] [Rank 0] step:5481/10000 train_time:443425ms step_avg:80.90ms +[2025-07-06 15:58:56] [Rank 0] step:5481/10000 train_time:443425ms step_avg:80.90ms +[2025-07-06 15:58:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 15:58:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 15:58:59] [Rank 0] PRINT: step:5500/10000 train_loss:2.1697 val_loss:2.0843 train_time:445581ms step_avg:81.01ms +[2025-07-06 15:58:59] [Rank 0] PRINT: step:5500/10000 train_loss:2.1697 val_loss:2.0843 train_time:445581ms step_avg:81.01ms +[2025-07-06 15:58:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 15:58:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 15:58:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 15:58:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 15:58:59] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 15:58:59] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 16:04:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 16:04:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 16:04:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 16:04:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 16:04:23] [Rank 0] Total Loss: 4.2972 +[2025-07-06 16:04:23] [Rank 0] Total Loss: 4.2972 +[2025-07-06 16:04:23] [Rank 0] Total FTA: 0.1481 +[2025-07-06 16:04:23] [Rank 0] Total FTA: 0.1481 +[2025-07-06 16:04:23] [Rank 0] Group 0 Loss: 4.5405 +[2025-07-06 16:04:23] [Rank 0] Group 0 Loss: 4.5405 +[2025-07-06 16:04:23] [Rank 0] Group 1 Loss: 4.3280 +[2025-07-06 16:04:23] [Rank 0] Group 1 Loss: 4.3280 +[2025-07-06 16:04:23] [Rank 0] Group 2 Loss: 4.0893 +[2025-07-06 16:04:23] [Rank 0] Group 2 Loss: 4.0893 +[2025-07-06 16:04:23] [Rank 0] Group 3 Loss: 4.1327 +[2025-07-06 16:04:23] [Rank 0] Group 3 Loss: 4.1327 +[2025-07-06 16:04:23] [Rank 0] Group 4 Loss: 4.3622 +[2025-07-06 16:04:23] [Rank 0] Group 4 Loss: 4.3622 +[2025-07-06 16:04:23] [Rank 0] Group 5 Loss: 4.2266 +[2025-07-06 16:04:23] [Rank 0] Group 5 Loss: 4.2266 +[2025-07-06 16:04:23] [Rank 0] Group 6 Loss: 4.2200 +[2025-07-06 16:04:23] [Rank 0] Group 6 Loss: 4.2200 +[2025-07-06 16:04:23] [Rank 0] Group 7 Loss: 4.3278 +[2025-07-06 16:04:23] [Rank 0] Group 7 Loss: 4.3278 +[2025-07-06 16:04:23] [Rank 0] Group 8 Loss: 4.2669 +[2025-07-06 16:04:23] [Rank 0] Group 8 Loss: 4.2669 +[2025-07-06 16:04:24] [Rank 0] Group 9 Loss: 4.2594 +[2025-07-06 16:04:24] [Rank 0] Group 9 Loss: 4.2594 +[2025-07-06 16:04:24] [Rank 0] Group 10 Loss: 4.2697 +[2025-07-06 16:04:24] [Rank 0] Group 10 Loss: 4.2697 +[2025-07-06 16:04:24] [Rank 0] Group 11 Loss: 4.2968 +[2025-07-06 16:04:24] [Rank 0] Group 11 Loss: 4.2968 +[2025-07-06 16:04:24] [Rank 0] Group 0 FTA: 0.1912 +[2025-07-06 16:04:24] [Rank 0] Group 0 FTA: 0.1912 +[2025-07-06 16:04:24] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-06 16:04:24] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-06 16:04:24] [Rank 0] Group 2 FTA: 0.1068 +[2025-07-06 16:04:24] [Rank 0] Group 2 FTA: 0.1068 +[2025-07-06 16:04:24] [Rank 0] Group 3 FTA: 0.1250 +[2025-07-06 16:04:24] [Rank 0] Group 3 FTA: 0.1250 +[2025-07-06 16:04:24] [Rank 0] Group 4 FTA: 0.0885 +[2025-07-06 16:04:24] [Rank 0] Group 4 FTA: 0.0885 +[2025-07-06 16:04:24] [Rank 0] Group 5 FTA: 0.1250 +[2025-07-06 16:04:24] [Rank 0] Group 5 FTA: 0.1250 +[2025-07-06 16:04:24] [Rank 0] Group 6 FTA: 0.1693 +[2025-07-06 16:04:24] [Rank 0] Group 6 FTA: 0.1693 +[2025-07-06 16:04:24] [Rank 0] Group 7 FTA: 0.1589 +[2025-07-06 16:04:24] [Rank 0] Group 7 FTA: 0.1589 +[2025-07-06 16:04:24] [Rank 0] Group 8 FTA: 0.1458 +[2025-07-06 16:04:24] [Rank 0] Group 8 FTA: 0.1458 +[2025-07-06 16:04:24] [Rank 0] Group 9 FTA: 0.1367 +[2025-07-06 16:04:24] [Rank 0] Group 9 FTA: 0.1367 +[2025-07-06 16:04:24] [Rank 0] Group 10 FTA: 0.1562 +[2025-07-06 16:04:24] [Rank 0] Group 10 FTA: 0.1562 +[2025-07-06 16:04:24] [Rank 0] Group 11 FTA: 0.1494 +[2025-07-06 16:04:24] [Rank 0] Group 11 FTA: 0.1494 +[2025-07-06 16:04:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 16:04:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 16:04:24] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 16:04:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 16:04:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 16:04:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 16:04:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 16:04:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 16:04:25] [Rank 0] step:5501/10000 train_time:445604ms step_avg:81.00ms +[2025-07-06 16:04:25] [Rank 0] step:5501/10000 train_time:445604ms step_avg:81.00ms +[2025-07-06 16:04:26] [Rank 0] step:5521/10000 train_time:447110ms step_avg:80.98ms +[2025-07-06 16:04:26] [Rank 0] step:5521/10000 train_time:447110ms step_avg:80.98ms +[2025-07-06 16:04:28] [Rank 0] step:5541/10000 train_time:448605ms step_avg:80.96ms +[2025-07-06 16:04:28] [Rank 0] step:5541/10000 train_time:448605ms step_avg:80.96ms +[2025-07-06 16:04:29] [Rank 0] step:5561/10000 train_time:450102ms step_avg:80.94ms +[2025-07-06 16:04:29] [Rank 0] step:5561/10000 train_time:450102ms step_avg:80.94ms +[2025-07-06 16:04:32] [Rank 0] step:5581/10000 train_time:451656ms step_avg:80.93ms +[2025-07-06 16:04:32] [Rank 0] step:5581/10000 train_time:451656ms step_avg:80.93ms +[2025-07-06 16:04:33] [Rank 0] step:5601/10000 train_time:453749ms step_avg:81.01ms +[2025-07-06 16:04:33] [Rank 0] step:5601/10000 train_time:453749ms step_avg:81.01ms +[2025-07-06 16:04:35] [Rank 0] step:5621/10000 train_time:455247ms step_avg:80.99ms +[2025-07-06 16:04:35] [Rank 0] step:5621/10000 train_time:455247ms step_avg:80.99ms +[2025-07-06 16:04:36] [Rank 0] step:5641/10000 train_time:456744ms step_avg:80.97ms +[2025-07-06 16:04:36] [Rank 0] step:5641/10000 train_time:456744ms step_avg:80.97ms +[2025-07-06 16:04:38] [Rank 0] step:5661/10000 train_time:458242ms step_avg:80.95ms +[2025-07-06 16:04:38] [Rank 0] step:5661/10000 train_time:458242ms step_avg:80.95ms +[2025-07-06 16:04:40] [Rank 0] step:5681/10000 train_time:460390ms step_avg:81.04ms +[2025-07-06 16:04:40] [Rank 0] step:5681/10000 train_time:460390ms step_avg:81.04ms +[2025-07-06 16:04:41] [Rank 0] step:5701/10000 train_time:461987ms step_avg:81.04ms +[2025-07-06 16:04:41] [Rank 0] step:5701/10000 train_time:461987ms step_avg:81.04ms +[2025-07-06 16:04:43] [Rank 0] step:5721/10000 train_time:463588ms step_avg:81.03ms +[2025-07-06 16:04:43] [Rank 0] step:5721/10000 train_time:463588ms step_avg:81.03ms +[2025-07-06 16:04:44] [Rank 0] step:5741/10000 train_time:465087ms step_avg:81.01ms +[2025-07-06 16:04:44] [Rank 0] step:5741/10000 train_time:465087ms step_avg:81.01ms +[2025-07-06 16:04:47] [Rank 0] step:5761/10000 train_time:467280ms step_avg:81.11ms +[2025-07-06 16:04:47] [Rank 0] step:5761/10000 train_time:467280ms step_avg:81.11ms +[2025-07-06 16:04:48] [Rank 0] step:5781/10000 train_time:468759ms step_avg:81.09ms +[2025-07-06 16:04:48] [Rank 0] step:5781/10000 train_time:468759ms step_avg:81.09ms +[2025-07-06 16:04:50] [Rank 0] step:5801/10000 train_time:470261ms step_avg:81.07ms +[2025-07-06 16:04:50] [Rank 
0] step:5801/10000 train_time:470261ms step_avg:81.07ms +[2025-07-06 16:04:51] [Rank 0] step:5821/10000 train_time:471760ms step_avg:81.04ms +[2025-07-06 16:04:51] [Rank 0] step:5821/10000 train_time:471760ms step_avg:81.04ms +[2025-07-06 16:04:53] [Rank 0] step:5841/10000 train_time:473259ms step_avg:81.02ms +[2025-07-06 16:04:53] [Rank 0] step:5841/10000 train_time:473259ms step_avg:81.02ms +[2025-07-06 16:04:55] [Rank 0] step:5861/10000 train_time:475399ms step_avg:81.11ms +[2025-07-06 16:04:55] [Rank 0] step:5861/10000 train_time:475399ms step_avg:81.11ms +[2025-07-06 16:04:56] [Rank 0] step:5881/10000 train_time:476900ms step_avg:81.09ms +[2025-07-06 16:04:56] [Rank 0] step:5881/10000 train_time:476900ms step_avg:81.09ms +[2025-07-06 16:04:58] [Rank 0] step:5901/10000 train_time:478401ms step_avg:81.07ms +[2025-07-06 16:04:58] [Rank 0] step:5901/10000 train_time:478401ms step_avg:81.07ms +[2025-07-06 16:04:59] [Rank 0] step:5921/10000 train_time:479900ms step_avg:81.05ms +[2025-07-06 16:04:59] [Rank 0] step:5921/10000 train_time:479900ms step_avg:81.05ms +[2025-07-06 16:05:01] [Rank 0] step:5941/10000 train_time:481478ms step_avg:81.04ms +[2025-07-06 16:05:01] [Rank 0] step:5941/10000 train_time:481478ms step_avg:81.04ms +[2025-07-06 16:05:03] [Rank 0] step:5961/10000 train_time:483142ms step_avg:81.05ms +[2025-07-06 16:05:03] [Rank 0] step:5961/10000 train_time:483142ms step_avg:81.05ms +[2025-07-06 16:05:04] [Rank 0] step:5981/10000 train_time:484641ms step_avg:81.03ms +[2025-07-06 16:05:04] [Rank 0] step:5981/10000 train_time:484641ms step_avg:81.03ms +[2025-07-06 16:05:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 16:05:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 16:05:06] [Rank 0] PRINT: step:6000/10000 train_loss:2.0126 val_loss:1.9485 train_time:486141ms step_avg:81.02ms +[2025-07-06 16:05:06] [Rank 0] PRINT: step:6000/10000 train_loss:2.0126 val_loss:1.9485 train_time:486141ms step_avg:81.02ms +[2025-07-06 16:05:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 16:05:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 16:05:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 16:05:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 16:05:07] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 16:05:07] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 16:10:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 16:10:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 16:10:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 16:10:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 16:10:29] [Rank 0] Total Loss: 4.2410 +[2025-07-06 16:10:29] [Rank 0] Total Loss: 4.2410 +[2025-07-06 16:10:29] [Rank 0] Total FTA: 0.1949 +[2025-07-06 16:10:29] [Rank 0] Total FTA: 0.1949 +[2025-07-06 16:10:29] [Rank 0] Group 0 Loss: 4.4888 +[2025-07-06 16:10:29] [Rank 0] Group 0 Loss: 4.4888 +[2025-07-06 16:10:29] [Rank 0] Group 1 Loss: 4.2107 +[2025-07-06 16:10:29] [Rank 0] Group 1 Loss: 4.2107 +[2025-07-06 16:10:29] [Rank 0] Group 2 Loss: 4.0255 +[2025-07-06 16:10:29] [Rank 0] Group 2 Loss: 4.0255 +[2025-07-06 16:10:29] [Rank 0] Group 3 Loss: 4.2112 +[2025-07-06 16:10:29] [Rank 0] Group 3 Loss: 4.2112 +[2025-07-06 16:10:29] [Rank 0] Group 4 Loss: 4.1974 +[2025-07-06 16:10:29] [Rank 0] Group 4 Loss: 4.1974 +[2025-07-06 16:10:29] [Rank 0] Group 5 Loss: 4.1361 +[2025-07-06 16:10:29] [Rank 0] Group 5 Loss: 4.1361 +[2025-07-06 16:10:29] [Rank 0] Group 6 Loss: 4.1322 +[2025-07-06 16:10:29] [Rank 0] Group 6 Loss: 4.1322 +[2025-07-06 16:10:29] [Rank 0] Group 7 Loss: 4.2668 +[2025-07-06 16:10:29] [Rank 0] Group 7 Loss: 4.2668 +[2025-07-06 16:10:29] [Rank 0] Group 8 Loss: 4.2741 +[2025-07-06 16:10:29] [Rank 0] Group 8 Loss: 4.2741 +[2025-07-06 16:10:29] [Rank 0] Group 9 Loss: 4.1986 +[2025-07-06 16:10:29] [Rank 0] Group 9 Loss: 4.1986 +[2025-07-06 16:10:29] [Rank 0] Group 10 Loss: 4.2373 +[2025-07-06 16:10:29] [Rank 0] Group 10 Loss: 4.2373 +[2025-07-06 16:10:29] [Rank 0] Group 11 Loss: 4.2451 +[2025-07-06 16:10:29] [Rank 0] Group 11 Loss: 4.2451 +[2025-07-06 16:10:29] [Rank 0] Group 0 FTA: 0.3433 +[2025-07-06 16:10:29] [Rank 0] Group 0 FTA: 0.3433 +[2025-07-06 16:10:29] [Rank 0] Group 1 FTA: 0.1797 +[2025-07-06 16:10:29] [Rank 0] Group 1 FTA: 0.1797 +[2025-07-06 16:10:29] [Rank 0] Group 2 FTA: 0.2474 +[2025-07-06 16:10:29] [Rank 0] Group 2 FTA: 0.2474 +[2025-07-06 16:10:29] [Rank 0] Group 3 FTA: 0.0781 +[2025-07-06 16:10:29] [Rank 0] Group 3 FTA: 0.0781 +[2025-07-06 16:10:29] [Rank 0] Group 4 FTA: 0.1250 +[2025-07-06 16:10:29] [Rank 0] Group 4 FTA: 0.1250 +[2025-07-06 16:10:29] [Rank 0] Group 5 FTA: 0.1380 +[2025-07-06 16:10:29] [Rank 0] Group 5 FTA: 0.1380 +[2025-07-06 16:10:29] [Rank 0] Group 6 FTA: 0.1771 +[2025-07-06 16:10:29] [Rank 0] Group 6 FTA: 0.1771 +[2025-07-06 16:10:29] [Rank 0] Group 7 FTA: 0.1797 +[2025-07-06 16:10:29] [Rank 0] Group 7 FTA: 0.1797 +[2025-07-06 16:10:29] [Rank 0] Group 8 FTA: 0.2005 +[2025-07-06 16:10:29] [Rank 0] Group 8 FTA: 0.2005 +[2025-07-06 16:10:29] [Rank 0] Group 9 FTA: 0.1797 +[2025-07-06 16:10:29] [Rank 0] Group 9 FTA: 0.1797 +[2025-07-06 16:10:29] [Rank 0] Group 10 FTA: 0.1855 +[2025-07-06 16:10:29] [Rank 0] Group 10 FTA: 0.1855 +[2025-07-06 16:10:29] [Rank 0] Group 11 FTA: 0.1797 +[2025-07-06 16:10:29] [Rank 0] Group 11 FTA: 0.1797 +[2025-07-06 16:10:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 16:10:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 16:10:30] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 16:10:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 16:10:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 16:10:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 16:10:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 16:10:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 16:10:30] [Rank 0] step:6001/10000 train_time:486164ms step_avg:81.01ms +[2025-07-06 16:10:30] [Rank 0] step:6001/10000 train_time:486164ms step_avg:81.01ms +[2025-07-06 16:10:32] [Rank 0] step:6021/10000 train_time:487678ms step_avg:81.00ms +[2025-07-06 16:10:32] [Rank 0] step:6021/10000 train_time:487678ms step_avg:81.00ms +[2025-07-06 16:10:34] [Rank 0] step:6041/10000 train_time:489833ms step_avg:81.08ms +[2025-07-06 16:10:34] [Rank 0] step:6041/10000 train_time:489833ms step_avg:81.08ms +[2025-07-06 16:10:35] [Rank 0] step:6061/10000 train_time:491327ms step_avg:81.06ms +[2025-07-06 16:10:35] [Rank 0] step:6061/10000 train_time:491327ms step_avg:81.06ms +[2025-07-06 16:10:37] [Rank 0] step:6081/10000 train_time:492823ms step_avg:81.04ms +[2025-07-06 16:10:37] [Rank 0] step:6081/10000 train_time:492823ms step_avg:81.04ms +[2025-07-06 16:10:38] [Rank 0] step:6101/10000 train_time:494319ms step_avg:81.02ms +[2025-07-06 16:10:38] [Rank 0] step:6101/10000 train_time:494319ms step_avg:81.02ms +[2025-07-06 16:10:40] [Rank 0] step:6121/10000 train_time:496314ms step_avg:81.08ms +[2025-07-06 16:10:40] [Rank 0] step:6121/10000 train_time:496314ms step_avg:81.08ms +[2025-07-06 16:10:42] [Rank 0] step:6141/10000 train_time:497792ms step_avg:81.06ms +[2025-07-06 16:10:42] [Rank 0] step:6141/10000 train_time:497792ms step_avg:81.06ms +[2025-07-06 16:10:43] [Rank 0] step:6161/10000 train_time:499288ms step_avg:81.04ms +[2025-07-06 16:10:43] [Rank 0] step:6161/10000 train_time:499288ms step_avg:81.04ms +[2025-07-06 16:10:45] [Rank 0] step:6181/10000 train_time:500787ms step_avg:81.02ms +[2025-07-06 16:10:45] [Rank 0] step:6181/10000 train_time:500787ms step_avg:81.02ms +[2025-07-06 16:10:46] [Rank 0] step:6201/10000 train_time:502287ms step_avg:81.00ms +[2025-07-06 16:10:46] [Rank 0] step:6201/10000 train_time:502287ms step_avg:81.00ms +[2025-07-06 16:10:49] [Rank 0] step:6221/10000 train_time:504427ms step_avg:81.08ms +[2025-07-06 16:10:49] [Rank 0] step:6221/10000 train_time:504427ms step_avg:81.08ms +[2025-07-06 16:10:50] [Rank 0] step:6241/10000 train_time:505925ms step_avg:81.06ms +[2025-07-06 16:10:50] [Rank 0] step:6241/10000 train_time:505925ms step_avg:81.06ms +[2025-07-06 16:10:52] [Rank 0] step:6261/10000 train_time:507424ms step_avg:81.05ms +[2025-07-06 16:10:52] [Rank 0] step:6261/10000 train_time:507424ms step_avg:81.05ms +[2025-07-06 16:10:53] [Rank 0] step:6281/10000 train_time:508924ms step_avg:81.03ms +[2025-07-06 16:10:53] [Rank 0] step:6281/10000 train_time:508924ms step_avg:81.03ms +[2025-07-06 16:10:55] [Rank 0] step:6301/10000 train_time:510477ms step_avg:81.02ms +[2025-07-06 16:10:55] [Rank 
0] step:6301/10000 train_time:510477ms step_avg:81.02ms +[2025-07-06 16:10:57] [Rank 0] step:6321/10000 train_time:512573ms step_avg:81.09ms +[2025-07-06 16:10:57] [Rank 0] step:6321/10000 train_time:512573ms step_avg:81.09ms +[2025-07-06 16:10:58] [Rank 0] step:6341/10000 train_time:514072ms step_avg:81.07ms +[2025-07-06 16:10:58] [Rank 0] step:6341/10000 train_time:514072ms step_avg:81.07ms +[2025-07-06 16:11:00] [Rank 0] step:6361/10000 train_time:515572ms step_avg:81.05ms +[2025-07-06 16:11:00] [Rank 0] step:6361/10000 train_time:515572ms step_avg:81.05ms +[2025-07-06 16:11:01] [Rank 0] step:6381/10000 train_time:517073ms step_avg:81.03ms +[2025-07-06 16:11:01] [Rank 0] step:6381/10000 train_time:517073ms step_avg:81.03ms +[2025-07-06 16:11:03] [Rank 0] step:6401/10000 train_time:519214ms step_avg:81.11ms +[2025-07-06 16:11:03] [Rank 0] step:6401/10000 train_time:519214ms step_avg:81.11ms +[2025-07-06 16:11:05] [Rank 0] step:6421/10000 train_time:520715ms step_avg:81.10ms +[2025-07-06 16:11:05] [Rank 0] step:6421/10000 train_time:520715ms step_avg:81.10ms +[2025-07-06 16:11:06] [Rank 0] step:6441/10000 train_time:522217ms step_avg:81.08ms +[2025-07-06 16:11:06] [Rank 0] step:6441/10000 train_time:522217ms step_avg:81.08ms +[2025-07-06 16:11:08] [Rank 0] step:6461/10000 train_time:523719ms step_avg:81.06ms +[2025-07-06 16:11:08] [Rank 0] step:6461/10000 train_time:523719ms step_avg:81.06ms +[2025-07-06 16:11:10] [Rank 0] step:6481/10000 train_time:525474ms step_avg:81.08ms +[2025-07-06 16:11:10] [Rank 0] step:6481/10000 train_time:525474ms step_avg:81.08ms +[2025-07-06 16:11:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 16:11:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 16:11:12] [Rank 0] PRINT: step:6500/10000 train_loss:1.8952 val_loss:1.8466 train_time:526955ms step_avg:81.07ms +[2025-07-06 16:11:12] [Rank 0] PRINT: step:6500/10000 train_loss:1.8952 val_loss:1.8466 train_time:526955ms step_avg:81.07ms +[2025-07-06 16:11:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 16:11:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 16:11:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 16:11:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 16:11:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 16:11:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 16:16:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 16:16:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 16:16:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 16:16:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 16:16:35] [Rank 0] Total Loss: 4.2498 +[2025-07-06 16:16:35] [Rank 0] Total Loss: 4.2498 +[2025-07-06 16:16:35] [Rank 0] Total FTA: 0.1850 +[2025-07-06 16:16:35] [Rank 0] Total FTA: 0.1850 +[2025-07-06 16:16:35] [Rank 0] Group 0 Loss: 4.4718 +[2025-07-06 16:16:35] [Rank 0] Group 0 Loss: 4.4718 +[2025-07-06 16:16:35] [Rank 0] Group 1 Loss: 4.2163 +[2025-07-06 16:16:35] [Rank 0] Group 1 Loss: 4.2163 +[2025-07-06 16:16:35] [Rank 0] Group 2 Loss: 4.0147 +[2025-07-06 16:16:35] [Rank 0] Group 2 Loss: 4.0147 +[2025-07-06 16:16:35] [Rank 0] Group 3 Loss: 4.2429 +[2025-07-06 16:16:35] [Rank 0] Group 3 Loss: 4.2429 +[2025-07-06 16:16:35] [Rank 0] Group 4 Loss: 4.2511 +[2025-07-06 16:16:35] [Rank 0] Group 4 Loss: 4.2511 +[2025-07-06 16:16:35] [Rank 0] Group 5 Loss: 4.2278 +[2025-07-06 16:16:35] [Rank 0] Group 5 Loss: 4.2278 +[2025-07-06 16:16:35] [Rank 0] Group 6 Loss: 4.1703 +[2025-07-06 16:16:35] [Rank 0] Group 6 Loss: 4.1703 +[2025-07-06 16:16:35] [Rank 0] Group 7 Loss: 4.2910 +[2025-07-06 16:16:35] [Rank 0] Group 7 Loss: 4.2910 +[2025-07-06 16:16:35] [Rank 0] Group 8 Loss: 4.2144 +[2025-07-06 16:16:35] [Rank 0] Group 8 Loss: 4.2144 +[2025-07-06 16:16:35] [Rank 0] Group 9 Loss: 4.2078 +[2025-07-06 16:16:35] [Rank 0] Group 9 Loss: 4.2078 +[2025-07-06 16:16:35] [Rank 0] Group 10 Loss: 4.2373 +[2025-07-06 16:16:35] [Rank 0] Group 10 Loss: 4.2373 +[2025-07-06 16:16:35] [Rank 0] Group 11 Loss: 4.2387 +[2025-07-06 16:16:35] [Rank 0] Group 11 Loss: 4.2387 +[2025-07-06 16:16:35] [Rank 0] Group 0 FTA: 0.1469 +[2025-07-06 16:16:35] [Rank 0] Group 0 FTA: 0.1469 +[2025-07-06 16:16:35] [Rank 0] Group 1 FTA: 0.1641 +[2025-07-06 16:16:35] [Rank 0] Group 1 FTA: 0.1641 +[2025-07-06 16:16:35] [Rank 0] Group 2 FTA: 0.2526 +[2025-07-06 16:16:35] [Rank 0] Group 2 FTA: 0.2526 +[2025-07-06 16:16:35] [Rank 0] Group 3 FTA: 0.1667 +[2025-07-06 16:16:35] [Rank 0] Group 3 FTA: 0.1667 +[2025-07-06 16:16:35] [Rank 0] Group 4 FTA: 0.1094 +[2025-07-06 16:16:35] [Rank 0] Group 4 FTA: 0.1094 +[2025-07-06 16:16:35] [Rank 0] Group 5 FTA: 0.1484 +[2025-07-06 16:16:35] [Rank 0] Group 5 FTA: 0.1484 +[2025-07-06 16:16:35] [Rank 0] Group 6 FTA: 0.2370 +[2025-07-06 16:16:35] [Rank 0] Group 6 FTA: 0.2370 +[2025-07-06 16:16:35] [Rank 0] Group 7 FTA: 0.2318 +[2025-07-06 16:16:35] [Rank 0] Group 7 FTA: 0.2318 +[2025-07-06 16:16:35] [Rank 0] Group 8 FTA: 0.2005 +[2025-07-06 16:16:35] [Rank 0] Group 8 FTA: 0.2005 +[2025-07-06 16:16:35] [Rank 0] Group 9 FTA: 0.1719 +[2025-07-06 16:16:35] [Rank 0] Group 9 FTA: 0.1719 +[2025-07-06 16:16:35] [Rank 0] Group 10 FTA: 0.1953 +[2025-07-06 16:16:35] [Rank 0] Group 10 FTA: 0.1953 +[2025-07-06 16:16:35] [Rank 0] Group 11 FTA: 0.2002 +[2025-07-06 16:16:35] [Rank 0] Group 11 FTA: 0.2002 +[2025-07-06 16:16:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 16:16:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-06 16:16:36] [Rank 0] [✓] Per-Class FTA curve 
+[2025-07-06 16:16:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 16:16:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 16:16:37] [Rank 0] step:6501/10000 train_time:526976ms step_avg:81.06ms
+[2025-07-06 16:16:38] [Rank 0] step:6521/10000 train_time:528479ms step_avg:81.04ms
+[2025-07-06 16:16:40] [Rank 0] step:6541/10000 train_time:529972ms step_avg:81.02ms
+[2025-07-06 16:16:41] [Rank 0] step:6561/10000 train_time:531467ms step_avg:81.00ms
+[2025-07-06 16:16:43] [Rank 0] step:6581/10000 train_time:533610ms step_avg:81.08ms
+[2025-07-06 16:16:45] [Rank 0] step:6601/10000 train_time:535106ms step_avg:81.06ms
+[2025-07-06 16:16:46] [Rank 0] step:6621/10000 train_time:536601ms step_avg:81.05ms
+[2025-07-06 16:16:48] [Rank 0] step:6641/10000 train_time:538098ms step_avg:81.03ms
+[2025-07-06 16:16:50] [Rank 0] step:6661/10000 train_time:539648ms step_avg:81.02ms
+[2025-07-06 16:16:51] [Rank 0] step:6681/10000 train_time:541329ms step_avg:81.03ms
+[2025-07-06 16:16:53] [Rank 0] step:6701/10000 train_time:542827ms step_avg:81.01ms
+[2025-07-06 16:16:54] [Rank 0] step:6721/10000 train_time:544325ms step_avg:80.99ms
+[2025-07-06 16:16:56] [Rank 0] step:6741/10000 train_time:545825ms step_avg:80.97ms
+[2025-07-06 16:16:58] [Rank 0] step:6761/10000 train_time:547979ms step_avg:81.05ms
+[2025-07-06 16:17:00] [Rank 0] step:6781/10000 train_time:549707ms step_avg:81.07ms
+[2025-07-06 16:17:01] [Rank 0] step:6801/10000 train_time:551229ms step_avg:81.05ms
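The step_avg column is simply cumulative train_time divided by the step index, which any line above confirms:

    train_time_ms = 526976                    # logged at step 6501
    step = 6501
    print(f"{train_time_ms / step:.2f}ms")    # -> 81.06ms, matching the log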
+[2025-07-06 16:17:03] [Rank 0] step:6821/10000 train_time:552727ms step_avg:81.03ms
+[2025-07-06 16:17:05] [Rank 0] step:6841/10000 train_time:554279ms step_avg:81.02ms
+[2025-07-06 16:17:06] [Rank 0] step:6861/10000 train_time:556382ms step_avg:81.09ms
+[2025-07-06 16:17:08] [Rank 0] step:6881/10000 train_time:557881ms step_avg:81.08ms
+[2025-07-06 16:17:09] [Rank 0] step:6901/10000 train_time:559381ms step_avg:81.06ms
+[2025-07-06 16:17:11] [Rank 0] step:6921/10000 train_time:560881ms step_avg:81.04ms
+[2025-07-06 16:17:13] [Rank 0] step:6941/10000 train_time:563021ms step_avg:81.12ms
+[2025-07-06 16:17:14] [Rank 0] step:6961/10000 train_time:564519ms step_avg:81.10ms
+[2025-07-06 16:17:16] [Rank 0] step:6981/10000 train_time:566019ms step_avg:81.08ms
+[2025-07-06 16:17:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 16:17:18] [Rank 0] PRINT: step:7000/10000 train_loss:1.8067 val_loss:1.7703 train_time:567521ms step_avg:81.07ms
+[2025-07-06 16:17:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 16:17:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 16:17:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 16:22:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 16:22:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 16:22:42] [Rank 0] Total Loss: 4.2381
+[2025-07-06 16:22:42] [Rank 0] Total FTA: 0.1848
+[2025-07-06 16:22:42] [Rank 0] Group 0 Loss: 4.5345
+[2025-07-06 16:22:42] [Rank 0] Group 1 Loss: 4.2093
+[2025-07-06 16:22:42] [Rank 0] Group 2 Loss: 4.0269
+[2025-07-06 16:22:42] [Rank 0] Group 3 Loss: 4.2697
+[2025-07-06 16:22:42] [Rank 0] Group 4 Loss: 4.1889
+[2025-07-06 16:22:42] [Rank 0] Group 5 Loss: 4.1305
+[2025-07-06 16:22:42] [Rank 0] Group 6 Loss: 4.1051
+[2025-07-06 16:22:42] [Rank 0] Group 7 Loss: 4.2581
+[2025-07-06 16:22:42] [Rank 0] Group 8 Loss: 4.1943
+[2025-07-06 16:22:42] [Rank 0] Group 9 Loss: 4.2326
+[2025-07-06 16:22:42] [Rank 0] Group 10 Loss: 4.2307
+[2025-07-06 16:22:42] [Rank 0] Group 11 Loss: 4.2162
+[2025-07-06 16:22:42] [Rank 0] Group 0 FTA: 0.1808
+[2025-07-06 16:22:42] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-06 16:22:42] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-06 16:22:42] [Rank 0] Group 3 FTA: 0.1823
+[2025-07-06 16:22:42] [Rank 0] Group 4 FTA: 0.0807
+[2025-07-06 16:22:42] [Rank 0] Group 5 FTA: 0.1458
+[2025-07-06 16:22:42] [Rank 0] Group 6 FTA: 0.2005
+[2025-07-06 16:22:42] [Rank 0] Group 7 FTA: 0.2396
+[2025-07-06 16:22:42] [Rank 0] Group 8 FTA: 0.2292
+[2025-07-06 16:22:42] [Rank 0] Group 9 FTA: 0.2266
+[2025-07-06 16:22:42] [Rank 0] Group 10 FTA: 0.2188
+[2025-07-06 16:22:42] [Rank 0] Group 11 FTA: 0.2217
+[2025-07-06 16:22:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 16:22:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
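FTA is never expanded in the log itself. Assuming it stands for first-token accuracy, the fraction of QA items whose first answer token is greedily predicted correctly, the per-group numbers above would come from a loop like this (a minimal sketch with illustrative names, not the script's code):

    import torch

    @torch.no_grad()
    def first_token_accuracy(model, batches):
        # batches: iterable of (prompt_ids, first_answer_token) tensor pairs
        correct, total = 0, 0
        for prompt_ids, answer_token in batches:
            logits = model(prompt_ids)                # (batch, seq, vocab) assumed
            pred = logits[:, -1, :].argmax(dim=-1)    # greedy next token at prompt end
            correct += (pred == answer_token).sum().item()
            total += answer_token.numel()
        return correct / total

Under that reading, a Total FTA of 0.1848 means the model produces the right first answer token for roughly 18% of the 5633 sampled items.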
+[2025-07-06 16:22:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 16:22:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 16:22:43] [Rank 0] step:7001/10000 train_time:567542ms step_avg:81.07ms
+[2025-07-06 16:22:46] [Rank 0] step:7021/10000 train_time:569316ms step_avg:81.09ms
+[2025-07-06 16:22:47] [Rank 0] step:7041/10000 train_time:571202ms step_avg:81.13ms
+[2025-07-06 16:22:49] [Rank 0] step:7061/10000 train_time:572697ms step_avg:81.11ms
+[2025-07-06 16:22:50] [Rank 0] step:7081/10000 train_time:574197ms step_avg:81.09ms
+[2025-07-06 16:22:52] [Rank 0] step:7101/10000 train_time:575697ms step_avg:81.07ms
+[2025-07-06 16:22:53] [Rank 0] step:7121/10000 train_time:577434ms step_avg:81.09ms
+[2025-07-06 16:22:55] [Rank 0] step:7141/10000 train_time:578932ms step_avg:81.07ms
+[2025-07-06 16:22:56] [Rank 0] step:7161/10000 train_time:580432ms step_avg:81.05ms
+[2025-07-06 16:22:58] [Rank 0] step:7181/10000 train_time:581949ms step_avg:81.04ms
+[2025-07-06 16:22:59] [Rank 0] step:7201/10000 train_time:583447ms step_avg:81.02ms
+[2025-07-06 16:23:01] [Rank 0] step:7221/10000 train_time:585180ms step_avg:81.04ms
+[2025-07-06 16:23:02] [Rank 0] step:7241/10000 train_time:586680ms step_avg:81.02ms
+[2025-07-06 16:23:04] [Rank 0] step:7261/10000 train_time:588179ms step_avg:81.01ms
+[2025-07-06 16:23:05] [Rank 0] step:7281/10000 train_time:589682ms step_avg:80.99ms
+[2025-07-06 16:23:08] [Rank 0] step:7301/10000 train_time:591830ms step_avg:81.06ms
+[2025-07-06 16:23:09] [Rank 0] step:7321/10000 train_time:593328ms step_avg:81.04ms
+[2025-07-06 16:23:11] [Rank 0] step:7341/10000 train_time:594839ms step_avg:81.03ms
+[2025-07-06 16:23:12] [Rank 0] step:7361/10000 train_time:596340ms step_avg:81.01ms
+[2025-07-06 16:23:14] [Rank 0] step:7381/10000 train_time:598094ms step_avg:81.03ms
+[2025-07-06 16:23:16] [Rank 0] step:7401/10000 train_time:600007ms step_avg:81.07ms
+[2025-07-06 16:23:17] [Rank 0] step:7421/10000 train_time:601508ms step_avg:81.05ms
+[2025-07-06 16:23:19] [Rank 0] step:7441/10000 train_time:603254ms step_avg:81.07ms
+[2025-07-06 16:23:21] [Rank 0] step:7461/10000 train_time:604756ms step_avg:81.06ms
+[2025-07-06 16:23:23] [Rank 0] step:7481/10000 train_time:606901ms step_avg:81.13ms
+[2025-07-06 16:23:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 16:23:25] [Rank 0] PRINT: step:7500/10000 train_loss:1.7395 val_loss:1.7118 train_time:608403ms step_avg:81.12ms
+[2025-07-06 16:23:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 16:23:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 16:23:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 16:28:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 16:28:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 16:28:48] [Rank 0] Total Loss: 4.2582
+[2025-07-06 16:28:48] [Rank 0] Total FTA: 0.2283
+[2025-07-06 16:28:48] [Rank 0] Group 0 Loss: 4.5318
+[2025-07-06 16:28:48] [Rank 0] Group 1 Loss: 4.2395
+[2025-07-06 16:28:48] [Rank 0] Group 2 Loss: 4.1344
+[2025-07-06 16:28:48] [Rank 0] Group 3 Loss: 4.2809
+[2025-07-06 16:28:48] [Rank 0] Group 4 Loss: 4.2250
+[2025-07-06 16:28:48] [Rank 0] Group 5 Loss: 4.1688
+[2025-07-06 16:28:48] [Rank 0] Group 6 Loss: 4.1524
+[2025-07-06 16:28:48] [Rank 0] Group 7 Loss: 4.2203
+[2025-07-06 16:28:48] [Rank 0] Group 8 Loss: 4.2166
+[2025-07-06 16:28:48] [Rank 0] Group 9 Loss: 4.2384
+[2025-07-06 16:28:48] [Rank 0] Group 10 Loss: 4.2427
+[2025-07-06 16:28:48] [Rank 0] Group 11 Loss: 4.2256
+[2025-07-06 16:28:48] [Rank 0] Group 0 FTA: 0.3420
+[2025-07-06 16:28:48] [Rank 0] Group 1 FTA: 0.1432
+[2025-07-06 16:28:48] [Rank 0] Group 2 FTA: 0.2969
+[2025-07-06 16:28:48] [Rank 0] Group 3 FTA: 0.1771
+[2025-07-06 16:28:48] [Rank 0] Group 4 FTA: 0.1198
+[2025-07-06 16:28:48] [Rank 0] Group 5 FTA: 0.1432
+[2025-07-06 16:28:48] [Rank 0] Group 6 FTA: 0.2318
+[2025-07-06 16:28:48] [Rank 0] Group 7 FTA: 0.2370
+[2025-07-06 16:28:48] [Rank 0] Group 8 FTA: 0.2240
+[2025-07-06 16:28:48] [Rank 0] Group 9 FTA: 0.2109
+[2025-07-06 16:28:48] [Rank 0] Group 10 FTA: 0.2539
+[2025-07-06 16:28:48] [Rank 0] Group 11 FTA: 0.2295
+[2025-07-06 16:28:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 16:28:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
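The four "[✓] ... curve updated" lines re-render the same PNGs after every detailed evaluation, so the files in this directory always reflect the latest step. The general pattern looks like this (a sketch; the actual plotting code lives in the training script, not this log):

    import matplotlib
    matplotlib.use("Agg")                    # headless backend for training nodes
    import matplotlib.pyplot as plt

    def save_per_class_curves(history, path, ylabel):
        # history: {group_id: [(step, value), ...]} accumulated across evals
        fig, ax = plt.subplots()
        for group, points in sorted(history.items()):
            steps, values = zip(*points)
            ax.plot(steps, values, label=f"Group {group}")
        ax.set_xlabel("step")
        ax.set_ylabel(ylabel)
        ax.legend(fontsize=6, ncol=2)
        fig.savefig(path, dpi=150)
        plt.close(fig)                       # same path each call, so the file is overwritten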
+[2025-07-06 16:28:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 16:28:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 16:28:50] [Rank 0] step:7501/10000 train_time:608425ms step_avg:81.11ms
+[2025-07-06 16:28:51] [Rank 0] step:7521/10000 train_time:609934ms step_avg:81.10ms
+[2025-07-06 16:28:53] [Rank 0] step:7541/10000 train_time:611430ms step_avg:81.08ms
+[2025-07-06 16:28:55] [Rank 0] step:7561/10000 train_time:613185ms step_avg:81.10ms
+[2025-07-06 16:28:56] [Rank 0] step:7581/10000 train_time:615092ms step_avg:81.14ms
+[2025-07-06 16:28:58] [Rank 0] step:7601/10000 train_time:616589ms step_avg:81.12ms
+[2025-07-06 16:28:59] [Rank 0] step:7621/10000 train_time:618087ms step_avg:81.10ms
+[2025-07-06 16:29:01] [Rank 0] step:7641/10000 train_time:619585ms step_avg:81.09ms
+[2025-07-06 16:29:03] [Rank 0] step:7661/10000 train_time:621735ms step_avg:81.16ms
+[2025-07-06 16:29:04] [Rank 0] step:7681/10000 train_time:623235ms step_avg:81.14ms
+[2025-07-06 16:29:06] [Rank 0] step:7701/10000 train_time:624735ms step_avg:81.12ms
+[2025-07-06 16:29:07] [Rank 0] step:7721/10000 train_time:626233ms step_avg:81.11ms
+[2025-07-06 16:29:10] [Rank 0] step:7741/10000 train_time:628400ms step_avg:81.18ms
+[2025-07-06 16:29:11] [Rank 0] step:7761/10000 train_time:629882ms step_avg:81.16ms
+[2025-07-06 16:29:13] [Rank 0] step:7781/10000 train_time:631382ms step_avg:81.14ms
+[2025-07-06 16:29:14] [Rank 0] step:7801/10000 train_time:632883ms step_avg:81.13ms
+[2025-07-06 16:29:16] [Rank 0] step:7821/10000 train_time:634386ms step_avg:81.11ms
+[2025-07-06 16:29:18] [Rank 0] step:7841/10000 train_time:636555ms step_avg:81.18ms
+[2025-07-06 16:29:19] [Rank 0] step:7861/10000 train_time:638055ms step_avg:81.17ms
+[2025-07-06 16:29:21] [Rank 0] step:7881/10000 train_time:639557ms step_avg:81.15ms
+[2025-07-06 16:29:22] [Rank 0] step:7901/10000 train_time:641061ms step_avg:81.14ms
+[2025-07-06 16:29:24] [Rank 0] step:7921/10000 train_time:643249ms step_avg:81.21ms
+[2025-07-06 16:29:26] [Rank 0] step:7941/10000 train_time:644731ms step_avg:81.19ms
+[2025-07-06 16:29:27] [Rank 0] step:7961/10000 train_time:646235ms step_avg:81.18ms
+[2025-07-06 16:29:29] [Rank 0] step:7981/10000 train_time:647741ms step_avg:81.16ms
+[2025-07-06 16:29:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 16:29:31] [Rank 0] PRINT: step:8000/10000 train_loss:1.6878 val_loss:1.6668 train_time:649247ms step_avg:81.16ms
+[2025-07-06 16:29:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 16:29:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 16:29:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 16:34:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 16:34:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 16:34:55] [Rank 0] Total Loss: 4.2791
+[2025-07-06 16:34:55] [Rank 0] Total FTA: 0.2001
+[2025-07-06 16:34:55] [Rank 0] Group 0 Loss: 4.4987
+[2025-07-06 16:34:55] [Rank 0] Group 1 Loss: 4.2336
+[2025-07-06 16:34:55] [Rank 0] Group 2 Loss: 4.2067
+[2025-07-06 16:34:55] [Rank 0] Group 3 Loss: 4.2518
+[2025-07-06 16:34:55] [Rank 0] Group 4 Loss: 4.1888
+[2025-07-06 16:34:55] [Rank 0] Group 5 Loss: 4.2376
+[2025-07-06 16:34:55] [Rank 0] Group 6 Loss: 4.2012
+[2025-07-06 16:34:55] [Rank 0] Group 7 Loss: 4.3003
+[2025-07-06 16:34:55] [Rank 0] Group 8 Loss: 4.2579
+[2025-07-06 16:34:55] [Rank 0] Group 9 Loss: 4.2471
+[2025-07-06 16:34:55] [Rank 0] Group 10 Loss: 4.2623
+[2025-07-06 16:34:55] [Rank 0] Group 11 Loss: 4.2634
+[2025-07-06 16:34:55] [Rank 0] Group 0 FTA: 0.1508
+[2025-07-06 16:34:55] [Rank 0] Group 1 FTA: 0.1589
+[2025-07-06 16:34:55] [Rank 0] Group 2 FTA: 0.2760
+[2025-07-06 16:34:55] [Rank 0] Group 3 FTA: 0.1380
+[2025-07-06 16:34:55] [Rank 0] Group 4 FTA: 0.1172
+[2025-07-06 16:34:55] [Rank 0] Group 5 FTA: 0.1172
+[2025-07-06 16:34:55] [Rank 0] Group 6 FTA: 0.2161
+[2025-07-06 16:34:55] [Rank 0] Group 7 FTA: 0.2786
+[2025-07-06 16:34:55] [Rank 0] Group 8 FTA: 0.2266
+[2025-07-06 16:34:55] [Rank 0] Group 9 FTA: 0.2266
+[2025-07-06 16:34:55] [Rank 0] Group 10 FTA: 0.2227
+[2025-07-06 16:34:55] [Rank 0] Group 11 FTA: 0.2461
+[2025-07-06 16:34:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 16:34:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 16:34:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 16:34:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 16:34:56] [Rank 0] step:8001/10000 train_time:649269ms step_avg:81.15ms
+[2025-07-06 16:34:58] [Rank 0] step:8021/10000 train_time:651429ms step_avg:81.22ms
+[2025-07-06 16:35:00] [Rank 0] step:8041/10000 train_time:652924ms step_avg:81.20ms
+[2025-07-06 16:35:01] [Rank 0] step:8061/10000 train_time:654420ms step_avg:81.18ms
+[2025-07-06 16:35:03] [Rank 0] step:8081/10000 train_time:655918ms step_avg:81.17ms
+[2025-07-06 16:35:05] [Rank 0] step:8101/10000 train_time:657415ms step_avg:81.15ms
+[2025-07-06 16:35:06] [Rank 0] step:8121/10000 train_time:659146ms step_avg:81.17ms
+[2025-07-06 16:35:08] [Rank 0] step:8141/10000 train_time:660644ms step_avg:81.15ms
+[2025-07-06 16:35:09] [Rank 0] step:8161/10000 train_time:662142ms step_avg:81.13ms
+[2025-07-06 16:35:11] [Rank 0] step:8181/10000 train_time:663641ms step_avg:81.12ms
+[2025-07-06 16:35:13] [Rank 0] step:8201/10000 train_time:665799ms step_avg:81.19ms
+[2025-07-06 16:35:14] [Rank 0] step:8221/10000 train_time:667298ms step_avg:81.17ms
+[2025-07-06 16:35:16] [Rank 0] step:8241/10000 train_time:668797ms step_avg:81.15ms
+[2025-07-06 16:35:17] [Rank 0] step:8261/10000 train_time:670298ms step_avg:81.14ms
+[2025-07-06 16:35:19] [Rank 0] step:8281/10000 train_time:671849ms step_avg:81.13ms
+[2025-07-06 16:35:21] [Rank 0] step:8301/10000 train_time:673539ms step_avg:81.14ms
+[2025-07-06 16:35:22] [Rank 0] step:8321/10000 train_time:675038ms step_avg:81.12ms
+[2025-07-06 16:35:24] [Rank 0] step:8341/10000 train_time:676540ms step_avg:81.11ms
+[2025-07-06 16:35:25] [Rank 0] step:8361/10000 train_time:678043ms step_avg:81.10ms
+[2025-07-06 16:35:27] [Rank 0] step:8381/10000 train_time:680202ms step_avg:81.16ms
+[2025-07-06 16:35:29] [Rank 0] step:8401/10000 train_time:681804ms step_avg:81.16ms
+[2025-07-06 16:35:30] [Rank 0] step:8421/10000 train_time:683305ms step_avg:81.14ms
+[2025-07-06 16:35:32] [Rank 0] step:8441/10000 train_time:684808ms step_avg:81.13ms
+[2025-07-06 16:35:34] [Rank 0] step:8461/10000 train_time:686565ms step_avg:81.14ms
+[2025-07-06 16:35:35] [Rank 0] step:8481/10000 train_time:688047ms step_avg:81.13ms
+[2025-07-06 16:35:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 16:35:37] [Rank 0] PRINT: step:8500/10000 train_loss:1.6480 val_loss:1.6326 train_time:689551ms step_avg:81.12ms
+[2025-07-06 16:35:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 16:35:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 16:35:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 16:41:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 16:41:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 16:41:02] [Rank 0] Total Loss: 4.2707
+[2025-07-06 16:41:02] [Rank 0] Total FTA: 0.2721
+[2025-07-06 16:41:02] [Rank 0] Group 0 Loss: 4.5486
+[2025-07-06 16:41:02] [Rank 0] Group 1 Loss: 4.2735
+[2025-07-06 16:41:02] [Rank 0] Group 2 Loss: 4.0671
+[2025-07-06 16:41:02] [Rank 0] Group 3 Loss: 4.2444
+[2025-07-06 16:41:02] [Rank 0] Group 4 Loss: 4.2006
+[2025-07-06 16:41:02] [Rank 0] Group 5 Loss: 4.1604
+[2025-07-06 16:41:02] [Rank 0] Group 6 Loss: 4.1958
+[2025-07-06 16:41:02] [Rank 0] Group 7 Loss: 4.2830
+[2025-07-06 16:41:02] [Rank 0] Group 8 Loss: 4.2470
+[2025-07-06 16:41:02] [Rank 0] Group 9 Loss: 4.2510
+[2025-07-06 16:41:02] [Rank 0] Group 10 Loss: 4.2265
+[2025-07-06 16:41:02] [Rank 0] Group 11 Loss: 4.2744
+[2025-07-06 16:41:02] [Rank 0] Group 0 FTA: 0.3628
+[2025-07-06 16:41:02] [Rank 0] Group 1 FTA: 0.3359
+[2025-07-06 16:41:02] [Rank 0] Group 2 FTA: 0.4349
+[2025-07-06 16:41:02] [Rank 0] Group 3 FTA: 0.1875
+[2025-07-06 16:41:02] [Rank 0] Group 4 FTA: 0.1719
+[2025-07-06 16:41:02] [Rank 0] Group 5 FTA: 0.1719
+[2025-07-06 16:41:02] [Rank 0] Group 6 FTA: 0.2083
+[2025-07-06 16:41:02] [Rank 0] Group 7 FTA: 0.2995
+[2025-07-06 16:41:02] [Rank 0] Group 8 FTA: 0.2786
+[2025-07-06 16:41:02] [Rank 0] Group 9 FTA: 0.2578
+[2025-07-06 16:41:02] [Rank 0] Group 10 FTA: 0.2285
+[2025-07-06 16:41:02] [Rank 0] Group 11 FTA: 0.2627
+[2025-07-06 16:41:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 16:41:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 16:41:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 16:41:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 16:41:04] [Rank 0] step:8501/10000 train_time:689572ms step_avg:81.12ms
+[2025-07-06 16:41:05] [Rank 0] step:8521/10000 train_time:691086ms step_avg:81.10ms
+[2025-07-06 16:41:07] [Rank 0] step:8541/10000 train_time:692581ms step_avg:81.09ms
+[2025-07-06 16:41:09] [Rank 0] step:8561/10000 train_time:694741ms step_avg:81.15ms
+[2025-07-06 16:41:10] [Rank 0] step:8581/10000 train_time:696237ms step_avg:81.14ms
+[2025-07-06 16:41:12] [Rank 0] step:8601/10000 train_time:697735ms step_avg:81.12ms
+[2025-07-06 16:41:14] [Rank 0] step:8621/10000 train_time:699407ms step_avg:81.13ms
+[2025-07-06 16:41:15] [Rank 0] step:8641/10000 train_time:700989ms step_avg:81.12ms
+[2025-07-06 16:41:17] [Rank 0] step:8661/10000 train_time:702724ms step_avg:81.14ms
+[2025-07-06 16:41:18] [Rank 0] step:8681/10000 train_time:704222ms step_avg:81.12ms
+[2025-07-06 16:41:20] [Rank 0] step:8701/10000 train_time:705722ms step_avg:81.11ms
+[2025-07-06 16:41:21] [Rank 0] step:8721/10000 train_time:707221ms step_avg:81.09ms
+[2025-07-06 16:41:24] [Rank 0] step:8741/10000 train_time:709370ms step_avg:81.15ms
+[2025-07-06 16:41:25] [Rank 0] step:8761/10000 train_time:710869ms step_avg:81.14ms
+[2025-07-06 16:41:27] [Rank 0] step:8781/10000 train_time:712369ms step_avg:81.13ms
+[2025-07-06 16:41:28] [Rank 0] step:8801/10000 train_time:713871ms step_avg:81.11ms
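These step lines follow a fixed format, so the log is easy to post-process. A small parser that recovers (step, cumulative ms, step_avg) tuples from a saved copy of this file (the filename below is a placeholder):

    import re

    STEP_RE = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")

    rows = []
    with open("training_log.txt") as f:      # placeholder path for this log file
        for line in f:
            m = STEP_RE.search(line)
            if m:
                rows.append((int(m.group(1)), int(m.group(2)), float(m.group(3))))
    # rows -> [(8501, 689572, 81.12), (8521, 691086, 81.10), ...]

The validation summary lines do not match because their train_loss/val_loss fields sit between the step and train_time fields.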
+[2025-07-06 16:41:30] [Rank 0] step:8821/10000 train_time:715629ms step_avg:81.13ms
+[2025-07-06 16:41:32] [Rank 0] step:8841/10000 train_time:717513ms step_avg:81.16ms
+[2025-07-06 16:41:33] [Rank 0] step:8861/10000 train_time:719015ms step_avg:81.14ms
+[2025-07-06 16:41:35] [Rank 0] step:8881/10000 train_time:720514ms step_avg:81.13ms
+[2025-07-06 16:41:36] [Rank 0] step:8901/10000 train_time:722014ms step_avg:81.12ms
+[2025-07-06 16:41:38] [Rank 0] step:8921/10000 train_time:724185ms step_avg:81.18ms
+[2025-07-06 16:41:40] [Rank 0] step:8941/10000 train_time:725684ms step_avg:81.16ms
+[2025-07-06 16:41:41] [Rank 0] step:8961/10000 train_time:727184ms step_avg:81.15ms
+[2025-07-06 16:41:43] [Rank 0] step:8981/10000 train_time:728685ms step_avg:81.14ms
+[2025-07-06 16:41:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 16:41:45] [Rank 0] PRINT: step:9000/10000 train_loss:1.6178 val_loss:1.6064 train_time:730186ms step_avg:81.13ms
+[2025-07-06 16:41:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 16:41:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 16:41:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 16:47:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 16:47:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 16:47:07] [Rank 0] Total Loss: 4.3070
+[2025-07-06 16:47:07] [Rank 0] Total FTA: 0.2476
+[2025-07-06 16:47:07] [Rank 0] Group 0 Loss: 4.5788
+[2025-07-06 16:47:07] [Rank 0] Group 1 Loss: 4.3869
+[2025-07-06 16:47:07] [Rank 0] Group 2 Loss: 4.0730
+[2025-07-06 16:47:07] [Rank 0] Group 3 Loss: 4.3205
+[2025-07-06 16:47:07] [Rank 0] Group 4 Loss: 4.2206
+[2025-07-06 16:47:07] [Rank 0] Group 5 Loss: 4.2199
+[2025-07-06 16:47:07] [Rank 0] Group 6 Loss: 4.2294
+[2025-07-06 16:47:07] [Rank 0] Group 7 Loss: 4.2777
+[2025-07-06 16:47:07] [Rank 0] Group 8 Loss: 4.2946
+[2025-07-06 16:47:07] [Rank 0] Group 9 Loss: 4.2606
+[2025-07-06 16:47:07] [Rank 0] Group 10 Loss: 4.2958
+[2025-07-06 16:47:07] [Rank 0] Group 11 Loss: 4.2825
+[2025-07-06 16:47:07] [Rank 0] Group 0 FTA: 0.1560
+[2025-07-06 16:47:07] [Rank 0] Group 1 FTA: 0.3333
+[2025-07-06 16:47:07] [Rank 0] Group 2 FTA: 0.3932
+[2025-07-06 16:47:07] [Rank 0] Group 3 FTA: 0.1953
+[2025-07-06 16:47:07] [Rank 0] Group 4 FTA: 0.1510
+[2025-07-06 16:47:07] [Rank 0] Group 5 FTA: 0.1797
+[2025-07-06 16:47:07] [Rank 0] Group 6 FTA: 0.2786
+[2025-07-06 16:47:07] [Rank 0] Group 7 FTA: 0.2552
+[2025-07-06 16:47:07] [Rank 0] Group 8 FTA: 0.3177
+[2025-07-06 16:47:07] [Rank 0] Group 9 FTA: 0.2617
+[2025-07-06 16:47:07] [Rank 0] Group 10 FTA: 0.2617
+[2025-07-06 16:47:07] [Rank 0] Group 11 FTA: 0.2598
+[2025-07-06 16:47:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 16:47:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 16:47:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 16:47:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 16:47:09] [Rank 0] step:9001/10000 train_time:730216ms step_avg:81.13ms
+[2025-07-06 16:47:11] [Rank 0] step:9021/10000 train_time:732423ms step_avg:81.19ms
+[2025-07-06 16:47:12] [Rank 0] step:9041/10000 train_time:733917ms step_avg:81.18ms
+[2025-07-06 16:47:14] [Rank 0] step:9061/10000 train_time:735414ms step_avg:81.16ms
+[2025-07-06 16:47:15] [Rank 0] step:9081/10000 train_time:736913ms step_avg:81.15ms
+[2025-07-06 16:47:17] [Rank 0] step:9101/10000 train_time:738647ms step_avg:81.16ms
+[2025-07-06 16:47:19] [Rank 0] step:9121/10000 train_time:740145ms step_avg:81.15ms
+[2025-07-06 16:47:20] [Rank 0] step:9141/10000 train_time:741645ms step_avg:81.13ms
+[2025-07-06 16:47:22] [Rank 0] step:9161/10000 train_time:743144ms step_avg:81.12ms
+[2025-07-06 16:47:24] [Rank 0] step:9181/10000 train_time:744644ms step_avg:81.11ms
+[2025-07-06 16:47:25] [Rank 0] step:9201/10000 train_time:746791ms step_avg:81.16ms
+[2025-07-06 16:47:27] [Rank 0] step:9221/10000 train_time:748289ms step_avg:81.15ms
+[2025-07-06 16:47:28] [Rank 0] step:9241/10000 train_time:749789ms step_avg:81.14ms
+[2025-07-06 16:47:30] [Rank 0] step:9261/10000 train_time:751290ms step_avg:81.12ms
+[2025-07-06 16:47:32] [Rank 0] step:9281/10000 train_time:753433ms step_avg:81.18ms
+[2025-07-06 16:47:34] [Rank 0] step:9301/10000 train_time:755172ms step_avg:81.19ms
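Every line carries a "[Rank 0]" tag: in a multi-GPU DDP run only rank 0 writes the log, so there is a single stream rather than one per process. The usual gating pattern looks like this (a generic sketch, not the script's actual helper):

    import time
    import torch.distributed as dist

    def log_rank0(msg, log_file):
        # Only the rank-0 process prints and writes, keeping one log stream.
        if dist.is_initialized() and dist.get_rank() != 0:
            return
        stamped = f"[{time.strftime('%Y-%m-%d %H:%M:%S')}] [Rank 0] {msg}"
        print(stamped)
        log_file.write(stamped + "\n")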
+[2025-07-06 16:47:35] [Rank 0] step:9321/10000 train_time:756669ms step_avg:81.18ms
+[2025-07-06 16:47:37] [Rank 0] step:9341/10000 train_time:758168ms step_avg:81.17ms
+[2025-07-06 16:47:39] [Rank 0] step:9361/10000 train_time:759719ms step_avg:81.16ms
+[2025-07-06 16:47:40] [Rank 0] step:9381/10000 train_time:761832ms step_avg:81.21ms
+[2025-07-06 16:47:42] [Rank 0] step:9401/10000 train_time:763332ms step_avg:81.20ms
+[2025-07-06 16:47:43] [Rank 0] step:9421/10000 train_time:764832ms step_avg:81.18ms
+[2025-07-06 16:47:45] [Rank 0] step:9441/10000 train_time:766331ms step_avg:81.17ms
+[2025-07-06 16:47:47] [Rank 0] step:9461/10000 train_time:768472ms step_avg:81.23ms
+[2025-07-06 16:47:48] [Rank 0] step:9481/10000 train_time:769972ms step_avg:81.21ms
+[2025-07-06 16:47:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 16:47:51] [Rank 0] PRINT: step:9500/10000 train_loss:1.5952 val_loss:1.5871 train_time:771474ms step_avg:81.21ms
+[2025-07-06 16:47:51] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 16:47:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 16:47:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 16:53:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 16:53:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 16:53:15] [Rank 0] Total Loss: 4.3156
+[2025-07-06 16:53:15] [Rank 0] Total FTA: 0.2377
+[2025-07-06 16:53:15] [Rank 0] Group 0 Loss: 4.6240
+[2025-07-06 16:53:15] [Rank 0] Group 1 Loss: 4.3402
+[2025-07-06 16:53:15] [Rank 0] Group 2 Loss: 4.0783
+[2025-07-06 16:53:15] [Rank 0] Group 3 Loss: 4.2340
+[2025-07-06 16:53:15] [Rank 0] Group 4 Loss: 4.2684
+[2025-07-06 16:53:15] [Rank 0] Group 5 Loss: 4.2104
+[2025-07-06 16:53:16] [Rank 0] Group 6 Loss: 4.2502
+[2025-07-06 16:53:16] [Rank 0] Group 7 Loss: 4.3002
+[2025-07-06 16:53:16] [Rank 0] Group 8 Loss: 4.3188
+[2025-07-06 16:53:16] [Rank 0] Group 9 Loss: 4.3055
+[2025-07-06 16:53:16] [Rank 0] Group 10 Loss: 4.2829
+[2025-07-06 16:53:16] [Rank 0] Group 11 Loss: 4.2996
+[2025-07-06 16:53:16] [Rank 0] Group 0 FTA: 0.1873
+[2025-07-06 16:53:16] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-06 16:53:16] [Rank 0] Group 2 FTA: 0.3385
+[2025-07-06 16:53:16] [Rank 0] Group 3 FTA: 0.1771
+[2025-07-06 16:53:16] [Rank 0] Group 4 FTA: 0.2005
+[2025-07-06 16:53:16] [Rank 0] Group 5 FTA: 0.2708
+[2025-07-06 16:53:16] [Rank 0] Group 6 FTA: 0.2422
+[2025-07-06 16:53:16] [Rank 0] Group 7 FTA: 0.2526
+[2025-07-06 16:53:16] [Rank 0] Group 8 FTA: 0.2865
+[2025-07-06 16:53:16] [Rank 0] Group 9 FTA: 0.2617
+[2025-07-06 16:53:16] [Rank 0] Group 10 FTA: 0.2637
+[2025-07-06 16:53:16] [Rank 0] Group 11 FTA: 0.2412
+[2025-07-06 16:53:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 16:53:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 16:53:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-06 16:53:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 16:53:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-06 16:53:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 16:53:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-06 16:53:17] [Rank 0] step:9501/10000 train_time:771495ms step_avg:81.20ms +[2025-07-06 16:53:17] [Rank 0] step:9501/10000 train_time:771495ms step_avg:81.20ms +[2025-07-06 16:53:19] [Rank 0] step:9521/10000 train_time:773006ms step_avg:81.19ms +[2025-07-06 16:53:19] [Rank 0] step:9521/10000 train_time:773006ms step_avg:81.19ms +[2025-07-06 16:53:21] [Rank 0] step:9541/10000 train_time:774502ms step_avg:81.18ms +[2025-07-06 16:53:21] [Rank 0] step:9541/10000 train_time:774502ms step_avg:81.18ms +[2025-07-06 16:53:22] [Rank 0] step:9561/10000 train_time:776643ms step_avg:81.23ms +[2025-07-06 16:53:22] [Rank 0] step:9561/10000 train_time:776643ms step_avg:81.23ms +[2025-07-06 16:53:24] [Rank 0] step:9581/10000 train_time:778141ms step_avg:81.22ms +[2025-07-06 16:53:24] [Rank 0] step:9581/10000 train_time:778141ms step_avg:81.22ms +[2025-07-06 16:53:25] [Rank 0] step:9601/10000 train_time:779636ms step_avg:81.20ms +[2025-07-06 16:53:25] [Rank 0] step:9601/10000 train_time:779636ms step_avg:81.20ms +[2025-07-06 16:53:27] [Rank 0] step:9621/10000 train_time:781134ms step_avg:81.19ms +[2025-07-06 16:53:27] [Rank 0] step:9621/10000 train_time:781134ms step_avg:81.19ms +[2025-07-06 16:53:28] [Rank 0] step:9641/10000 train_time:782865ms step_avg:81.20ms +[2025-07-06 16:53:28] [Rank 0] step:9641/10000 train_time:782865ms step_avg:81.20ms +[2025-07-06 16:53:30] [Rank 0] step:9661/10000 train_time:784366ms step_avg:81.19ms +[2025-07-06 16:53:30] [Rank 0] step:9661/10000 train_time:784366ms step_avg:81.19ms +[2025-07-06 16:53:31] [Rank 0] step:9681/10000 train_time:785865ms step_avg:81.18ms +[2025-07-06 16:53:31] [Rank 0] step:9681/10000 train_time:785865ms step_avg:81.18ms +[2025-07-06 16:53:33] [Rank 0] step:9701/10000 train_time:787364ms step_avg:81.16ms +[2025-07-06 16:53:33] [Rank 0] step:9701/10000 train_time:787364ms step_avg:81.16ms +[2025-07-06 16:53:35] [Rank 0] step:9721/10000 train_time:788915ms step_avg:81.16ms +[2025-07-06 16:53:35] [Rank 0] step:9721/10000 train_time:788915ms step_avg:81.16ms +[2025-07-06 16:53:37] [Rank 0] step:9741/10000 train_time:791013ms step_avg:81.20ms +[2025-07-06 16:53:37] [Rank 0] step:9741/10000 train_time:791013ms step_avg:81.20ms +[2025-07-06 16:53:38] [Rank 0] step:9761/10000 train_time:792513ms step_avg:81.19ms +[2025-07-06 16:53:38] [Rank 0] step:9761/10000 train_time:792513ms step_avg:81.19ms +[2025-07-06 16:53:40] [Rank 0] step:9781/10000 train_time:794013ms step_avg:81.18ms +[2025-07-06 16:53:40] [Rank 0] step:9781/10000 train_time:794013ms step_avg:81.18ms +[2025-07-06 16:53:41] [Rank 0] step:9801/10000 train_time:795516ms step_avg:81.17ms +[2025-07-06 16:53:41] [Rank 
0] step:9801/10000 train_time:795516ms step_avg:81.17ms +[2025-07-06 16:53:43] [Rank 0] step:9821/10000 train_time:797656ms step_avg:81.22ms +[2025-07-06 16:53:43] [Rank 0] step:9821/10000 train_time:797656ms step_avg:81.22ms +[2025-07-06 16:53:45] [Rank 0] step:9841/10000 train_time:799153ms step_avg:81.21ms +[2025-07-06 16:53:45] [Rank 0] step:9841/10000 train_time:799153ms step_avg:81.21ms +[2025-07-06 16:53:46] [Rank 0] step:9861/10000 train_time:800654ms step_avg:81.19ms +[2025-07-06 16:53:46] [Rank 0] step:9861/10000 train_time:800654ms step_avg:81.19ms +[2025-07-06 16:53:48] [Rank 0] step:9881/10000 train_time:802155ms step_avg:81.18ms +[2025-07-06 16:53:48] [Rank 0] step:9881/10000 train_time:802155ms step_avg:81.18ms +[2025-07-06 16:53:50] [Rank 0] step:9901/10000 train_time:804320ms step_avg:81.24ms +[2025-07-06 16:53:50] [Rank 0] step:9901/10000 train_time:804320ms step_avg:81.24ms +[2025-07-06 16:53:51] [Rank 0] step:9921/10000 train_time:805799ms step_avg:81.22ms +[2025-07-06 16:53:51] [Rank 0] step:9921/10000 train_time:805799ms step_avg:81.22ms +[2025-07-06 16:53:53] [Rank 0] step:9941/10000 train_time:807425ms step_avg:81.22ms +[2025-07-06 16:53:53] [Rank 0] step:9941/10000 train_time:807425ms step_avg:81.22ms +[2025-07-06 16:53:54] [Rank 0] step:9961/10000 train_time:808925ms step_avg:81.21ms +[2025-07-06 16:53:54] [Rank 0] step:9961/10000 train_time:808925ms step_avg:81.21ms +[2025-07-06 16:53:56] [Rank 0] step:9981/10000 train_time:810429ms step_avg:81.20ms +[2025-07-06 16:53:56] [Rank 0] step:9981/10000 train_time:810429ms step_avg:81.20ms +[2025-07-06 16:53:58] [Rank 0] step:10000/10000 train_time:812525ms step_avg:81.25ms +[2025-07-06 16:53:58] [Rank 0] step:10000/10000 train_time:812525ms step_avg:81.25ms +[2025-07-06 16:53:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 16:53:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 16:53:59] [Rank 0] PRINT: step:10000/10000 train_loss:1.5792 val_loss:1.5748 train_time:812606ms step_avg:81.26ms +[2025-07-06 16:53:59] [Rank 0] PRINT: step:10000/10000 train_loss:1.5792 val_loss:1.5748 train_time:812606ms step_avg:81.26ms +[2025-07-06 16:53:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 16:53:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 16:53:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 16:53:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 16:53:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 16:59:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 16:59:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 16:59:24] [Rank 0] Total Loss: 4.3176
+[2025-07-06 16:59:24] [Rank 0] Total FTA: 0.2588
+[2025-07-06 16:59:24] [Rank 0] Group 0 Loss: 4.5235
+[2025-07-06 16:59:24] [Rank 0] Group 1 Loss: 4.3904
+[2025-07-06 16:59:24] [Rank 0] Group 2 Loss: 4.0956
+[2025-07-06 16:59:24] [Rank 0] Group 3 Loss: 4.2945
+[2025-07-06 16:59:24] [Rank 0] Group 4 Loss: 4.2484
+[2025-07-06 16:59:24] [Rank 0] Group 5 Loss: 4.2643
+[2025-07-06 16:59:24] [Rank 0] Group 6 Loss: 4.2308
+[2025-07-06 16:59:24] [Rank 0] Group 7 Loss: 4.3302
+[2025-07-06 16:59:24] [Rank 0] Group 8 Loss: 4.3254
+[2025-07-06 16:59:24] [Rank 0] Group 9 Loss: 4.2837
+[2025-07-06 16:59:24] [Rank 0] Group 10 Loss: 4.2912
+[2025-07-06 16:59:24] [Rank 0] Group 11 Loss: 4.3199
+[2025-07-06 16:59:24] [Rank 0] Group 0 FTA: 0.3303
+[2025-07-06 16:59:24] [Rank 0] Group 1 FTA: 0.1302
+[2025-07-06 16:59:24] [Rank 0] Group 2 FTA: 0.2995
+[2025-07-06 16:59:24] [Rank 0] Group 3 FTA: 0.2214
+[2025-07-06 16:59:24] [Rank 0] Group 4 FTA: 0.2266
+[2025-07-06 16:59:24] [Rank 0] Group 5 FTA: 0.2370
+[2025-07-06 16:59:24] [Rank 0] Group 6 FTA: 0.2656
+[2025-07-06 16:59:24] [Rank 0] Group 7 FTA: 0.3047
+[2025-07-06 16:59:24] [Rank 0] Group 8 FTA: 0.2656
+[2025-07-06 16:59:24] [Rank 0] Group 9 FTA: 0.2539
+[2025-07-06 16:59:24] [Rank 0] Group 10 FTA: 0.2832
+[2025-07-06 16:59:24] [Rank 0] Group 11 FTA: 0.2393
+[2025-07-06 16:59:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-06 16:59:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-06 16:59:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-06 16:59:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-06 16:59:26] [Rank 0] step:10001/10000 train_time:812628ms step_avg:81.25ms
+[2025-07-06 16:59:26] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 16:59:26 2025 ---
+[2025-07-06 16:59:26] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0118486a838cde50ade07d5b6f960b2c172f8d0f
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 43,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "e25860a3-f256-4f04-957f-b89636a83c08",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..0f261b94f9344b8d7aedc97c2be22622caca7ef1
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e156bbef24c1ca952619f16229119ecd7032694feeba10e04ec40a3455e7c2d2
+size 330776
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..c513cad53af1d4121b6801ec68c566bf42d940bd
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1acdf1b3e701833238ddc159264ac43d0a076ff858681719b7c33fc3db1e4dbd
+size 270913
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..36d0e16b77864f04fde02951171b33138c90b29a
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e4664726920af86c968ccff43e7ac87e0d7b4e9e899c9206ae08db13d3ba51c4
+size 91516
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..53edb223721373c03e060f7d0ad2aa83630177c5
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d5108e2f0b3ca06ee117ed7df4e33271b600ae789781c0e25112bef610a0431
+size 100925
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/training_log_e25860a3-f256-4f04-957f-b89636a83c08.txt b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/training_log_e25860a3-f256-4f04-957f-b89636a83c08.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2db176808cb6e36fe1ff19b2f644ec41a5e1dd40
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/training_log_e25860a3-f256-4f04-957f-b89636a83c08.txt
@@ -0,0 +1,5144 @@
+[2025-07-08 05:13:09] [Rank 0] PRINT: --- Script Start: Tue Jul 8 05:13:09 2025 ---
+[2025-07-08 05:13:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001)
+[2025-07-08 05:13:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-08 05:13:09] [Rank 0] PRINT: Using fixed seed: 43
+[2025-07-08 05:13:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43
+[2025-07-08 05:13:09] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle loops over the shards, so training can run for multiple epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
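+                         # (For reference, the run logged in this file parsed to
+                         # Namespace(unet=False, seed=43, optimizer_mode=0,
+                         # model_parameterization='qkvo', adam_lr=0.0001), i.e. the
+                         # default mode 0, in which Muon covers every hidden
+                         # attention and MLP matrix.)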
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole","qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
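+# For reference alongside the loader defined above: a minimal sketch of the .bin
+# shard layout that _load_data_shard expects. write_shard is illustrative only
+# (it is not called anywhere in this script) and assumes just what the loader
+# asserts: a 256-entry int32 header holding magic 20240520, version 1, and the
+# token count, followed by the tokens themselves as uint16.
+def write_shard(path, tokens):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520      # magic number checked by _load_data_shard
+    header[1] = 1             # supported version
+    header[2] = len(tokens)   # number of uint16 tokens after the 256*4-byte header
+    with open(path, "wb") as f:
+        f.write(header.tobytes())
+        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())
+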
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # run_folder_name and run_dir_path were constructed above; just materialize the directory
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14) # history is keyed by training step, not epoch
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
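+    Uses the same stratified sampling and block-padded loss computation as
+    run_detailed_evaluation above, but reports per-group loss only.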
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
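# Forward contract probed here: with a target_seq the model returns the scalar training loss;
+            # with target_seq=None it returns logits (bare, or inside a tuple), which is why
+            # call sites in this script unpack the result defensively.
+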
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 05:13:09] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
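+# Worked example of the stable-then-decay schedule configured in Hyperparameters
+# above: with num_iterations = 10000 and cooldown_frac = 0.8, the get_lr()
+# multiplier defined further below stays at 1.0 for the first
+# (1 - 0.8) * 10000 = 2000 steps, then decays linearly toward 0.1; at step 6000,
+# x = 0.6 and w = (1 - 0.6) / 0.8 = 0.5, giving 0.5 * 1.0 + 0.5 * 0.1 = 0.55.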
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation and config saving
+    set_seed(exp_args.seed)
+
+    # run_folder_name and run_dir_path were already derived from the config and seed above
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
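+    # For reference, a call like print0("PRINT: Starting warmup...", console=True)
+    # echoes "Starting warmup..." to stdout (the "PRINT:" prefix is stripped for
+    # the console) and appends the full timestamped line to the logfile, e.g.
+    #   [2025-07-08 05:13:12] [Rank 0] PRINT: Starting warmup...
+    # ... 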
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 05:13:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-08 05:13:09] [Rank 0] PRINT: Constructing model...
+[2025-07-08 05:13:11] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-08 05:13:11] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-08 05:13:12] [Rank 0] PRINT: Testing model forward function:
+[2025-07-08 05:13:12] [Rank 0] PRINT: Model test - Result type:
+[2025-07-08 05:13:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-08 05:13:12] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-08 05:13:12] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-08 05:13:12] [Rank 0] PRINT: Model returns:
+[2025-07-08 05:13:12] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-08 05:13:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-08 05:13:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-08 05:13:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-08 05:13:12] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-08 05:13:12] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-08 05:13:12] [Rank 0] PRINT: Model compilation complete.
+[2025-07-08 05:13:12] [Rank 0] PRINT: Starting warmup...
+[2025-07-08 05:14:19] [Rank 0] PRINT: Warmup complete.
+[2025-07-08 05:14:19] [Rank 0] PRINT: Starting training...
+[2025-07-08 05:14:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:14:27] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-08 05:14:29] [Rank 0] step:21/10000 train_time:1557ms step_avg:74.13ms
+[2025-07-08 05:14:30] [Rank 0] step:41/10000 train_time:3014ms step_avg:73.52ms
+[2025-07-08 05:14:32] [Rank 0] step:61/10000 train_time:4472ms step_avg:73.32ms
+[2025-07-08 05:14:33] [Rank 0] step:81/10000 train_time:5933ms step_avg:73.25ms
+[2025-07-08 05:14:35] [Rank 0] step:101/10000 train_time:8045ms step_avg:79.66ms
+[2025-07-08 05:14:37] [Rank 0] step:121/10000 train_time:9501ms step_avg:78.52ms
+[2025-07-08 05:14:38] [Rank 0] step:141/10000 train_time:11073ms step_avg:78.53ms
+[2025-07-08 05:14:40] [Rank 0] step:161/10000 train_time:12728ms step_avg:79.06ms
+[2025-07-08 05:14:42] [Rank 0] step:181/10000 train_time:14186ms step_avg:78.37ms
+[2025-07-08 05:14:44] [Rank 0] step:201/10000 train_time:16294ms step_avg:81.07ms
+[2025-07-08 05:14:45] [Rank 0] step:221/10000 train_time:17751ms step_avg:80.32ms
+[2025-07-08 05:14:46] [Rank 0] step:241/10000 train_time:19212ms step_avg:79.72ms
+[2025-07-08 05:14:48] [Rank 0] step:261/10000 train_time:20673ms step_avg:79.21ms
+[2025-07-08 05:14:50] [Rank 0] step:281/10000 train_time:22792ms step_avg:81.11ms
+[2025-07-08 05:14:51] [Rank 0] step:301/10000 train_time:24251ms step_avg:80.57ms
+[2025-07-08 05:14:53] [Rank 0] step:321/10000 train_time:25714ms step_avg:80.11ms
+[2025-07-08 05:14:54] [Rank 0] step:341/10000 train_time:27178ms step_avg:79.70ms
+[2025-07-08 05:14:56] [Rank 0] step:361/10000 train_time:28689ms step_avg:79.47ms
+[2025-07-08 05:14:58] [Rank 0] step:381/10000 train_time:30340ms step_avg:79.63ms
+[2025-07-08 05:14:59] [Rank 0] step:401/10000 train_time:31802ms step_avg:79.31ms
+[2025-07-08 05:15:00] [Rank 0] step:421/10000 train_time:33265ms step_avg:79.02ms
+[2025-07-08 05:15:02] [Rank 0] step:441/10000 train_time:34726ms step_avg:78.74ms
+[2025-07-08 05:15:04] [Rank 0] step:461/10000 train_time:36426ms step_avg:79.02ms
+[2025-07-08 05:15:05] [Rank 0] step:481/10000 train_time:37890ms step_avg:78.77ms
+[2025-07-08 05:15:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:15:07] [Rank 0] PRINT: step:500/10000 train_loss:9.6492 val_loss:8.5917 train_time:39354ms step_avg:78.71ms
+[2025-07-08 05:15:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:15:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:15:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:20:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:20:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:20:32] [Rank 0] Total Loss: 8.9123
+[2025-07-08 05:20:32] [Rank 0] Total FTA: 0.0032
+[2025-07-08 05:20:32] [Rank 0] Group 0 Loss: 8.9296
+[2025-07-08 05:20:32] [Rank 0] Group 1 Loss: 8.9179
+[2025-07-08 05:20:32] [Rank 0] Group 2 Loss: 8.9521
+[2025-07-08 05:20:32] [Rank 0] Group 3 Loss: 8.8908
+[2025-07-08 05:20:32] [Rank 0] Group 4 Loss: 8.9044
+[2025-07-08 05:20:32] [Rank 0] Group 5 Loss: 8.8949
+[2025-07-08 05:20:32] [Rank 0] Group 6 Loss: 8.9115
+[2025-07-08 05:20:32] [Rank 0] Group 7 Loss: 8.9128
+[2025-07-08 05:20:32] [Rank 0] Group 8 Loss: 8.9010
+[2025-07-08 05:20:32] [Rank 0] Group 9 Loss: 8.9046
+[2025-07-08 05:20:32] [Rank 0] Group 10 Loss: 8.9078
+[2025-07-08 05:20:32] [Rank 0] Group 11 Loss: 8.9084
+[2025-07-08 05:20:32] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-08 05:20:32] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 05:20:32] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-08 05:20:32] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-08 05:20:32] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-08 05:20:32] [Rank 0] Group 5 FTA: 0.0104
+[2025-07-08 05:20:32] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-08 05:20:32] [Rank 0] Group 7 FTA: 0.0078
+[2025-07-08 05:20:32] [Rank 0] Group 8 FTA: 0.0052
+[2025-07-08 05:20:32] [Rank 0] Group 9 FTA: 0.0039
+[2025-07-08 05:20:32] [Rank 0] Group 10 FTA: 0.0059
+[2025-07-08 05:20:32] [Rank 0] Group 11 FTA: 0.0049
+[2025-07-08 05:20:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 05:20:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 05:20:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 05:20:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 05:20:33] [Rank 0] step:501/10000 train_time:39375ms step_avg:78.59ms
+[2025-07-08 05:20:35] [Rank 0] step:521/10000 train_time:40852ms step_avg:78.41ms
+[2025-07-08 05:20:37] [Rank 0] step:541/10000 train_time:42308ms step_avg:78.20ms
+[2025-07-08 05:20:38] [Rank 0] step:561/10000 train_time:44419ms step_avg:79.18ms
+[2025-07-08 05:20:40] [Rank 0] step:581/10000 train_time:45876ms step_avg:78.96ms
+[2025-07-08 05:20:41] [Rank 0] step:601/10000 train_time:47333ms step_avg:78.76ms
+[2025-07-08 05:20:43] [Rank 0] step:621/10000 train_time:48791ms step_avg:78.57ms
+[2025-07-08 05:20:44] [Rank 0] step:641/10000 train_time:50491ms step_avg:78.77ms
+[2025-07-08 05:20:46] [Rank 0] step:661/10000 train_time:51949ms step_avg:78.59ms
+[2025-07-08 05:20:47] [Rank 0] step:681/10000 train_time:53406ms step_avg:78.42ms
+[2025-07-08 05:20:49] [Rank 0] step:701/10000 train_time:54865ms step_avg:78.27ms
+[2025-07-08 05:20:51] [Rank 0] step:721/10000 train_time:56324ms step_avg:78.12ms
+[2025-07-08 05:20:52] [Rank 0] step:741/10000 train_time:58438ms step_avg:78.86ms
+[2025-07-08 05:20:54] [Rank 0] step:761/10000 train_time:59907ms step_avg:78.72ms
+[2025-07-08 05:20:55] [Rank 0] step:781/10000 train_time:61380ms step_avg:78.59ms
+[2025-07-08 05:20:57] [Rank 0] step:801/10000 train_time:62850ms step_avg:78.46ms
+[2025-07-08 05:20:59] [Rank 0] step:821/10000 train_time:65090ms step_avg:79.28ms
+[2025-07-08 05:21:01] [Rank 0] step:841/10000 train_time:66564ms step_avg:79.15ms
+[2025-07-08 05:21:02] [Rank 0] step:861/10000 train_time:68034ms step_avg:79.02ms
+[2025-07-08 05:21:03] [Rank 0] step:881/10000 train_time:69506ms step_avg:78.89ms
+[2025-07-08 05:21:06] [Rank 0] step:901/10000 train_time:71032ms step_avg:78.84ms
+[2025-07-08 05:21:07] [Rank 0] step:921/10000 train_time:73107ms step_avg:79.38ms
+[2025-07-08 05:21:09] [Rank 0] step:941/10000 train_time:74578ms step_avg:79.25ms
+[2025-07-08 05:21:10] [Rank 0] step:961/10000 train_time:76051ms step_avg:79.14ms
+[2025-07-08 05:21:11] [Rank 0] step:981/10000 train_time:77526ms step_avg:79.03ms
+[2025-07-08 05:21:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:21:15] [Rank 0] PRINT: step:1000/10000 train_loss:7.7952 val_loss:7.0978 train_time:79663ms step_avg:79.66ms
+[2025-07-08 05:21:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:21:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:21:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:26:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:26:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:26:39] [Rank 0] Total Loss: 7.6658
+[2025-07-08 05:26:39] [Rank 0] Total FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 0 Loss: 7.6784
+[2025-07-08 05:26:39] [Rank 0] Group 1 Loss: 7.6150
+[2025-07-08 05:26:39] [Rank 0] Group 2 Loss: 7.7728
+[2025-07-08 05:26:39] [Rank 0] Group 3 Loss: 7.6310
+[2025-07-08 05:26:39] [Rank 0] Group 4 Loss: 7.6858
+[2025-07-08 05:26:39] [Rank 0] Group 5 Loss: 7.6250
+[2025-07-08 05:26:39] [Rank 0] Group 6 Loss: 7.6776
+[2025-07-08 05:26:39] [Rank 0] Group 7 Loss: 7.6581
+[2025-07-08 05:26:39] [Rank 0] Group 8 Loss: 7.6461
+[2025-07-08 05:26:39] [Rank 0] Group 9 Loss: 7.6590
+[2025-07-08 05:26:39] [Rank 0] Group 10 Loss: 7.6686
+[2025-07-08 05:26:39] [Rank 0] Group 11 Loss: 7.6623
+[2025-07-08 05:26:39] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-08 05:26:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 05:26:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 05:26:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 05:26:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 05:26:40] [Rank 0] step:1001/10000 train_time:79684ms step_avg:79.60ms
+[2025-07-08 05:26:42] [Rank 0] step:1021/10000 train_time:81178ms step_avg:79.51ms
+[2025-07-08 05:26:43] [Rank 0] step:1041/10000 train_time:82641ms step_avg:79.39ms
+[2025-07-08 05:26:45] [Rank 0] step:1061/10000 train_time:84109ms step_avg:79.27ms
+[2025-07-08 05:26:47] [Rank 0] step:1081/10000 train_time:85643ms step_avg:79.23ms
+[2025-07-08 05:26:48] [Rank 0] step:1101/10000 train_time:87721ms step_avg:79.67ms
+[2025-07-08 05:26:50] [Rank 0] step:1121/10000 train_time:89193ms step_avg:79.57ms
+[2025-07-08 05:26:51] [Rank 0] step:1141/10000 train_time:90663ms step_avg:79.46ms
+[2025-07-08 05:26:53] [Rank 0] step:1161/10000 train_time:92135ms step_avg:79.36ms
+[2025-07-08 05:26:55] [Rank 0] step:1181/10000 train_time:94252ms step_avg:79.81ms
+[2025-07-08 05:26:56] [Rank 0] step:1201/10000 train_time:95725ms step_avg:79.70ms
+[2025-07-08 05:26:58] [Rank 0] step:1221/10000 train_time:97198ms step_avg:79.60ms
+[2025-07-08 05:26:59] [Rank 0] step:1241/10000 train_time:98671ms step_avg:79.51ms
+[2025-07-08 05:27:01] [Rank 0] step:1261/10000 train_time:100836ms step_avg:79.97ms
+[2025-07-08 05:27:03] [Rank 0] step:1281/10000 train_time:102291ms step_avg:79.85ms
+[2025-07-08 05:27:04] [Rank 0] step:1301/10000 train_time:103764ms step_avg:79.76ms
+[2025-07-08 05:27:06] [Rank 0] step:1321/10000 train_time:105241ms step_avg:79.67ms
+[2025-07-08 05:27:07] [Rank 0] step:1341/10000 train_time:106720ms step_avg:79.58ms
+[2025-07-08 05:27:09] [Rank 0] step:1361/10000 train_time:108434ms step_avg:79.67ms
+[2025-07-08 05:27:11] [Rank 0] step:1381/10000 train_time:109914ms step_avg:79.59ms
+[2025-07-08 05:27:12] [Rank 0] step:1401/10000 train_time:111390ms step_avg:79.51ms
+[2025-07-08 05:27:13] [Rank 0] step:1421/10000 train_time:112870ms step_avg:79.43ms
+[2025-07-08 05:27:16] [Rank 0] step:1441/10000 train_time:114607ms step_avg:79.53ms
+[2025-07-08 05:27:17] [Rank 0] step:1461/10000 train_time:116635ms step_avg:79.83ms
+[2025-07-08 05:27:19] [Rank 0] step:1481/10000 train_time:118152ms step_avg:79.78ms
+[2025-07-08 05:27:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:27:21] [Rank 0] PRINT: step:1500/10000 train_loss:6.5314 val_loss:6.0130 train_time:119631ms step_avg:79.75ms
+[2025-07-08 05:27:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:27:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:27:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:32:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:32:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:32:47] [Rank 0] Total Loss: 6.7988
+[2025-07-08 05:32:47] [Rank 0] Total FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 0 Loss: 6.7571
+[2025-07-08 05:32:47] [Rank 0] Group 1 Loss: 6.8035
+[2025-07-08 05:32:47] [Rank 0] Group 2 Loss: 6.9124
+[2025-07-08 05:32:47] [Rank 0] Group 3 Loss: 6.7553
+[2025-07-08 05:32:47] [Rank 0] Group 4 Loss: 6.8471
+[2025-07-08 05:32:47] [Rank 0] Group 5 Loss: 6.7723
+[2025-07-08 05:32:47] [Rank 0] Group 6 Loss: 6.7914
+[2025-07-08 05:32:47] [Rank 0] Group 7 Loss: 6.7947
+[2025-07-08 05:32:47] [Rank 0] Group 8 Loss: 6.7682
+[2025-07-08 05:32:47] [Rank 0] Group 9 Loss: 6.8381
+[2025-07-08 05:32:47] [Rank 0] Group 10 Loss: 6.7981
+[2025-07-08 05:32:47] [Rank 0] Group 11 Loss: 6.8001
+[2025-07-08 05:32:47] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-08 05:32:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 05:32:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 05:32:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 05:32:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 05:32:48] [Rank 0] step:1501/10000 train_time:119651ms step_avg:79.71ms
+[2025-07-08 05:32:50] [Rank 0] step:1521/10000 train_time:121119ms step_avg:79.63ms
+[2025-07-08 05:32:52] [Rank 0] step:1541/10000 train_time:123241ms step_avg:79.97ms
+[2025-07-08 05:32:53] [Rank 0] step:1561/10000 train_time:124715ms step_avg:79.89ms
+[2025-07-08 05:32:55] [Rank 0] step:1581/10000 train_time:126186ms step_avg:79.81ms
+[2025-07-08 05:32:56] [Rank 0] step:1601/10000 train_time:127660ms step_avg:79.74ms
+[2025-07-08 05:32:58] [Rank 0] step:1621/10000 train_time:129189ms step_avg:79.70ms
+[2025-07-08 05:33:00] [Rank 0] step:1641/10000 train_time:131279ms step_avg:80.00ms
+[2025-07-08 05:33:01] [Rank 0] step:1661/10000 train_time:132752ms step_avg:79.92ms
+[2025-07-08 05:33:03] [Rank 0] step:1681/10000 train_time:134228ms step_avg:79.85ms
+[2025-07-08 05:33:04] [Rank 0] step:1701/10000 train_time:135704ms step_avg:79.78ms
+[2025-07-08 05:33:06] [Rank 0] step:1721/10000 train_time:137836ms step_avg:80.09ms
+[2025-07-08 05:33:08] [Rank 0] step:1741/10000 train_time:139312ms step_avg:80.02ms
+[2025-07-08 05:33:09] [Rank 0] step:1761/10000 train_time:140790ms step_avg:79.95ms
+[2025-07-08 05:33:11] [Rank 0] step:1781/10000 train_time:142268ms step_avg:79.88ms
+[2025-07-08 05:33:13] [Rank 0] step:1801/10000 train_time:144006ms step_avg:79.96ms
+[2025-07-08 05:33:15] [Rank 0] step:1821/10000 train_time:145876ms step_avg:80.11ms
+[2025-07-08 05:33:16] [Rank 0] step:1841/10000 train_time:147357ms step_avg:80.04ms
+[2025-07-08 05:33:17] [Rank 0] step:1861/10000 train_time:148839ms step_avg:79.98ms
+[2025-07-08 05:33:19] [Rank 0] step:1881/10000 train_time:150316ms step_avg:79.91ms
+[2025-07-08 05:33:21] [Rank 0] step:1901/10000 train_time:152462ms step_avg:80.20ms
+[2025-07-08 05:33:23] [Rank 0] step:1921/10000 train_time:153940ms step_avg:80.14ms
+[2025-07-08 05:33:24] [Rank 0] step:1941/10000 train_time:155420ms step_avg:80.07ms
+[2025-07-08 05:33:26] [Rank 0] step:1961/10000 train_time:156902ms step_avg:80.01ms
+[2025-07-08 05:33:27] [Rank 0] step:1981/10000 train_time:158433ms step_avg:79.98ms
+[2025-07-08 05:33:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:33:30] [Rank 0] PRINT: step:2000/10000 train_loss:5.5587 val_loss:5.1417 train_time:160097ms step_avg:80.05ms
+[2025-07-08 05:33:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:33:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:33:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:38:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:38:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:38:57] [Rank 0] Total Loss: 6.1573
+[2025-07-08 05:38:57] [Rank 0] Total FTA: 0.0000
+[2025-07-08 05:38:57] [Rank 0] Group 0 Loss: 6.0632
+[2025-07-08 05:38:57] [Rank 0] Group 1 Loss: 6.2653
+[2025-07-08 05:38:57] [Rank 0] Group 2 Loss: 6.2316
+[2025-07-08 05:38:57] [Rank 0] Group 3 Loss: 6.0980
+[2025-07-08 05:38:57] [Rank 0] Group 4 Loss: 6.1782
+[2025-07-08 05:38:57] [Rank 0] Group 5 Loss: 6.1582
+[2025-07-08 05:38:57] [Rank 0] Group 6 Loss: 6.1743
+[2025-07-08 05:38:57] [Rank 0] Group 7 Loss: 6.1761
+[2025-07-08 05:38:57] [Rank 0] Group 8 Loss: 6.1567
+[2025-07-08 05:38:57] [Rank 0] Group 9 Loss: 6.1810
+[2025-07-08 05:38:57] [Rank 0] Group 10 Loss: 6.1519
+[2025-07-08 05:38:57] [Rank 0] Group 11 Loss: 6.1570
+[2025-07-08 05:38:58] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-08 05:38:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 05:38:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 05:38:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 05:38:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 05:38:59] [Rank 0] step:2001/10000 train_time:160117ms step_avg:80.02ms
+[2025-07-08 05:39:00] [Rank 0] step:2021/10000 train_time:161589ms step_avg:79.95ms
+[2025-07-08 05:39:02] [Rank 0] step:2041/10000 train_time:163064ms step_avg:79.89ms
+[2025-07-08 05:39:03] [Rank 0] step:2061/10000 train_time:164536ms step_avg:79.83ms
+[2025-07-08 05:39:06] [Rank 0] step:2081/10000 train_time:166676ms step_avg:80.09ms
+[2025-07-08 05:39:07] [Rank 0] step:2101/10000 train_time:168151ms step_avg:80.03ms
+[2025-07-08 05:39:09] [Rank 0] step:2121/10000 train_time:169625ms step_avg:79.97ms
+[2025-07-08 05:39:10] [Rank 0] step:2141/10000 train_time:171101ms step_avg:79.92ms
+[2025-07-08 05:39:12] [Rank 0] step:2161/10000 train_time:172632ms step_avg:79.89ms
+[2025-07-08 05:39:13] [Rank 0] step:2181/10000 train_time:174294ms step_avg:79.91ms
+[2025-07-08 05:39:15] [Rank 0] step:2201/10000 train_time:175773ms step_avg:79.86ms
+[2025-07-08 05:39:16] [Rank 0] step:2221/10000 train_time:177252ms step_avg:79.81ms
+[2025-07-08 05:39:18] [Rank 0] step:2241/10000 train_time:178751ms step_avg:79.76ms
+[2025-07-08 05:39:20] [Rank 0] step:2261/10000 train_time:180899ms step_avg:80.01ms
+[2025-07-08 05:39:21] [Rank 0] step:2281/10000 train_time:182407ms step_avg:79.97ms
+[2025-07-08 05:39:23] [Rank 0] step:2301/10000 train_time:183911ms step_avg:79.93ms
+[2025-07-08 05:39:24] [Rank 0] step:2321/10000 train_time:185417ms step_avg:79.89ms
+[2025-07-08 05:39:26] [Rank 0] step:2341/10000 train_time:186974ms step_avg:79.87ms
+[2025-07-08 05:39:28] [Rank 0] step:2361/10000 train_time:189083ms step_avg:80.09ms
+[2025-07-08 05:39:29] [Rank 0] step:2381/10000 train_time:190590ms step_avg:80.05ms
+[2025-07-08 05:39:31] [Rank 0] step:2401/10000 train_time:192095ms step_avg:80.01ms
+[2025-07-08 05:39:32] [Rank 0] step:2421/10000 train_time:193603ms step_avg:79.97ms
+[2025-07-08 05:39:35] [Rank 0] step:2441/10000 train_time:195749ms step_avg:80.19ms
+[2025-07-08 05:39:36] [Rank 0] step:2461/10000 train_time:197254ms step_avg:80.15ms
+[2025-07-08 05:39:38] [Rank 0] step:2481/10000 train_time:198760ms step_avg:80.11ms
+[2025-07-08 05:39:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:39:40] [Rank 0] PRINT: step:2500/10000 train_loss:4.7846 val_loss:4.4447 train_time:200268ms step_avg:80.11ms
+[2025-07-08 05:39:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:39:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:39:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:45:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:45:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:45:08] [Rank 0] Total Loss: 5.7148
+[2025-07-08 05:45:08] [Rank 0] Total FTA: 0.0607
+[2025-07-08 05:45:08] [Rank 0] Group 0 Loss: 5.6527
+[2025-07-08 05:45:08] [Rank 0] Group 1 Loss: 6.0015
+[2025-07-08 05:45:08] [Rank 0] Group 2 Loss: 5.7220
+[2025-07-08 05:45:08] [Rank 0] Group 3 Loss: 5.6326
+[2025-07-08 05:45:08] [Rank 0] Group 4 Loss: 5.7952
+[2025-07-08 05:45:08] [Rank 0] Group 5 Loss: 5.6864
+[2025-07-08 05:45:08] [Rank 0] Group 6 Loss: 5.6730
+[2025-07-08 05:45:08] [Rank 0] Group 7 Loss: 5.7027
+[2025-07-08 05:45:08] [Rank 0] Group 8 Loss: 5.6878
+[2025-07-08 05:45:08] [Rank 0] Group 9 Loss: 5.6876
+[2025-07-08 05:45:08] [Rank 0] Group 10 Loss: 5.6935
+[2025-07-08 05:45:08] [Rank 0] Group 11 Loss: 5.7106
+[2025-07-08 05:45:08] [Rank 0] Group 0 FTA: 0.1404
+[2025-07-08 05:45:08] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 05:45:08] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-08 05:45:08] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-08 05:45:08] [Rank 0] Group 4 FTA: 0.0260
+[2025-07-08 05:45:08] [Rank 0] Group 5 FTA: 0.0365
+[2025-07-08 05:45:08] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-08 05:45:08] [Rank 0] Group 7 FTA: 0.0703
+[2025-07-08 05:45:08] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-08 05:45:08] [Rank 0] Group 9 FTA: 0.0273
+[2025-07-08 05:45:08] [Rank 0] Group 10 FTA: 0.0547
+[2025-07-08 05:45:08] [Rank 0] Group 11 FTA: 0.0576
+[2025-07-08 05:45:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 05:45:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 05:45:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 05:45:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 05:45:10] [Rank 0] step:2501/10000 train_time:200288ms step_avg:80.08ms
+[2025-07-08 05:45:12] [Rank 0] step:2521/10000 train_time:201802ms step_avg:80.05ms
+[2025-07-08 05:45:14] [Rank 0] step:2541/10000 train_time:204142ms step_avg:80.34ms
+[2025-07-08 05:45:15] [Rank 0] step:2561/10000 train_time:205636ms step_avg:80.30ms
+[2025-07-08 05:45:17] [Rank 0] step:2581/10000 train_time:207132ms step_avg:80.25ms
+[2025-07-08 05:45:18] [Rank 0] step:2601/10000 train_time:208629ms step_avg:80.21ms
+[2025-07-08 05:45:20] [Rank 0] step:2621/10000 train_time:210365ms step_avg:80.26ms
+[2025-07-08 05:45:21] [Rank 0] step:2641/10000 train_time:211865ms step_avg:80.22ms
+[2025-07-08 05:45:23] [Rank 0] step:2661/10000 train_time:213366ms step_avg:80.18ms
+[2025-07-08 05:45:24] [Rank 0] step:2681/10000 train_time:214868ms step_avg:80.14ms
+[2025-07-08 05:45:27] [Rank 0] step:2701/10000 train_time:217061ms step_avg:80.36ms
+[2025-07-08 05:45:28] [Rank 0] step:2721/10000 train_time:218541ms step_avg:80.32ms
+[2025-07-08 05:45:30] [Rank 0] step:2741/10000 train_time:220042ms step_avg:80.28ms
+[2025-07-08 05:45:31] [Rank 0] step:2761/10000 train_time:221546ms step_avg:80.24ms
+[2025-07-08 05:45:33] [Rank 0] step:2781/10000 train_time:223053ms step_avg:80.21ms
+[2025-07-08 05:45:35] [Rank 0] step:2801/10000 train_time:225214ms step_avg:80.40ms
0] step:2801/10000 train_time:225214ms step_avg:80.40ms +[2025-07-08 05:45:36] [Rank 0] step:2821/10000 train_time:226717ms step_avg:80.37ms +[2025-07-08 05:45:36] [Rank 0] step:2821/10000 train_time:226717ms step_avg:80.37ms +[2025-07-08 05:45:38] [Rank 0] step:2841/10000 train_time:228223ms step_avg:80.33ms +[2025-07-08 05:45:38] [Rank 0] step:2841/10000 train_time:228223ms step_avg:80.33ms +[2025-07-08 05:45:39] [Rank 0] step:2861/10000 train_time:229728ms step_avg:80.30ms +[2025-07-08 05:45:39] [Rank 0] step:2861/10000 train_time:229728ms step_avg:80.30ms +[2025-07-08 05:45:41] [Rank 0] step:2881/10000 train_time:231289ms step_avg:80.28ms +[2025-07-08 05:45:41] [Rank 0] step:2881/10000 train_time:231289ms step_avg:80.28ms +[2025-07-08 05:45:42] [Rank 0] step:2901/10000 train_time:232777ms step_avg:80.24ms +[2025-07-08 05:45:42] [Rank 0] step:2901/10000 train_time:232777ms step_avg:80.24ms +[2025-07-08 05:45:44] [Rank 0] step:2921/10000 train_time:234283ms step_avg:80.21ms +[2025-07-08 05:45:44] [Rank 0] step:2921/10000 train_time:234283ms step_avg:80.21ms +[2025-07-08 05:45:45] [Rank 0] step:2941/10000 train_time:235788ms step_avg:80.17ms +[2025-07-08 05:45:45] [Rank 0] step:2941/10000 train_time:235788ms step_avg:80.17ms +[2025-07-08 05:45:47] [Rank 0] step:2961/10000 train_time:237293ms step_avg:80.14ms +[2025-07-08 05:45:47] [Rank 0] step:2961/10000 train_time:237293ms step_avg:80.14ms +[2025-07-08 05:45:48] [Rank 0] step:2981/10000 train_time:239036ms step_avg:80.19ms +[2025-07-08 05:45:48] [Rank 0] step:2981/10000 train_time:239036ms step_avg:80.19ms +[2025-07-08 05:45:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 05:45:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 05:45:51] [Rank 0] PRINT: step:3000/10000 train_loss:4.1245 val_loss:3.8168 train_time:240543ms step_avg:80.18ms +[2025-07-08 05:45:51] [Rank 0] PRINT: step:3000/10000 train_loss:4.1245 val_loss:3.8168 train_time:240543ms step_avg:80.18ms +[2025-07-08 05:45:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 05:45:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 05:45:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 05:45:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
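The stratified-sampling message above, together with the fixed evaluation-set size of 5633 reported at every evaluation below, suggests the script balances the detailed-evaluation set across the twelve groups before measuring per-group loss and FTA. A minimal sketch of that idea; the helper name and the per-class ceiling quota are assumptions, since the sampling code itself is not part of this log:

```python
import random
from collections import defaultdict

def stratified_sample(samples, labels, target=5000, seed=0):
    # Hypothetical helper: bucket samples by class, then take an equal
    # per-class quota so every group is represented in the eval set.
    rng = random.Random(seed)
    by_class = defaultdict(list)
    for sample, label in zip(samples, labels):
        by_class[label].append(sample)
    quota = -(-target // len(by_class))  # ceil(target / num_classes)
    picked = []
    for group in by_class.values():
        rng.shuffle(group)
        picked.extend(group[:quota])  # small classes contribute all they have
    return picked
```

Per-class rounding like this is one plausible reason the final set (5633) overshoots the ~5000 target; the log alone does not show the exact rule.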
+[2025-07-08 05:45:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:51:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:51:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:51:20] [Rank 0] Total Loss: 5.3258
+[2025-07-08 05:51:20] [Rank 0] Total FTA: 0.0822
+[2025-07-08 05:51:20] [Rank 0] Group 0 Loss: 5.4052
+[2025-07-08 05:51:20] [Rank 0] Group 1 Loss: 5.5002
+[2025-07-08 05:51:20] [Rank 0] Group 2 Loss: 5.2409
+[2025-07-08 05:51:20] [Rank 0] Group 3 Loss: 5.2325
+[2025-07-08 05:51:20] [Rank 0] Group 4 Loss: 5.3931
+[2025-07-08 05:51:20] [Rank 0] Group 5 Loss: 5.2705
+[2025-07-08 05:51:20] [Rank 0] Group 6 Loss: 5.3143
+[2025-07-08 05:51:20] [Rank 0] Group 7 Loss: 5.3144
+[2025-07-08 05:51:20] [Rank 0] Group 8 Loss: 5.2882
+[2025-07-08 05:51:20] [Rank 0] Group 9 Loss: 5.3448
+[2025-07-08 05:51:20] [Rank 0] Group 10 Loss: 5.2944
+[2025-07-08 05:51:20] [Rank 0] Group 11 Loss: 5.2965
+[2025-07-08 05:51:20] [Rank 0] Group 0 FTA: 0.1808
+[2025-07-08 05:51:20] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 05:51:20] [Rank 0] Group 2 FTA: 0.0573
+[2025-07-08 05:51:20] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-08 05:51:20] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-08 05:51:20] [Rank 0] Group 5 FTA: 0.0443
+[2025-07-08 05:51:20] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-08 05:51:20] [Rank 0] Group 7 FTA: 0.0703
+[2025-07-08 05:51:20] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-08 05:51:20] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-08 05:51:20] [Rank 0] Group 10 FTA: 0.1035
+[2025-07-08 05:51:20] [Rank 0] Group 11 FTA: 0.0781
+[2025-07-08 05:51:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 05:51:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 05:51:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 05:51:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 05:51:22] [Rank 0] step:3001/10000 train_time:240564ms step_avg:80.16ms
+[2025-07-08 05:51:23] [Rank 0] step:3021/10000 train_time:242061ms step_avg:80.13ms
+[2025-07-08 05:51:25] [Rank 0] step:3041/10000 train_time:243555ms step_avg:80.09ms
+[2025-07-08 05:51:27] [Rank 0] step:3061/10000 train_time:245112ms step_avg:80.08ms
+[2025-07-08 05:51:28] [Rank 0] step:3081/10000 train_time:247201ms step_avg:80.23ms
+[2025-07-08 05:51:30] [Rank 0] step:3101/10000 train_time:248702ms step_avg:80.20ms
+[2025-07-08 05:51:31] [Rank 0] step:3121/10000 train_time:250300ms step_avg:80.20ms
+[2025-07-08 05:51:33] [Rank 0] step:3141/10000 train_time:251919ms step_avg:80.20ms
+[2025-07-08 05:51:35] [Rank 0] step:3161/10000 train_time:254068ms step_avg:80.38ms
+[2025-07-08 05:51:37] [Rank 0] step:3181/10000 train_time:255567ms step_avg:80.34ms
+[2025-07-08 05:51:38] [Rank 0] step:3201/10000 train_time:257069ms step_avg:80.31ms
+[2025-07-08 05:51:40] [Rank 0] step:3221/10000 train_time:258573ms step_avg:80.28ms
+[2025-07-08 05:51:41] [Rank 0] step:3241/10000 train_time:260130ms step_avg:80.26ms
+[2025-07-08 05:51:43] [Rank 0] step:3261/10000 train_time:261816ms step_avg:80.29ms
+[2025-07-08 05:51:44] [Rank 0] step:3281/10000 train_time:263319ms step_avg:80.26ms
+[2025-07-08 05:51:46] [Rank 0] step:3301/10000 train_time:264824ms step_avg:80.23ms
+[2025-07-08 05:51:47] [Rank 0] step:3321/10000 train_time:266330ms step_avg:80.20ms
+[2025-07-08 05:51:49] [Rank 0] step:3341/10000 train_time:268070ms step_avg:80.24ms
+[2025-07-08 05:51:51] [Rank 0] step:3361/10000 train_time:269576ms step_avg:80.21ms
+[2025-07-08 05:51:52] [Rank 0] step:3381/10000 train_time:271083ms step_avg:80.18ms
+[2025-07-08 05:51:54] [Rank 0] step:3401/10000 train_time:272590ms step_avg:80.15ms
+[2025-07-08 05:51:55] [Rank 0] step:3421/10000 train_time:274152ms step_avg:80.14ms
+[2025-07-08 05:51:57] [Rank 0] step:3441/10000 train_time:275837ms step_avg:80.16ms
+[2025-07-08 05:51:58] [Rank 0] step:3461/10000 train_time:277343ms step_avg:80.13ms
+[2025-07-08 05:52:00] [Rank 0] step:3481/10000 train_time:278852ms step_avg:80.11ms
+[2025-07-08 05:52:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:52:02] [Rank 0] PRINT: step:3500/10000 train_loss:3.5429 val_loss:3.2840 train_time:280362ms step_avg:80.10ms
+[2025-07-08 05:52:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:52:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
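The divisibility warning printed before each validation pass is plain integer arithmetic: 1966080 / 262144 = 7.5, so a loop over whole batches evaluates 7 × 262144 = 1835008 tokens and silently skips the remaining 131072. A sketch of the check, using the values from the log (the variable names mirror the hyperparameters; the surrounding validation loop is assumed):

```python
val_tokens = 1_966_080      # from hyperparameters
val_batch_size = 262_144    # from the warning message

full_batches, remainder = divmod(val_tokens, val_batch_size)
# full_batches == 7, remainder == 131_072 tokens dropped per validation pass
if remainder:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
```

Either trimming val_tokens to a multiple of the batch size or padding the final batch would silence the warning.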
+[2025-07-08 05:52:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:57:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:57:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:57:27] [Rank 0] Total Loss: 4.9853
+[2025-07-08 05:57:27] [Rank 0] Total FTA: 0.1005
+[2025-07-08 05:57:27] [Rank 0] Group 0 Loss: 5.0596
+[2025-07-08 05:57:27] [Rank 0] Group 1 Loss: 5.0665
+[2025-07-08 05:57:27] [Rank 0] Group 2 Loss: 4.8855
+[2025-07-08 05:57:27] [Rank 0] Group 3 Loss: 4.8956
+[2025-07-08 05:57:27] [Rank 0] Group 4 Loss: 5.0379
+[2025-07-08 05:57:27] [Rank 0] Group 5 Loss: 4.9311
+[2025-07-08 05:57:27] [Rank 0] Group 6 Loss: 4.9473
+[2025-07-08 05:57:27] [Rank 0] Group 7 Loss: 4.9816
+[2025-07-08 05:57:27] [Rank 0] Group 8 Loss: 4.9816
+[2025-07-08 05:57:27] [Rank 0] Group 9 Loss: 4.9796
+[2025-07-08 05:57:27] [Rank 0] Group 10 Loss: 4.9779
+[2025-07-08 05:57:27] [Rank 0] Group 11 Loss: 4.9929
+[2025-07-08 05:57:27] [Rank 0] Group 0 FTA: 0.1704
+[2025-07-08 05:57:27] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-08 05:57:27] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-08 05:57:27] [Rank 0] Group 3 FTA: 0.0443
+[2025-07-08 05:57:27] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-08 05:57:27] [Rank 0] Group 5 FTA: 0.0365
+[2025-07-08 05:57:27] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-08 05:57:27] [Rank 0] Group 7 FTA: 0.1198
+[2025-07-08 05:57:27] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-08 05:57:27] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-08 05:57:27] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-08 05:57:27] [Rank 0] Group 11 FTA: 0.0869
+[2025-07-08 05:57:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 05:57:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 05:57:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 05:57:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 05:57:29] [Rank 0] step:3501/10000 train_time:280382ms step_avg:80.09ms
+[2025-07-08 05:57:31] [Rank 0] step:3521/10000 train_time:282541ms step_avg:80.24ms
+[2025-07-08 05:57:32] [Rank 0] step:3541/10000 train_time:284037ms step_avg:80.21ms
+[2025-07-08 05:57:34] [Rank 0] step:3561/10000 train_time:285535ms step_avg:80.18ms
+[2025-07-08 05:57:35] [Rank 0] step:3581/10000 train_time:287039ms step_avg:80.16ms
+[2025-07-08 05:57:38] [Rank 0] step:3601/10000 train_time:289208ms step_avg:80.31ms
+[2025-07-08 05:57:39] [Rank 0] step:3621/10000 train_time:290690ms step_avg:80.28ms
+[2025-07-08 05:57:41] [Rank 0] step:3641/10000 train_time:292191ms step_avg:80.25ms
+[2025-07-08 05:57:42] [Rank 0] step:3661/10000 train_time:293693ms step_avg:80.22ms
+[2025-07-08 05:57:44] [Rank 0] step:3681/10000 train_time:295198ms step_avg:80.20ms
+[2025-07-08 05:57:46] [Rank 0] step:3701/10000 train_time:297360ms step_avg:80.35ms
+[2025-07-08 05:57:47] [Rank 0] step:3721/10000 train_time:298864ms step_avg:80.32ms
+[2025-07-08 05:57:49] [Rank 0] step:3741/10000 train_time:300369ms step_avg:80.29ms
+[2025-07-08 05:57:50] [Rank 0] step:3761/10000 train_time:301875ms step_avg:80.26ms
+[2025-07-08 05:57:53] [Rank 0] step:3781/10000 train_time:303596ms step_avg:80.30ms
+[2025-07-08 05:57:54] [Rank 0] step:3801/10000 train_time:305768ms step_avg:80.44ms
+[2025-07-08 05:57:56] [Rank 0] step:3821/10000 train_time:307274ms step_avg:80.42ms
+[2025-07-08 05:57:57] [Rank 0] step:3841/10000 train_time:308778ms step_avg:80.39ms
+[2025-07-08 05:57:59] [Rank 0] step:3861/10000 train_time:310284ms step_avg:80.36ms
+[2025-07-08 05:58:00] [Rank 0] step:3881/10000 train_time:312023ms step_avg:80.40ms
+[2025-07-08 05:58:02] [Rank 0] step:3901/10000 train_time:313529ms step_avg:80.37ms
+[2025-07-08 05:58:03] [Rank 0] step:3921/10000 train_time:315036ms step_avg:80.35ms
+[2025-07-08 05:58:05] [Rank 0] step:3941/10000 train_time:316547ms step_avg:80.32ms
+[2025-07-08 05:58:07] [Rank 0] step:3961/10000 train_time:318055ms step_avg:80.30ms
+[2025-07-08 05:58:09] [Rank 0] step:3981/10000 train_time:320231ms step_avg:80.44ms
+[2025-07-08 05:58:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:58:11] [Rank 0] PRINT: step:4000/10000 train_loss:3.0593 val_loss:2.8493 train_time:321737ms step_avg:80.43ms
+[2025-07-08 05:58:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:58:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
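The step_avg printed with every progress line is cumulative wall-clock training time divided by the step index, which is why it drifts slowly as occasional slow steps are amortized. A one-liner reproduces the step-4000 value above (the function name is illustrative):

```python
def step_avg_ms(train_time_ms: int, step: int) -> float:
    """Cumulative training time divided by completed steps."""
    return train_time_ms / step

# Values from the step-4000 line above: 321737 ms over 4000 steps.
print(f"{step_avg_ms(321_737, 4_000):.2f}ms")  # -> 80.43ms
```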
+[2025-07-08 05:58:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:03:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:03:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:03:37] [Rank 0] Total Loss: 4.6962
+[2025-07-08 06:03:37] [Rank 0] Total FTA: 0.1067
+[2025-07-08 06:03:37] [Rank 0] Group 0 Loss: 4.8111
+[2025-07-08 06:03:37] [Rank 0] Group 1 Loss: 4.7247
+[2025-07-08 06:03:37] [Rank 0] Group 2 Loss: 4.5919
+[2025-07-08 06:03:37] [Rank 0] Group 3 Loss: 4.5970
+[2025-07-08 06:03:37] [Rank 0] Group 4 Loss: 4.7000
+[2025-07-08 06:03:37] [Rank 0] Group 5 Loss: 4.6289
+[2025-07-08 06:03:37] [Rank 0] Group 6 Loss: 4.6728
+[2025-07-08 06:03:37] [Rank 0] Group 7 Loss: 4.7100
+[2025-07-08 06:03:37] [Rank 0] Group 8 Loss: 4.6915
+[2025-07-08 06:03:37] [Rank 0] Group 9 Loss: 4.6815
+[2025-07-08 06:03:37] [Rank 0] Group 10 Loss: 4.7190
+[2025-07-08 06:03:37] [Rank 0] Group 11 Loss: 4.6968
+[2025-07-08 06:03:37] [Rank 0] Group 0 FTA: 0.1977
+[2025-07-08 06:03:37] [Rank 0] Group 1 FTA: 0.1901
+[2025-07-08 06:03:37] [Rank 0] Group 2 FTA: 0.0599
+[2025-07-08 06:03:37] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-08 06:03:37] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-08 06:03:37] [Rank 0] Group 5 FTA: 0.0443
+[2025-07-08 06:03:37] [Rank 0] Group 6 FTA: 0.1224
+[2025-07-08 06:03:37] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-08 06:03:37] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-08 06:03:37] [Rank 0] Group 9 FTA: 0.1172
+[2025-07-08 06:03:37] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-08 06:03:37] [Rank 0] Group 11 FTA: 0.1025
+[2025-07-08 06:03:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:03:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:03:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:03:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:03:39] [Rank 0] step:4001/10000 train_time:321758ms step_avg:80.42ms
+[2025-07-08 06:03:40] [Rank 0] step:4021/10000 train_time:323268ms step_avg:80.39ms
+[2025-07-08 06:03:42] [Rank 0] step:4041/10000 train_time:324761ms step_avg:80.37ms
+[2025-07-08 06:03:44] [Rank 0] step:4061/10000 train_time:326905ms step_avg:80.50ms
+[2025-07-08 06:03:45] [Rank 0] step:4081/10000 train_time:328400ms step_avg:80.47ms
+[2025-07-08 06:03:47] [Rank 0] step:4101/10000 train_time:329898ms step_avg:80.44ms
+[2025-07-08 06:03:48] [Rank 0] step:4121/10000 train_time:331397ms step_avg:80.42ms
+[2025-07-08 06:03:50] [Rank 0] step:4141/10000 train_time:333153ms step_avg:80.45ms
+[2025-07-08 06:03:52] [Rank 0] step:4161/10000 train_time:335052ms step_avg:80.52ms
+[2025-07-08 06:03:53] [Rank 0] step:4181/10000 train_time:336555ms step_avg:80.50ms
+[2025-07-08 06:03:55] [Rank 0] step:4201/10000 train_time:338061ms step_avg:80.47ms
+[2025-07-08 06:03:56] [Rank 0] step:4221/10000 train_time:339565ms step_avg:80.45ms
+[2025-07-08 06:03:58] [Rank 0] step:4241/10000 train_time:341306ms step_avg:80.48ms
+[2025-07-08 06:04:00] [Rank 0] step:4261/10000 train_time:342809ms step_avg:80.45ms
+[2025-07-08 06:04:01] [Rank 0] step:4281/10000 train_time:344315ms step_avg:80.43ms
+[2025-07-08 06:04:03] [Rank 0] step:4301/10000 train_time:345820ms step_avg:80.40ms
+[2025-07-08 06:04:05] [Rank 0] step:4321/10000 train_time:347326ms step_avg:80.38ms
+[2025-07-08 06:04:06] [Rank 0] step:4341/10000 train_time:349485ms step_avg:80.51ms
+[2025-07-08 06:04:08] [Rank 0] step:4361/10000 train_time:350991ms step_avg:80.48ms
+[2025-07-08 06:04:09] [Rank 0] step:4381/10000 train_time:352496ms step_avg:80.46ms
+[2025-07-08 06:04:11] [Rank 0] step:4401/10000 train_time:354239ms step_avg:80.49ms
+[2025-07-08 06:04:13] [Rank 0] step:4421/10000 train_time:355977ms step_avg:80.52ms
+[2025-07-08 06:04:14] [Rank 0] step:4441/10000 train_time:357483ms step_avg:80.50ms
+[2025-07-08 06:04:16] [Rank 0] step:4461/10000 train_time:358987ms step_avg:80.47ms
+[2025-07-08 06:04:17] [Rank 0] step:4481/10000 train_time:360493ms step_avg:80.45ms
+[2025-07-08 06:04:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:04:20] [Rank 0] PRINT: step:4500/10000 train_loss:2.6727 val_loss:2.5118 train_time:361998ms step_avg:80.44ms
+[2025-07-08 06:04:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:04:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
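Each detailed evaluation emits one loss and one FTA value per group plus the totals. Assuming "FTA" is a first-token accuracy (the corresponding curve files are named with "acc"), the aggregation reduces to a grouped mean; a hedged sketch, with the record layout invented for illustration:

```python
from collections import defaultdict

def grouped_metrics(records):
    """records: (group_id, loss, first_token_correct) triples.

    Illustrative only: the real evaluation code is not shown in this
    log, and FTA is assumed here to mean first-token accuracy.
    """
    loss_sum, hits, count = defaultdict(float), defaultdict(int), defaultdict(int)
    for group, loss, correct in records:
        loss_sum[group] += loss
        hits[group] += int(correct)
        count[group] += 1
    per_group = {g: (loss_sum[g] / count[g], hits[g] / count[g]) for g in count}
    n = sum(count.values())
    total = (sum(loss_sum.values()) / n, sum(hits.values()) / n)
    return per_group, total
```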
+[2025-07-08 06:04:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:09:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:09:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:09:45] [Rank 0] Total Loss: 4.5338
+[2025-07-08 06:09:45] [Rank 0] Total FTA: 0.1296
+[2025-07-08 06:09:45] [Rank 0] Group 0 Loss: 4.8524
+[2025-07-08 06:09:45] [Rank 0] Group 1 Loss: 4.6326
+[2025-07-08 06:09:45] [Rank 0] Group 2 Loss: 4.2886
+[2025-07-08 06:09:45] [Rank 0] Group 3 Loss: 4.5417
+[2025-07-08 06:09:45] [Rank 0] Group 4 Loss: 4.4409
+[2025-07-08 06:09:45] [Rank 0] Group 5 Loss: 4.4826
+[2025-07-08 06:09:45] [Rank 0] Group 6 Loss: 4.4331
+[2025-07-08 06:09:45] [Rank 0] Group 7 Loss: 4.4905
+[2025-07-08 06:09:45] [Rank 0] Group 8 Loss: 4.5015
+[2025-07-08 06:09:45] [Rank 0] Group 9 Loss: 4.4194
+[2025-07-08 06:09:45] [Rank 0] Group 10 Loss: 4.4771
+[2025-07-08 06:09:45] [Rank 0] Group 11 Loss: 4.5238
+[2025-07-08 06:09:45] [Rank 0] Group 0 FTA: 0.1899
+[2025-07-08 06:09:45] [Rank 0] Group 1 FTA: 0.1615
+[2025-07-08 06:09:45] [Rank 0] Group 2 FTA: 0.1745
+[2025-07-08 06:09:45] [Rank 0] Group 3 FTA: 0.0859
+[2025-07-08 06:09:45] [Rank 0] Group 4 FTA: 0.0755
+[2025-07-08 06:09:45] [Rank 0] Group 5 FTA: 0.0755
+[2025-07-08 06:09:45] [Rank 0] Group 6 FTA: 0.1380
+[2025-07-08 06:09:45] [Rank 0] Group 7 FTA: 0.1276
+[2025-07-08 06:09:45] [Rank 0] Group 8 FTA: 0.1068
+[2025-07-08 06:09:45] [Rank 0] Group 9 FTA: 0.1328
+[2025-07-08 06:09:45] [Rank 0] Group 10 FTA: 0.1211
+[2025-07-08 06:09:45] [Rank 0] Group 11 FTA: 0.1221
+[2025-07-08 06:09:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:09:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:09:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:09:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:09:47] [Rank 0] step:4501/10000 train_time:362026ms step_avg:80.43ms
+[2025-07-08 06:09:49] [Rank 0] step:4521/10000 train_time:364232ms step_avg:80.56ms
+[2025-07-08 06:09:50] [Rank 0] step:4541/10000 train_time:365730ms step_avg:80.54ms
+[2025-07-08 06:09:52] [Rank 0] step:4561/10000 train_time:367228ms step_avg:80.51ms
+[2025-07-08 06:09:53] [Rank 0] step:4581/10000 train_time:368727ms step_avg:80.49ms
+[2025-07-08 06:09:55] [Rank 0] step:4601/10000 train_time:370470ms step_avg:80.52ms
+[2025-07-08 06:09:57] [Rank 0] step:4621/10000 train_time:371966ms step_avg:80.49ms
+[2025-07-08 06:09:58] [Rank 0] step:4641/10000 train_time:373466ms step_avg:80.47ms
+[2025-07-08 06:10:00] [Rank 0] step:4661/10000 train_time:374969ms step_avg:80.45ms
+[2025-07-08 06:10:01] [Rank 0] step:4681/10000 train_time:376477ms step_avg:80.43ms
+[2025-07-08 06:10:03] [Rank 0] step:4701/10000 train_time:378114ms step_avg:80.43ms
+[2025-07-08 06:10:04] [Rank 0] step:4721/10000 train_time:379616ms step_avg:80.41ms
+[2025-07-08 06:10:06] [Rank 0] step:4741/10000 train_time:381121ms step_avg:80.39ms
+[2025-07-08 06:10:07] [Rank 0] step:4761/10000 train_time:382627ms step_avg:80.37ms
+[2025-07-08 06:10:09] [Rank 0] step:4781/10000 train_time:384366ms step_avg:80.39ms
+[2025-07-08 06:10:10] [Rank 0] step:4801/10000 train_time:385871ms step_avg:80.37ms
+[2025-07-08 06:10:12] [Rank 0] step:4821/10000 train_time:387377ms step_avg:80.35ms
+[2025-07-08 06:10:13] [Rank 0] step:4841/10000 train_time:388884ms step_avg:80.33ms
+[2025-07-08 06:10:15] [Rank 0] step:4861/10000 train_time:390388ms step_avg:80.31ms
+[2025-07-08 06:10:17] [Rank 0] step:4881/10000 train_time:392134ms step_avg:80.34ms
+[2025-07-08 06:10:18] [Rank 0] step:4901/10000 train_time:393639ms step_avg:80.32ms
+[2025-07-08 06:10:20] [Rank 0] step:4921/10000 train_time:395145ms step_avg:80.30ms
+[2025-07-08 06:10:21] [Rank 0] step:4941/10000 train_time:396650ms step_avg:80.28ms
+[2025-07-08 06:10:23] [Rank 0] step:4961/10000 train_time:398808ms step_avg:80.39ms
+[2025-07-08 06:10:25] [Rank 0] step:4981/10000 train_time:400315ms step_avg:80.37ms
+[2025-07-08 06:10:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:10:27] [Rank 0] PRINT: step:5000/10000 train_loss:2.3798 val_loss:2.2612 train_time:401821ms step_avg:80.36ms
+[2025-07-08 06:10:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:10:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:10:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:15:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:15:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:15:53] [Rank 0] Total Loss: 4.3861
+[2025-07-08 06:15:53] [Rank 0] Total FTA: 0.1292
+[2025-07-08 06:15:53] [Rank 0] Group 0 Loss: 4.4910
+[2025-07-08 06:15:53] [Rank 0] Group 1 Loss: 4.9376
+[2025-07-08 06:15:53] [Rank 0] Group 2 Loss: 4.1663
+[2025-07-08 06:15:53] [Rank 0] Group 3 Loss: 4.3956
+[2025-07-08 06:15:53] [Rank 0] Group 4 Loss: 4.2847
+[2025-07-08 06:15:53] [Rank 0] Group 5 Loss: 4.3070
+[2025-07-08 06:15:53] [Rank 0] Group 6 Loss: 4.2765
+[2025-07-08 06:15:53] [Rank 0] Group 7 Loss: 4.3393
+[2025-07-08 06:15:53] [Rank 0] Group 8 Loss: 4.3547
+[2025-07-08 06:15:53] [Rank 0] Group 9 Loss: 4.2901
+[2025-07-08 06:15:53] [Rank 0] Group 10 Loss: 4.3722
+[2025-07-08 06:15:53] [Rank 0] Group 11 Loss: 4.3484
+[2025-07-08 06:15:53] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-08 06:15:53] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-08 06:15:53] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-08 06:15:53] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-08 06:15:53] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-08 06:15:53] [Rank 0] Group 5 FTA: 0.0990
+[2025-07-08 06:15:53] [Rank 0] Group 6 FTA: 0.1589
+[2025-07-08 06:15:53] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-08 06:15:53] [Rank 0] Group 8 FTA: 0.1510
+[2025-07-08 06:15:53] [Rank 0] Group 9 FTA: 0.1211
+[2025-07-08 06:15:53] [Rank 0] Group 10 FTA: 0.1641
+[2025-07-08 06:15:53] [Rank 0] Group 11 FTA: 0.1270
+[2025-07-08 06:15:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:15:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:15:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:15:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:15:54] [Rank 0] step:5001/10000 train_time:401841ms step_avg:80.35ms
+[2025-07-08 06:15:56] [Rank 0] step:5021/10000 train_time:403354ms step_avg:80.33ms
+[2025-07-08 06:15:58] [Rank 0] step:5041/10000 train_time:404908ms step_avg:80.32ms
+[2025-07-08 06:16:00] [Rank 0] step:5061/10000 train_time:407007ms step_avg:80.42ms
+[2025-07-08 06:16:01] [Rank 0] step:5081/10000 train_time:408505ms step_avg:80.40ms
+[2025-07-08 06:16:03] [Rank 0] step:5101/10000 train_time:410009ms step_avg:80.38ms
+[2025-07-08 06:16:04] [Rank 0] step:5121/10000 train_time:411513ms step_avg:80.36ms
+[2025-07-08 06:16:06] [Rank 0] step:5141/10000 train_time:413676ms step_avg:80.47ms
+[2025-07-08 06:16:08] [Rank 0] step:5161/10000 train_time:415177ms step_avg:80.45ms
+[2025-07-08 06:16:09] [Rank 0] step:5181/10000 train_time:416679ms step_avg:80.42ms
+[2025-07-08 06:16:11] [Rank 0] step:5201/10000 train_time:418180ms step_avg:80.40ms
+[2025-07-08 06:16:13] [Rank 0] step:5221/10000 train_time:419740ms step_avg:80.39ms
+[2025-07-08 06:16:14] [Rank 0] step:5241/10000 train_time:421859ms step_avg:80.49ms
+[2025-07-08 06:16:16] [Rank 0] step:5261/10000 train_time:423364ms step_avg:80.47ms
+[2025-07-08 06:16:17] [Rank 0] step:5281/10000 train_time:424870ms step_avg:80.45ms
+[2025-07-08 06:16:19] [Rank 0] step:5301/10000 train_time:426377ms step_avg:80.43ms
+[2025-07-08 06:16:21] [Rank 0] step:5321/10000 train_time:428529ms step_avg:80.54ms
+[2025-07-08 06:16:23] [Rank 0] step:5341/10000 train_time:430033ms step_avg:80.52ms
+[2025-07-08 06:16:24] [Rank 0] step:5361/10000 train_time:431541ms step_avg:80.50ms
+[2025-07-08 06:16:26] [Rank 0] step:5381/10000 train_time:433049ms step_avg:80.48ms
+[2025-07-08 06:16:28] [Rank 0] step:5401/10000 train_time:434557ms step_avg:80.46ms
+[2025-07-08 06:16:29] [Rank 0] step:5421/10000 train_time:436716ms step_avg:80.56ms
+[2025-07-08 06:16:31] [Rank 0] step:5441/10000 train_time:438223ms step_avg:80.54ms
+[2025-07-08 06:16:32] [Rank 0] step:5461/10000 train_time:439732ms step_avg:80.52ms
+[2025-07-08 06:16:34] [Rank 0] step:5481/10000 train_time:441240ms step_avg:80.50ms
+[2025-07-08 06:16:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:16:37] [Rank 0] PRINT: step:5500/10000 train_loss:2.1655 val_loss:2.0795 train_time:443409ms step_avg:80.62ms
+[2025-07-08 06:16:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:16:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:16:37] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 06:16:37] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 06:22:04] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 06:22:04] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 06:22:04] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 06:22:04] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 06:22:04] [Rank 0] Total Loss: 4.2657 +[2025-07-08 06:22:04] [Rank 0] Total Loss: 4.2657 +[2025-07-08 06:22:04] [Rank 0] Total FTA: 0.1402 +[2025-07-08 06:22:04] [Rank 0] Total FTA: 0.1402 +[2025-07-08 06:22:04] [Rank 0] Group 0 Loss: 4.4312 +[2025-07-08 06:22:04] [Rank 0] Group 0 Loss: 4.4312 +[2025-07-08 06:22:04] [Rank 0] Group 1 Loss: 4.3566 +[2025-07-08 06:22:04] [Rank 0] Group 1 Loss: 4.3566 +[2025-07-08 06:22:04] [Rank 0] Group 2 Loss: 4.0235 +[2025-07-08 06:22:04] [Rank 0] Group 2 Loss: 4.0235 +[2025-07-08 06:22:04] [Rank 0] Group 3 Loss: 4.2795 +[2025-07-08 06:22:04] [Rank 0] Group 3 Loss: 4.2795 +[2025-07-08 06:22:04] [Rank 0] Group 4 Loss: 4.2130 +[2025-07-08 06:22:04] [Rank 0] Group 4 Loss: 4.2130 +[2025-07-08 06:22:04] [Rank 0] Group 5 Loss: 4.1898 +[2025-07-08 06:22:04] [Rank 0] Group 5 Loss: 4.1898 +[2025-07-08 06:22:04] [Rank 0] Group 6 Loss: 4.1982 +[2025-07-08 06:22:04] [Rank 0] Group 6 Loss: 4.1982 +[2025-07-08 06:22:04] [Rank 0] Group 7 Loss: 4.2615 +[2025-07-08 06:22:04] [Rank 0] Group 7 Loss: 4.2615 +[2025-07-08 06:22:04] [Rank 0] Group 8 Loss: 4.2624 +[2025-07-08 06:22:04] [Rank 0] Group 8 Loss: 4.2624 +[2025-07-08 06:22:04] [Rank 0] Group 9 Loss: 4.2474 +[2025-07-08 06:22:04] [Rank 0] Group 9 Loss: 4.2474 +[2025-07-08 06:22:04] [Rank 0] Group 10 Loss: 4.2735 +[2025-07-08 06:22:04] [Rank 0] Group 10 Loss: 4.2735 +[2025-07-08 06:22:04] [Rank 0] Group 11 Loss: 4.2699 +[2025-07-08 06:22:04] [Rank 0] Group 11 Loss: 4.2699 +[2025-07-08 06:22:04] [Rank 0] Group 0 FTA: 0.1730 +[2025-07-08 06:22:04] [Rank 0] Group 0 FTA: 0.1730 +[2025-07-08 06:22:04] [Rank 0] Group 1 FTA: 0.1771 +[2025-07-08 06:22:04] [Rank 0] Group 1 FTA: 0.1771 +[2025-07-08 06:22:04] [Rank 0] Group 2 FTA: 0.2578 +[2025-07-08 06:22:04] [Rank 0] Group 2 FTA: 0.2578 +[2025-07-08 06:22:04] [Rank 0] Group 3 FTA: 0.0703 +[2025-07-08 06:22:04] [Rank 0] Group 3 FTA: 0.0703 +[2025-07-08 06:22:04] [Rank 0] Group 4 FTA: 0.0859 +[2025-07-08 06:22:04] [Rank 0] Group 4 FTA: 0.0859 +[2025-07-08 06:22:04] [Rank 0] Group 5 FTA: 0.1042 +[2025-07-08 06:22:04] [Rank 0] Group 5 FTA: 0.1042 +[2025-07-08 06:22:04] [Rank 0] Group 6 FTA: 0.1432 +[2025-07-08 06:22:04] [Rank 0] Group 6 FTA: 0.1432 +[2025-07-08 06:22:04] [Rank 0] Group 7 FTA: 0.1458 +[2025-07-08 06:22:04] [Rank 0] Group 7 FTA: 0.1458 +[2025-07-08 06:22:04] [Rank 0] Group 8 FTA: 0.1354 +[2025-07-08 06:22:04] [Rank 0] Group 8 FTA: 0.1354 +[2025-07-08 06:22:04] [Rank 0] Group 9 FTA: 0.1250 +[2025-07-08 06:22:04] [Rank 0] Group 9 FTA: 0.1250 +[2025-07-08 06:22:04] [Rank 0] Group 10 FTA: 0.1367 +[2025-07-08 06:22:04] [Rank 0] Group 10 FTA: 0.1367 +[2025-07-08 06:22:04] [Rank 0] Group 11 FTA: 0.1221 +[2025-07-08 06:22:04] [Rank 0] Group 11 FTA: 0.1221 +[2025-07-08 06:22:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png +[2025-07-08 06:22:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png +[2025-07-08 06:22:05] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png +[2025-07-08 06:22:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png +[2025-07-08 06:22:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png +[2025-07-08 06:22:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png +[2025-07-08 06:22:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png +[2025-07-08 06:22:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png +[2025-07-08 06:22:05] [Rank 0] step:5501/10000 train_time:443429ms step_avg:80.61ms +[2025-07-08 06:22:05] [Rank 0] step:5501/10000 train_time:443429ms step_avg:80.61ms +[2025-07-08 06:22:07] [Rank 0] step:5521/10000 train_time:445149ms step_avg:80.63ms +[2025-07-08 06:22:07] [Rank 0] step:5521/10000 train_time:445149ms step_avg:80.63ms +[2025-07-08 06:22:09] [Rank 0] step:5541/10000 train_time:446647ms step_avg:80.61ms +[2025-07-08 06:22:09] [Rank 0] step:5541/10000 train_time:446647ms step_avg:80.61ms +[2025-07-08 06:22:10] [Rank 0] step:5561/10000 train_time:448145ms step_avg:80.59ms +[2025-07-08 06:22:10] [Rank 0] step:5561/10000 train_time:448145ms step_avg:80.59ms +[2025-07-08 06:22:12] [Rank 0] step:5581/10000 train_time:449701ms step_avg:80.58ms +[2025-07-08 06:22:12] [Rank 0] step:5581/10000 train_time:449701ms step_avg:80.58ms +[2025-07-08 06:22:13] [Rank 0] step:5601/10000 train_time:451384ms step_avg:80.59ms +[2025-07-08 06:22:13] [Rank 0] step:5601/10000 train_time:451384ms step_avg:80.59ms +[2025-07-08 06:22:15] [Rank 0] step:5621/10000 train_time:452882ms step_avg:80.57ms +[2025-07-08 06:22:15] [Rank 0] step:5621/10000 train_time:452882ms step_avg:80.57ms +[2025-07-08 06:22:16] [Rank 0] step:5641/10000 train_time:454381ms step_avg:80.55ms +[2025-07-08 06:22:16] [Rank 0] step:5641/10000 train_time:454381ms step_avg:80.55ms +[2025-07-08 06:22:18] [Rank 0] step:5661/10000 train_time:455881ms step_avg:80.53ms +[2025-07-08 06:22:18] [Rank 0] step:5661/10000 train_time:455881ms step_avg:80.53ms +[2025-07-08 06:22:20] [Rank 0] step:5681/10000 train_time:458032ms step_avg:80.63ms +[2025-07-08 06:22:20] [Rank 0] step:5681/10000 train_time:458032ms step_avg:80.63ms +[2025-07-08 06:22:21] [Rank 0] step:5701/10000 train_time:459532ms step_avg:80.61ms +[2025-07-08 06:22:21] [Rank 0] step:5701/10000 train_time:459532ms step_avg:80.61ms +[2025-07-08 06:22:23] [Rank 0] step:5721/10000 train_time:461036ms step_avg:80.59ms +[2025-07-08 06:22:23] [Rank 0] step:5721/10000 train_time:461036ms step_avg:80.59ms +[2025-07-08 06:22:24] [Rank 0] step:5741/10000 train_time:462540ms step_avg:80.57ms +[2025-07-08 06:22:24] [Rank 0] step:5741/10000 train_time:462540ms step_avg:80.57ms +[2025-07-08 06:22:27] [Rank 0] step:5761/10000 train_time:464096ms step_avg:80.56ms +[2025-07-08 06:22:27] [Rank 0] step:5761/10000 train_time:464096ms step_avg:80.56ms +[2025-07-08 06:22:28] [Rank 0] step:5781/10000 train_time:466212ms step_avg:80.65ms +[2025-07-08 06:22:28] [Rank 0] step:5781/10000 train_time:466212ms step_avg:80.65ms +[2025-07-08 06:22:30] [Rank 0] step:5801/10000 train_time:467719ms step_avg:80.63ms +[2025-07-08 06:22:30] [Rank 
+[2025-07-08 06:22:31] [Rank 0] step:5821/10000 train_time:469225ms step_avg:80.61ms
+[2025-07-08 06:22:33] [Rank 0] step:5841/10000 train_time:470730ms step_avg:80.59ms
+[2025-07-08 06:22:35] [Rank 0] step:5861/10000 train_time:472879ms step_avg:80.68ms
+[2025-07-08 06:22:36] [Rank 0] step:5881/10000 train_time:474385ms step_avg:80.66ms
+[2025-07-08 06:22:38] [Rank 0] step:5901/10000 train_time:475893ms step_avg:80.65ms
+[2025-07-08 06:22:39] [Rank 0] step:5921/10000 train_time:477395ms step_avg:80.63ms
+[2025-07-08 06:22:41] [Rank 0] step:5941/10000 train_time:478953ms step_avg:80.62ms
+[2025-07-08 06:22:43] [Rank 0] step:5961/10000 train_time:481059ms step_avg:80.70ms
+[2025-07-08 06:22:44] [Rank 0] step:5981/10000 train_time:482564ms step_avg:80.68ms
+[2025-07-08 06:22:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:22:47] [Rank 0] PRINT: step:6000/10000 train_loss:2.0084 val_loss:1.9448 train_time:484070ms step_avg:80.68ms
+[2025-07-08 06:22:47] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:22:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:22:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:28:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:28:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:28:12] [Rank 0] Total Loss: 4.2699
+[2025-07-08 06:28:12] [Rank 0] Total FTA: 0.1513
+[2025-07-08 06:28:12] [Rank 0] Group 0 Loss: 4.4772
+[2025-07-08 06:28:12] [Rank 0] Group 1 Loss: 4.7262
+[2025-07-08 06:28:12] [Rank 0] Group 2 Loss: 3.9702
+[2025-07-08 06:28:12] [Rank 0] Group 3 Loss: 4.2052
+[2025-07-08 06:28:12] [Rank 0] Group 4 Loss: 4.1644
+[2025-07-08 06:28:12] [Rank 0] Group 5 Loss: 4.1712
+[2025-07-08 06:28:12] [Rank 0] Group 6 Loss: 4.1128
+[2025-07-08 06:28:12] [Rank 0] Group 7 Loss: 4.2408
+[2025-07-08 06:28:12] [Rank 0] Group 8 Loss: 4.2839
+[2025-07-08 06:28:12] [Rank 0] Group 9 Loss: 4.2349
+[2025-07-08 06:28:12] [Rank 0] Group 10 Loss: 4.2382
+[2025-07-08 06:28:12] [Rank 0] Group 11 Loss: 4.2455
+[2025-07-08 06:28:12] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-08 06:28:12] [Rank 0] Group 1 FTA: 0.1458
+[2025-07-08 06:28:12] [Rank 0] Group 2 FTA: 0.3359
+[2025-07-08 06:28:12] [Rank 0] Group 3 FTA: 0.1250
+[2025-07-08 06:28:12] [Rank 0] Group 4 FTA: 0.0651
+[2025-07-08 06:28:12] [Rank 0] Group 5 FTA: 0.0781
+[2025-07-08 06:28:12] [Rank 0] Group 6 FTA: 0.1432
+[2025-07-08 06:28:12] [Rank 0] Group 7 FTA: 0.1432
+[2025-07-08 06:28:12] [Rank 0] Group 8 FTA: 0.1510
+[2025-07-08 06:28:12] [Rank 0] Group 9 FTA: 0.1406
+[2025-07-08 06:28:12] [Rank 0] Group 10 FTA: 0.1602
+[2025-07-08 06:28:12] [Rank 0] Group 11 FTA: 0.1416
+[2025-07-08 06:28:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:28:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:28:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:28:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:28:14] [Rank 0] step:6001/10000 train_time:484090ms step_avg:80.67ms
+[2025-07-08 06:28:15] [Rank 0] step:6021/10000 train_time:485612ms step_avg:80.65ms
+[2025-07-08 06:28:17] [Rank 0] step:6041/10000 train_time:487756ms step_avg:80.74ms
+[2025-07-08 06:28:19] [Rank 0] step:6061/10000 train_time:489253ms step_avg:80.72ms
+[2025-07-08 06:28:20] [Rank 0] step:6081/10000 train_time:490750ms step_avg:80.70ms
+[2025-07-08 06:28:22] [Rank 0] step:6101/10000 train_time:492250ms step_avg:80.68ms
+[2025-07-08 06:28:24] [Rank 0] step:6121/10000 train_time:494008ms step_avg:80.71ms
+[2025-07-08 06:28:26] [Rank 0] step:6141/10000 train_time:496148ms step_avg:80.79ms
+[2025-07-08 06:28:27] [Rank 0] step:6161/10000 train_time:497647ms step_avg:80.77ms
+[2025-07-08 06:28:29] [Rank 0] step:6181/10000 train_time:499150ms step_avg:80.76ms
+[2025-07-08 06:28:30] [Rank 0] step:6201/10000 train_time:500652ms step_avg:80.74ms
+[2025-07-08 06:28:32] [Rank 0] step:6221/10000 train_time:502798ms step_avg:80.82ms
+[2025-07-08 06:28:34] [Rank 0] step:6241/10000 train_time:504298ms step_avg:80.80ms
+[2025-07-08 06:28:35] [Rank 0] step:6261/10000 train_time:505802ms step_avg:80.79ms
+[2025-07-08 06:28:37] [Rank 0] step:6281/10000 train_time:507305ms step_avg:80.77ms
+[2025-07-08 06:28:39] [Rank 0] step:6301/10000 train_time:508861ms step_avg:80.76ms
+[2025-07-08 06:28:40] [Rank 0] step:6321/10000 train_time:510547ms step_avg:80.77ms
+[2025-07-08 06:28:42] [Rank 0] step:6341/10000 train_time:512051ms step_avg:80.75ms
+[2025-07-08 06:28:43] [Rank 0] step:6361/10000 train_time:513556ms step_avg:80.74ms
+[2025-07-08 06:28:45] [Rank 0] step:6381/10000 train_time:515061ms step_avg:80.72ms
+[2025-07-08 06:28:46] [Rank 0] step:6401/10000 train_time:516800ms step_avg:80.74ms
+[2025-07-08 06:28:48] [Rank 0] step:6421/10000 train_time:518306ms step_avg:80.72ms
+[2025-07-08 06:28:49] [Rank 0] step:6441/10000 train_time:519813ms step_avg:80.70ms
+[2025-07-08 06:28:51] [Rank 0] step:6461/10000 train_time:521321ms step_avg:80.69ms
+[2025-07-08 06:28:53] [Rank 0] step:6481/10000 train_time:522828ms step_avg:80.67ms
+[2025-07-08 06:28:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:28:55] [Rank 0] PRINT: step:6500/10000 train_loss:1.8917 val_loss:1.8439 train_time:524996ms step_avg:80.77ms
+[2025-07-08 06:28:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:28:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:28:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:34:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:34:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:34:22] [Rank 0] Total Loss: 4.2437
+[2025-07-08 06:34:22] [Rank 0] Total FTA: 0.1619
+[2025-07-08 06:34:22] [Rank 0] Group 0 Loss: 4.5640
+[2025-07-08 06:34:22] [Rank 0] Group 1 Loss: 4.2013
+[2025-07-08 06:34:22] [Rank 0] Group 2 Loss: 3.9652
+[2025-07-08 06:34:22] [Rank 0] Group 3 Loss: 4.2197
+[2025-07-08 06:34:22] [Rank 0] Group 4 Loss: 4.2458
+[2025-07-08 06:34:22] [Rank 0] Group 5 Loss: 4.2227
+[2025-07-08 06:34:22] [Rank 0] Group 6 Loss: 4.1141
+[2025-07-08 06:34:22] [Rank 0] Group 7 Loss: 4.2063
+[2025-07-08 06:34:22] [Rank 0] Group 8 Loss: 4.2282
+[2025-07-08 06:34:22] [Rank 0] Group 9 Loss: 4.1933
+[2025-07-08 06:34:22] [Rank 0] Group 10 Loss: 4.2080
+[2025-07-08 06:34:22] [Rank 0] Group 11 Loss: 4.2386
+[2025-07-08 06:34:22] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-08 06:34:22] [Rank 0] Group 1 FTA: 0.1693
+[2025-07-08 06:34:22] [Rank 0] Group 2 FTA: 0.2552
+[2025-07-08 06:34:22] [Rank 0] Group 3 FTA: 0.1510
+[2025-07-08 06:34:22] [Rank 0] Group 4 FTA: 0.0833
+[2025-07-08 06:34:22] [Rank 0] Group 5 FTA: 0.1302
+[2025-07-08 06:34:22] [Rank 0] Group 6 FTA: 0.1745
+[2025-07-08 06:34:22] [Rank 0] Group 7 FTA: 0.2135
+[2025-07-08 06:34:22] [Rank 0] Group 8 FTA: 0.1719
+[2025-07-08 06:34:22] [Rank 0] Group 9 FTA: 0.1680
+[2025-07-08 06:34:22] [Rank 0] Group 10 FTA: 0.1484
+[2025-07-08 06:34:22] [Rank 0] Group 11 FTA: 0.1475
+[2025-07-08 06:34:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:34:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:34:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:34:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:34:24] [Rank 0] step:6501/10000 train_time:525017ms step_avg:80.76ms
+[2025-07-08 06:34:25] [Rank 0] step:6521/10000 train_time:526538ms step_avg:80.74ms
+[2025-07-08 06:34:27] [Rank 0] step:6541/10000 train_time:528033ms step_avg:80.73ms
+[2025-07-08 06:34:28] [Rank 0] step:6561/10000 train_time:529530ms step_avg:80.71ms
+[2025-07-08 06:34:30] [Rank 0] step:6581/10000 train_time:531677ms step_avg:80.79ms
+[2025-07-08 06:34:32] [Rank 0] step:6601/10000 train_time:533176ms step_avg:80.77ms
+[2025-07-08 06:34:33] [Rank 0] step:6621/10000 train_time:534675ms step_avg:80.75ms
+[2025-07-08 06:34:35] [Rank 0] step:6641/10000 train_time:536176ms step_avg:80.74ms
+[2025-07-08 06:34:37] [Rank 0] step:6661/10000 train_time:537729ms step_avg:80.73ms
+[2025-07-08 06:34:38] [Rank 0] step:6681/10000 train_time:539415ms step_avg:80.74ms
+[2025-07-08 06:34:40] [Rank 0] step:6701/10000 train_time:540917ms step_avg:80.72ms
+[2025-07-08 06:34:41] [Rank 0] step:6721/10000 train_time:542420ms step_avg:80.71ms
+[2025-07-08 06:34:43] [Rank 0] step:6741/10000 train_time:543924ms step_avg:80.69ms
+[2025-07-08 06:34:45] [Rank 0] step:6761/10000 train_time:546177ms step_avg:80.78ms
+[2025-07-08 06:34:46] [Rank 0] step:6781/10000 train_time:547680ms step_avg:80.77ms
+[2025-07-08 06:34:48] [Rank 0] step:6801/10000 train_time:549185ms step_avg:80.75ms
+[2025-07-08 06:34:49] [Rank 0] step:6821/10000 train_time:550695ms step_avg:80.74ms
+[2025-07-08 06:34:51] [Rank 0] step:6841/10000 train_time:552253ms step_avg:80.73ms
+[2025-07-08 06:34:53] [Rank 0] step:6861/10000 train_time:553937ms step_avg:80.74ms
+[2025-07-08 06:34:54] [Rank 0] step:6881/10000 train_time:555443ms step_avg:80.72ms
+[2025-07-08 06:34:56] [Rank 0] step:6901/10000 train_time:556948ms step_avg:80.71ms
+[2025-07-08 06:34:57] [Rank 0] step:6921/10000 train_time:558454ms step_avg:80.69ms
+[2025-07-08 06:34:59] [Rank 0] step:6941/10000 train_time:560612ms step_avg:80.77ms
+[2025-07-08 06:35:01] [Rank 0] step:6961/10000 train_time:562115ms step_avg:80.75ms
+[2025-07-08 06:35:02] [Rank 0] step:6981/10000 train_time:563629ms step_avg:80.74ms
+[2025-07-08 06:35:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:35:05] [Rank 0] PRINT: step:7000/10000 train_loss:1.8040 val_loss:1.7682 train_time:565135ms step_avg:80.73ms
+[2025-07-08 06:35:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:35:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:35:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:40:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:40:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:40:34] [Rank 0] Total Loss: 4.2374
+[2025-07-08 06:40:34] [Rank 0] Total FTA: 0.1653
+[2025-07-08 06:40:34] [Rank 0] Group 0 Loss: 4.5187
+[2025-07-08 06:40:34] [Rank 0] Group 1 Loss: 4.4279
+[2025-07-08 06:40:34] [Rank 0] Group 2 Loss: 3.9413
+[2025-07-08 06:40:34] [Rank 0] Group 3 Loss: 4.3088
+[2025-07-08 06:40:34] [Rank 0] Group 4 Loss: 4.1663
+[2025-07-08 06:40:34] [Rank 0] Group 5 Loss: 4.1241
+[2025-07-08 06:40:34] [Rank 0] Group 6 Loss: 4.0800
+[2025-07-08 06:40:34] [Rank 0] Group 7 Loss: 4.1942
+[2025-07-08 06:40:34] [Rank 0] Group 8 Loss: 4.2120
+[2025-07-08 06:40:34] [Rank 0] Group 9 Loss: 4.1809
+[2025-07-08 06:40:34] [Rank 0] Group 10 Loss: 4.2170
+[2025-07-08 06:40:34] [Rank 0] Group 11 Loss: 4.2170
+[2025-07-08 06:40:34] [Rank 0] Group 0 FTA: 0.1560
+[2025-07-08 06:40:34] [Rank 0] Group 1 FTA: 0.1536
+[2025-07-08 06:40:34] [Rank 0] Group 2 FTA: 0.2682
+[2025-07-08 06:40:34] [Rank 0] Group 3 FTA: 0.1432
+[2025-07-08 06:40:34] [Rank 0] Group 4 FTA: 0.0911
+[2025-07-08 06:40:34] [Rank 0] Group 5 FTA: 0.0677
+[2025-07-08 06:40:34] [Rank 0] Group 6 FTA: 0.1510
+[2025-07-08 06:40:34] [Rank 0] Group 7 FTA: 0.1562
+[2025-07-08 06:40:34] [Rank 0] Group 8 FTA: 0.2083
+[2025-07-08 06:40:34] [Rank 0] Group 9 FTA: 0.1758
+[2025-07-08 06:40:34] [Rank 0] Group 10 FTA: 0.1777
+[2025-07-08 06:40:34] [Rank 0] Group 11 FTA: 0.1943
+[2025-07-08 06:40:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:40:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:40:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:40:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:40:35] [Rank 0] step:7001/10000 train_time:565156ms step_avg:80.73ms
+[2025-07-08 06:40:37] [Rank 0] step:7021/10000 train_time:566664ms step_avg:80.71ms
+[2025-07-08 06:40:39] [Rank 0] step:7041/10000 train_time:568827ms step_avg:80.79ms
+[2025-07-08 06:40:40] [Rank 0] step:7061/10000 train_time:570323ms step_avg:80.77ms
+[2025-07-08 06:40:42] [Rank 0] step:7081/10000 train_time:571821ms step_avg:80.75ms
+[2025-07-08 06:40:43] [Rank 0] step:7101/10000 train_time:573318ms step_avg:80.74ms
+[2025-07-08 06:40:45] [Rank 0] step:7121/10000 train_time:575467ms step_avg:80.81ms
+[2025-07-08 06:40:47] [Rank 0] step:7141/10000 train_time:576963ms step_avg:80.80ms
+[2025-07-08 06:40:48] [Rank 0] step:7161/10000 train_time:578463ms step_avg:80.78ms
+[2025-07-08 06:40:50] [Rank 0] step:7181/10000 train_time:579966ms step_avg:80.76ms
+[2025-07-08 06:40:52] [Rank 0] step:7201/10000 train_time:581518ms step_avg:80.76ms
+[2025-07-08 06:40:53] [Rank 0] step:7221/10000 train_time:583202ms step_avg:80.76ms
+[2025-07-08 06:40:55] [Rank 0] step:7241/10000 train_time:584704ms step_avg:80.75ms
+[2025-07-08 06:40:56] [Rank 0] step:7261/10000 train_time:586208ms step_avg:80.73ms
+[2025-07-08 06:40:58] [Rank 0] step:7281/10000 train_time:587713ms step_avg:80.72ms
+[2025-07-08 06:40:59] [Rank 0] step:7301/10000 train_time:589451ms step_avg:80.74ms
+[2025-07-08 06:41:01] [Rank 0] step:7321/10000 train_time:590956ms step_avg:80.72ms
+[2025-07-08 06:41:02] [Rank 0] step:7341/10000 train_time:592462ms step_avg:80.71ms
+[2025-07-08 06:41:04] [Rank 0] step:7361/10000 train_time:594204ms step_avg:80.72ms
+[2025-07-08 06:41:06] [Rank 0] step:7381/10000 train_time:595758ms step_avg:80.72ms
+[2025-07-08 06:41:07] [Rank 0] step:7401/10000 train_time:597448ms step_avg:80.73ms
+[2025-07-08 06:41:09] [Rank 0] step:7421/10000 train_time:598953ms step_avg:80.71ms
+[2025-07-08 06:41:10] [Rank 0] step:7441/10000 train_time:600458ms step_avg:80.70ms
+[2025-07-08 06:41:12] [Rank 0] step:7461/10000 train_time:601963ms step_avg:80.68ms
+[2025-07-08 06:41:14] [Rank 0] step:7481/10000 train_time:604108ms step_avg:80.75ms
+[2025-07-08 06:41:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:41:17] [Rank 0] PRINT: step:7500/10000 train_loss:1.7375 val_loss:1.7103 train_time:605613ms step_avg:80.75ms
+[2025-07-08 06:41:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:41:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:41:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:46:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:46:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:46:43] [Rank 0] Total Loss: 4.2136
+[2025-07-08 06:46:43] [Rank 0] Total FTA: 0.1601
+[2025-07-08 06:46:43] [Rank 0] Group 0 Loss: 4.4909
+[2025-07-08 06:46:43] [Rank 0] Group 1 Loss: 4.1854
+[2025-07-08 06:46:43] [Rank 0] Group 2 Loss: 3.9291
+[2025-07-08 06:46:43] [Rank 0] Group 3 Loss: 4.2785
+[2025-07-08 06:46:43] [Rank 0] Group 4 Loss: 4.1995
+[2025-07-08 06:46:43] [Rank 0] Group 5 Loss: 4.1043
+[2025-07-08 06:46:43] [Rank 0] Group 6 Loss: 4.0864
+[2025-07-08 06:46:43] [Rank 0] Group 7 Loss: 4.2103
+[2025-07-08 06:46:43] [Rank 0] Group 8 Loss: 4.2110
+[2025-07-08 06:46:43] [Rank 0] Group 9 Loss: 4.1686
+[2025-07-08 06:46:43] [Rank 0] Group 10 Loss: 4.2153
+[2025-07-08 06:46:43] [Rank 0] Group 11 Loss: 4.2047
+[2025-07-08 06:46:43] [Rank 0] Group 0 FTA: 0.1860
+[2025-07-08 06:46:43] [Rank 0] Group 1 FTA: 0.1354
+[2025-07-08 06:46:43] [Rank 0] Group 2 FTA: 0.2422
+[2025-07-08 06:46:43] [Rank 0] Group 3 FTA: 0.0781
+[2025-07-08 06:46:43] [Rank 0] Group 4 FTA: 0.1120
+[2025-07-08 06:46:43] [Rank 0] Group 5 FTA: 0.1120
+[2025-07-08 06:46:43] [Rank 0] Group 6 FTA: 0.1562
+[2025-07-08 06:46:43] [Rank 0] Group 7 FTA: 0.1458
+[2025-07-08 06:46:43] [Rank 0] Group 8 FTA: 0.1641
+[2025-07-08 06:46:43] [Rank 0] Group 9 FTA: 0.1523
+[2025-07-08 06:46:43] [Rank 0] Group 10 FTA: 0.2188
+[2025-07-08 06:46:43] [Rank 0] Group 11 FTA: 0.1641
+[2025-07-08 06:46:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:46:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:46:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:46:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:46:44] [Rank 0] step:7501/10000 train_time:605633ms step_avg:80.74ms
+[2025-07-08 06:46:46] [Rank 0] step:7521/10000 train_time:607142ms step_avg:80.73ms
+[2025-07-08 06:46:47] [Rank 0] step:7541/10000 train_time:608639ms step_avg:80.71ms
+[2025-07-08 06:46:49] [Rank 0] step:7561/10000 train_time:610396ms step_avg:80.73ms
+[2025-07-08 06:46:51] [Rank 0] step:7581/10000 train_time:612291ms step_avg:80.77ms
+[2025-07-08 06:46:52] [Rank 0] step:7601/10000 train_time:613791ms step_avg:80.75ms
+[2025-07-08 06:46:54] [Rank 0] step:7621/10000 train_time:615292ms step_avg:80.74ms
+[2025-07-08 06:46:55] [Rank 0] step:7641/10000 train_time:616793ms step_avg:80.72ms
+[2025-07-08 06:46:58] [Rank 0] step:7661/10000 train_time:618966ms step_avg:80.79ms
+[2025-07-08 06:46:59] [Rank 0] step:7681/10000 train_time:620467ms step_avg:80.78ms
+[2025-07-08 06:47:01] [Rank 0] step:7701/10000 train_time:621973ms step_avg:80.77ms
+[2025-07-08 06:47:02] [Rank 0] step:7721/10000 train_time:623478ms step_avg:80.75ms
+[2025-07-08 06:47:04] [Rank 0] step:7741/10000 train_time:625661ms step_avg:80.82ms
+[2025-07-08 06:47:06] [Rank 0] step:7761/10000 train_time:627146ms step_avg:80.81ms
+[2025-07-08 06:47:07] [Rank 0] step:7781/10000 train_time:628652ms step_avg:80.79ms
+[2025-07-08 06:47:09] [Rank 0] step:7801/10000 train_time:630157ms step_avg:80.78ms
+[2025-07-08 06:47:10] [Rank 0] step:7821/10000 train_time:631663ms step_avg:80.77ms
+[2025-07-08 06:47:12] [Rank 0] step:7841/10000 train_time:633815ms step_avg:80.83ms
+[2025-07-08 06:47:14] [Rank 0] step:7861/10000 train_time:635319ms step_avg:80.82ms
+[2025-07-08 06:47:15] [Rank 0] step:7881/10000 train_time:636825ms step_avg:80.81ms
+[2025-07-08 06:47:17] [Rank 0] step:7901/10000 train_time:638332ms step_avg:80.79ms
+[2025-07-08 06:47:19] [Rank 0] step:7921/10000 train_time:639892ms step_avg:80.78ms
+[2025-07-08 06:47:20] [Rank 0] step:7941/10000 train_time:641582ms step_avg:80.79ms
+[2025-07-08 06:47:22] [Rank 0] step:7961/10000 train_time:643090ms step_avg:80.78ms
+[2025-07-08 06:47:23] [Rank 0] step:7981/10000 train_time:644828ms step_avg:80.80ms
+[2025-07-08 06:47:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:47:26] [Rank 0] PRINT: step:8000/10000 train_loss:1.6866 val_loss:1.6664 train_time:646334ms step_avg:80.79ms
+[2025-07-08 06:47:26] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:47:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:47:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:52:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:52:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:52:55] [Rank 0] Total Loss: 4.2347
+[2025-07-08 06:52:55] [Rank 0] Total FTA: 0.1681
+[2025-07-08 06:52:55] [Rank 0] Group 0 Loss: 4.4636
+[2025-07-08 06:52:55] [Rank 0] Group 1 Loss: 4.6238
+[2025-07-08 06:52:55] [Rank 0] Group 2 Loss: 3.9619
+[2025-07-08 06:52:55] [Rank 0] Group 3 Loss: 4.2372
+[2025-07-08 06:52:55] [Rank 0] Group 4 Loss: 4.1097
+[2025-07-08 06:52:55] [Rank 0] Group 5 Loss: 4.1435
+[2025-07-08 06:52:55] [Rank 0] Group 6 Loss: 4.0976
+[2025-07-08 06:52:55] [Rank 0] Group 7 Loss: 4.2214
+[2025-07-08 06:52:55] [Rank 0] Group 8 Loss: 4.2108
+[2025-07-08 06:52:55] [Rank 0] Group 9 Loss: 4.1891
+[2025-07-08 06:52:55] [Rank 0] Group 10 Loss: 4.1835
+[2025-07-08 06:52:55] [Rank 0] Group 11 Loss: 4.2017
+[2025-07-08 06:52:55] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-08 06:52:55] [Rank 0] Group 1 FTA: 0.1589
+[2025-07-08 06:52:55] [Rank 0] Group 2 FTA: 0.2943
+[2025-07-08 06:52:55] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-08 06:52:55] [Rank 0] Group 4 FTA: 0.1042
+[2025-07-08 06:52:55] [Rank 0] Group 5 FTA: 0.1276
+[2025-07-08 06:52:55] [Rank 0] Group 6 FTA: 0.1458
+[2025-07-08 06:52:55] [Rank 0] Group 7 FTA: 0.2083
+[2025-07-08 06:52:55] [Rank 0] Group 8 FTA: 0.1927
+[2025-07-08 06:52:55] [Rank 0] Group 9 FTA: 0.1445
+[2025-07-08 06:52:55] [Rank 0] Group 10 FTA: 0.1719
+[2025-07-08 06:52:55] [Rank 0] Group 11 FTA: 0.2021
+[2025-07-08 06:52:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:52:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:52:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:52:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:52:56] [Rank 0] step:8001/10000 train_time:646355ms step_avg:80.78ms
+[2025-07-08 06:52:58] [Rank 0] step:8021/10000 train_time:648519ms step_avg:80.85ms
+[2025-07-08 06:53:00] [Rank 0] step:8041/10000 train_time:650013ms step_avg:80.84ms
+[2025-07-08 06:53:01] [Rank 0] step:8061/10000 train_time:651510ms step_avg:80.82ms
+[2025-07-08 06:53:03] [Rank 0] step:8081/10000 train_time:653006ms step_avg:80.81ms
+[2025-07-08 06:53:05] [Rank 0] step:8101/10000 train_time:654557ms step_avg:80.80ms
+[2025-07-08 06:53:06] [Rank 0] step:8121/10000 train_time:656239ms step_avg:80.81ms
+[2025-07-08 06:53:08] [Rank 0] step:8141/10000 train_time:657738ms step_avg:80.79ms
+[2025-07-08 06:53:09] [Rank 0] step:8161/10000 train_time:659238ms step_avg:80.78ms
+[2025-07-08 06:53:11] [Rank 0] step:8181/10000 train_time:660741ms step_avg:80.77ms
+[2025-07-08 06:53:12] [Rank 0] step:8201/10000 train_time:662476ms step_avg:80.78ms
+[2025-07-08 06:53:14] [Rank 0] step:8221/10000 train_time:663980ms step_avg:80.77ms
+[2025-07-08 06:53:15] [Rank 0] step:8241/10000 train_time:665484ms step_avg:80.75ms
+[2025-07-08 06:53:17] [Rank 0] step:8261/10000 train_time:666990ms step_avg:80.74ms
+[2025-07-08 06:53:19] [Rank 0] step:8281/10000 train_time:668548ms step_avg:80.73ms
+[2025-07-08 06:53:20] [Rank 0] step:8301/10000 train_time:670238ms step_avg:80.74ms
+[2025-07-08 06:53:22] [Rank 0] step:8321/10000 train_time:671744ms step_avg:80.73ms
+[2025-07-08 06:53:23] [Rank 0] step:8341/10000 train_time:673250ms step_avg:80.72ms
+[2025-07-08 06:53:25] [Rank 0] step:8361/10000 train_time:674756ms step_avg:80.70ms
+[2025-07-08 06:53:27] [Rank 0] step:8381/10000 train_time:676905ms step_avg:80.77ms
+[2025-07-08 06:53:28] [Rank 0] step:8401/10000 train_time:678410ms step_avg:80.75ms
+[2025-07-08 06:53:30] [Rank 0] step:8421/10000 train_time:679918ms step_avg:80.74ms
+[2025-07-08 06:53:31] [Rank 0] step:8441/10000 train_time:681425ms step_avg:80.73ms
+[2025-07-08 06:53:33] [Rank 0] step:8461/10000 train_time:683185ms step_avg:80.75ms
+[2025-07-08 06:53:35] [Rank 0] step:8481/10000 train_time:685085ms step_avg:80.78ms
+[2025-07-08 06:53:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:53:37] [Rank 0] PRINT: step:8500/10000 train_loss:1.6475 val_loss:1.6325 train_time:686592ms step_avg:80.78ms
+[2025-07-08 06:53:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:53:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:53:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:59:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:59:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:59:08] [Rank 0] Total Loss: 4.2860
+[2025-07-08 06:59:08] [Rank 0] Total FTA: 0.1979
+[2025-07-08 06:59:08] [Rank 0] Group 0 Loss: 4.5062
+[2025-07-08 06:59:08] [Rank 0] Group 1 Loss: 4.6823
+[2025-07-08 06:59:08] [Rank 0] Group 2 Loss: 4.0093
+[2025-07-08 06:59:08] [Rank 0] Group 3 Loss: 4.3752
+[2025-07-08 06:59:08] [Rank 0] Group 4 Loss: 4.2284
+[2025-07-08 06:59:08] [Rank 0] Group 5 Loss: 4.1740
+[2025-07-08 06:59:08] [Rank 0] Group 6 Loss: 4.1262
+[2025-07-08 06:59:08] [Rank 0] Group 7 Loss: 4.2335
+[2025-07-08 06:59:08] [Rank 0] Group 8 Loss: 4.2225
+[2025-07-08 06:59:08] [Rank 0] Group 9 Loss: 4.2030
+[2025-07-08 06:59:08] [Rank 0] Group 10 Loss: 4.2322
+[2025-07-08 06:59:08] [Rank 0] Group 11 Loss: 4.2571
+[2025-07-08 06:59:08] [Rank 0] Group 0 FTA: 0.3251
+[2025-07-08 06:59:08] [Rank 0] Group 1 FTA: 0.1589
+[2025-07-08 06:59:08] [Rank 0] Group 2 FTA: 0.3802
+[2025-07-08 06:59:08] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-08 06:59:08] [Rank 0] Group 4 FTA: 0.0964
+[2025-07-08 06:59:08] [Rank 0] Group 5 FTA: 0.1172
+[2025-07-08 06:59:08] [Rank 0] Group 6 FTA: 0.2448
+[2025-07-08 06:59:08] [Rank 0] Group 7 FTA: 0.1745
+[2025-07-08 06:59:08] [Rank 0] Group 8 FTA: 0.1953
+[2025-07-08 06:59:08] [Rank 0] Group 9 FTA: 0.1562
+[2025-07-08 06:59:08] [Rank 0] Group 10 FTA: 0.1973
+[2025-07-08 06:59:08] [Rank 0] Group 11 FTA: 0.1758
+[2025-07-08 06:59:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png
+[2025-07-08 06:59:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 06:59:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png
+[2025-07-08 06:59:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png
+[2025-07-08 06:59:09] [Rank 0] step:8501/10000 train_time:686612ms step_avg:80.77ms
+[2025-07-08 06:59:11] [Rank 0] step:8521/10000 train_time:688126ms step_avg:80.76ms
+[2025-07-08 06:59:12] [Rank 0] step:8541/10000 train_time:689626ms step_avg:80.74ms
+[2025-07-08 06:59:14] [Rank 0] step:8561/10000 train_time:691775ms step_avg:80.81ms
+[2025-07-08 06:59:16] [Rank 0] step:8581/10000 train_time:693271ms step_avg:80.79ms
+[2025-07-08 06:59:17] [Rank 0] step:8601/10000 train_time:694771ms step_avg:80.78ms
+[2025-07-08 06:59:19] [Rank 0] step:8621/10000 train_time:696272ms step_avg:80.76ms
+[2025-07-08 06:59:21] [Rank 0] step:8641/10000 train_time:697774ms step_avg:80.75ms
+[2025-07-08 06:59:22] [Rank 0] step:8661/10000 train_time:699510ms step_avg:80.77ms
+[2025-07-08 06:59:24] [Rank 0] step:8681/10000 train_time:701010ms step_avg:80.75ms
+[2025-07-08 06:59:25] [Rank 0] step:8701/10000 train_time:702514ms step_avg:80.74ms
+[2025-07-08 06:59:27] [Rank 0] step:8721/10000 train_time:704017ms step_avg:80.73ms
+[2025-07-08 06:59:28] [Rank 0] step:8741/10000 train_time:705755ms step_avg:80.74ms
+[2025-07-08 06:59:30] [Rank 0] step:8761/10000 train_time:707264ms step_avg:80.73ms
+[2025-07-08 06:59:31] [Rank 0] step:8781/10000 train_time:708769ms step_avg:80.72ms
+[2025-07-08 06:59:33] [Rank 0] step:8801/10000 train_time:710275ms step_avg:80.70ms
+[2025-07-08 06:59:35] [Rank 0] step:8821/10000 train_time:711781ms step_avg:80.69ms +[2025-07-08 06:59:36] [Rank 0] step:8841/10000 train_time:713526ms step_avg:80.71ms +[2025-07-08 06:59:38] [Rank 0] step:8861/10000 train_time:715033ms step_avg:80.69ms +[2025-07-08 06:59:39] [Rank 0] step:8881/10000 train_time:716539ms step_avg:80.68ms +[2025-07-08 06:59:41] [Rank 0] step:8901/10000 train_time:718046ms step_avg:80.67ms +[2025-07-08 06:59:43] [Rank 0] step:8921/10000 train_time:720194ms step_avg:80.73ms +[2025-07-08 06:59:44] [Rank 0] step:8941/10000 train_time:721701ms step_avg:80.72ms +[2025-07-08 06:59:46] [Rank 0] step:8961/10000 train_time:723208ms step_avg:80.71ms +[2025-07-08 06:59:47] [Rank 0] step:8981/10000 train_time:724715ms step_avg:80.69ms +[2025-07-08 06:59:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 06:59:50] [Rank 0] PRINT: step:9000/10000 train_loss:1.6177 val_loss:1.6067 train_time:726221ms step_avg:80.69ms +[2025-07-08 06:59:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 06:59:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
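The divisibility warning above is plain arithmetic: 1966080 / 262144 = 7.5, so an integer number of validation batches covers only 7 * 262144 = 1835008 tokens and the remaining 131072 are skipped each pass. A two-line check (assuming the validation loop floors the batch count, as the warning implies): full_batches = 1966080 // 262144  # -> 7; missed = 1966080 - full_batches * 262144  # -> 131072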
+[2025-07-08 06:59:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 07:05:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 07:05:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 07:05:15] [Rank 0] Total Loss: 4.2585 +[2025-07-08 07:05:15] [Rank 0] Total FTA: 0.2075 +[2025-07-08 07:05:15] [Rank 0] Group 0 Loss: 4.4987 +[2025-07-08 07:05:15] [Rank 0] Group 1 Loss: 4.3147 +[2025-07-08 07:05:15] [Rank 0] Group 2 Loss: 3.9927 +[2025-07-08 07:05:15] [Rank 0] Group 3 Loss: 4.3142 +[2025-07-08 07:05:15] [Rank 0] Group 4 Loss: 4.2456 +[2025-07-08 07:05:15] [Rank 0] Group 5 Loss: 4.1678 +[2025-07-08 07:05:15] [Rank 0] Group 6 Loss: 4.1182 +[2025-07-08 07:05:15] [Rank 0] Group 7 Loss: 4.2573 +[2025-07-08 07:05:15] [Rank 0] Group 8 Loss: 4.2786 +[2025-07-08 07:05:15] [Rank 0] Group 9 Loss: 4.2573 +[2025-07-08 07:05:15] [Rank 0] Group 10 Loss: 4.2490 +[2025-07-08 07:05:15] [Rank 0] Group 11 Loss: 4.2253 +[2025-07-08 07:05:15] [Rank 0] Group 0 FTA: 0.3264 +[2025-07-08 07:05:15] [Rank 0] Group 1 FTA: 0.2083 +[2025-07-08 07:05:15] [Rank 0] Group 2 FTA: 0.2500 +[2025-07-08 07:05:15] [Rank 0] Group 3 FTA: 0.1432 +[2025-07-08 07:05:15] [Rank 0] Group 4 FTA: 0.0833 +[2025-07-08 07:05:15] [Rank 0] Group 5 FTA: 0.1458 +[2025-07-08 07:05:15] [Rank 0] Group 6 FTA: 0.2240 +[2025-07-08 07:05:15] [Rank 0] Group 7 FTA: 0.1927 +[2025-07-08 07:05:15] [Rank 0] Group 8 FTA: 0.1901 +[2025-07-08 07:05:15] [Rank 0] Group 9 FTA: 0.1758 +[2025-07-08 07:05:15] [Rank 0] Group 10 FTA: 0.2051 +[2025-07-08 07:05:15] [Rank 0] Group 11 FTA: 0.2109 +[2025-07-08 07:05:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png +[2025-07-08 07:05:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 07:05:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png +[2025-07-08 07:05:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png +[2025-07-08 07:05:17] [Rank 0] step:9001/10000 train_time:726248ms step_avg:80.69ms +[2025-07-08 07:05:19] [Rank 0] step:9021/10000 train_time:728693ms step_avg:80.78ms +[2025-07-08 07:05:20] [Rank 0] step:9041/10000 train_time:730189ms step_avg:80.76ms +[2025-07-08 07:05:22] [Rank 0] step:9061/10000 train_time:731687ms step_avg:80.75ms +[2025-07-08 07:05:23] [Rank 0] step:9081/10000 train_time:733189ms step_avg:80.74ms +[2025-07-08 07:05:25] [Rank 0] step:9101/10000 train_time:735353ms step_avg:80.80ms +[2025-07-08 07:05:27] [Rank 0] step:9121/10000 train_time:736850ms step_avg:80.79ms +[2025-07-08 07:05:28] [Rank 0] step:9141/10000 train_time:738351ms step_avg:80.77ms +[2025-07-08 07:05:30] [Rank 0] step:9161/10000 train_time:739853ms step_avg:80.76ms +[2025-07-08 07:05:31] [Rank 0] step:9181/10000 train_time:741611ms step_avg:80.78ms +[2025-07-08 07:05:33] [Rank 0] step:9201/10000 train_time:743096ms step_avg:80.76ms +[2025-07-08 07:05:34] [Rank 0] step:9221/10000 train_time:744601ms step_avg:80.75ms +[2025-07-08 07:05:36] [Rank 0] step:9241/10000 train_time:746108ms step_avg:80.74ms +[2025-07-08 07:05:37] [Rank 0] step:9261/10000 train_time:747612ms step_avg:80.73ms +[2025-07-08 07:05:39] [Rank 0] step:9281/10000 train_time:749355ms step_avg:80.74ms +[2025-07-08 07:05:41] [Rank 0] step:9301/10000 train_time:750857ms step_avg:80.73ms
+[2025-07-08 07:05:42] [Rank 0] step:9321/10000 train_time:752363ms step_avg:80.72ms +[2025-07-08 07:05:44] [Rank 0] step:9341/10000 train_time:753868ms step_avg:80.71ms +[2025-07-08 07:05:45] [Rank 0] step:9361/10000 train_time:755628ms step_avg:80.72ms +[2025-07-08 07:05:47] [Rank 0] step:9381/10000 train_time:757115ms step_avg:80.71ms +[2025-07-08 07:05:48] [Rank 0] step:9401/10000 train_time:758620ms step_avg:80.70ms +[2025-07-08 07:05:50] [Rank 0] step:9421/10000 train_time:760126ms step_avg:80.68ms +[2025-07-08 07:05:51] [Rank 0] step:9441/10000 train_time:761631ms step_avg:80.67ms +[2025-07-08 07:05:53] [Rank 0] step:9461/10000 train_time:763372ms step_avg:80.69ms +[2025-07-08 07:05:55] [Rank 0] step:9481/10000 train_time:764881ms step_avg:80.68ms +[2025-07-08 07:05:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:05:57] [Rank 0] PRINT: step:9500/10000 train_loss:1.5954 val_loss:1.5875 train_time:766388ms step_avg:80.67ms +[2025-07-08 07:05:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:05:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 07:05:57] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 07:11:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 07:11:27] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 07:11:27] [Rank 0] Total Loss: 4.3202 +[2025-07-08 07:11:27] [Rank 0] Total FTA: 0.1571 +[2025-07-08 07:11:27] [Rank 0] Group 0 Loss: 4.5276 +[2025-07-08 07:11:27] [Rank 0] Group 1 Loss: 4.7659 +[2025-07-08 07:11:27] [Rank 0] Group 2 Loss: 4.0668 +[2025-07-08 07:11:27] [Rank 0] Group 3 Loss: 4.4186 +[2025-07-08 07:11:27] [Rank 0] Group 4 Loss: 4.1913 +[2025-07-08 07:11:27] [Rank 0] Group 5 Loss: 4.1636 +[2025-07-08 07:11:27] [Rank 0] Group 6 Loss: 4.1633 +[2025-07-08 07:11:27] [Rank 0] Group 7 Loss: 4.3241 +[2025-07-08 07:11:27] [Rank 0] Group 8 Loss: 4.2495 +[2025-07-08 07:11:27] [Rank 0] Group 9 Loss: 4.2795 +[2025-07-08 07:11:27] [Rank 0] Group 10 Loss: 4.2633 +[2025-07-08 07:11:27] [Rank 0] Group 11 Loss: 4.2853 +[2025-07-08 07:11:27] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-08 07:11:27] [Rank 0] Group 1 FTA: 0.3073 +[2025-07-08 07:11:27] [Rank 0] Group 2 FTA: 0.2135 +[2025-07-08 07:11:27] [Rank 0] Group 3 FTA: 0.1198 +[2025-07-08 07:11:27] [Rank 0] Group 4 FTA: 0.1042 +[2025-07-08 07:11:27] [Rank 0] Group 5 FTA: 0.1224 +[2025-07-08 07:11:27] [Rank 0] Group 6 FTA: 0.1849 +[2025-07-08 07:11:27] [Rank 0] Group 7 FTA: 0.1953 +[2025-07-08 07:11:27] [Rank 0] Group 8 FTA: 0.1927 +[2025-07-08 07:11:27] [Rank 0] Group 9 FTA: 0.1992 +[2025-07-08 07:11:27] [Rank 0] Group 10 FTA: 0.1836 +[2025-07-08 07:11:27] [Rank 0] Group 11 FTA: 0.1826 +[2025-07-08 07:11:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png +[2025-07-08 07:11:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 07:11:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png +[2025-07-08 07:11:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png +[2025-07-08 07:11:28] [Rank 0] step:9501/10000 train_time:766409ms step_avg:80.67ms +[2025-07-08 07:11:30] [Rank 0] step:9521/10000 train_time:767917ms step_avg:80.66ms +[2025-07-08 07:11:32] [Rank 0] step:9541/10000 train_time:769414ms step_avg:80.64ms +[2025-07-08 07:11:34] [Rank 0] step:9561/10000 train_time:771566ms step_avg:80.70ms +[2025-07-08 07:11:35] [Rank 0] step:9581/10000 train_time:773066ms step_avg:80.69ms +[2025-07-08 07:11:37] [Rank 0] step:9601/10000 train_time:774567ms step_avg:80.68ms +[2025-07-08 07:11:38] [Rank 0] step:9621/10000 train_time:776327ms step_avg:80.69ms +[2025-07-08 07:11:41] [Rank 0] step:9641/10000 train_time:778464ms step_avg:80.75ms +[2025-07-08 07:11:42] [Rank 0] step:9661/10000 train_time:779964ms step_avg:80.73ms +[2025-07-08 07:11:44] [Rank 0] step:9681/10000 train_time:781466ms step_avg:80.72ms +[2025-07-08 07:11:45] [Rank 0] step:9701/10000 train_time:782970ms step_avg:80.71ms +[2025-07-08 07:11:47] [Rank 0] step:9721/10000 train_time:784525ms step_avg:80.70ms +[2025-07-08 07:11:49] [Rank 0] step:9741/10000 train_time:786643ms step_avg:80.76ms +[2025-07-08 07:11:50] [Rank 0] step:9761/10000 train_time:788149ms step_avg:80.74ms +[2025-07-08 07:11:52] [Rank 0] step:9781/10000 train_time:789655ms step_avg:80.73ms +[2025-07-08 07:11:53] [Rank 0] step:9801/10000 train_time:791160ms step_avg:80.72ms
+[2025-07-08 07:11:55] [Rank 0] step:9821/10000 train_time:793313ms step_avg:80.78ms +[2025-07-08 07:11:57] [Rank 0] step:9841/10000 train_time:794818ms step_avg:80.77ms +[2025-07-08 07:11:58] [Rank 0] step:9861/10000 train_time:796322ms step_avg:80.75ms +[2025-07-08 07:12:00] [Rank 0] step:9881/10000 train_time:797827ms step_avg:80.74ms +[2025-07-08 07:12:01] [Rank 0] step:9901/10000 train_time:799334ms step_avg:80.73ms +[2025-07-08 07:12:03] [Rank 0] step:9921/10000 train_time:800880ms step_avg:80.73ms +[2025-07-08 07:12:04] [Rank 0] step:9941/10000 train_time:802385ms step_avg:80.71ms +[2025-07-08 07:12:06] [Rank 0] step:9961/10000 train_time:803892ms step_avg:80.70ms +[2025-07-08 07:12:08] [Rank 0] step:9981/10000 train_time:805399ms step_avg:80.69ms +[2025-07-08 07:12:09] [Rank 0] step:10000/10000 train_time:807070ms step_avg:80.71ms +[2025-07-08 07:12:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:12:10] [Rank 0] PRINT: step:10000/10000 train_loss:1.5796 val_loss:1.5753 train_time:807149ms step_avg:80.71ms +[2025-07-08 07:12:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:12:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 07:12:10] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 07:17:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 07:17:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 07:17:38] [Rank 0] Total Loss: 4.3281 +[2025-07-08 07:17:38] [Rank 0] Total FTA: 0.1994 +[2025-07-08 07:17:38] [Rank 0] Group 0 Loss: 4.5057 +[2025-07-08 07:17:38] [Rank 0] Group 1 Loss: 4.7388 +[2025-07-08 07:17:38] [Rank 0] Group 2 Loss: 4.1145 +[2025-07-08 07:17:38] [Rank 0] Group 3 Loss: 4.4203 +[2025-07-08 07:17:38] [Rank 0] Group 4 Loss: 4.2537 +[2025-07-08 07:17:38] [Rank 0] Group 5 Loss: 4.1872 +[2025-07-08 07:17:38] [Rank 0] Group 6 Loss: 4.1403 +[2025-07-08 07:17:38] [Rank 0] Group 7 Loss: 4.3259 +[2025-07-08 07:17:38] [Rank 0] Group 8 Loss: 4.2912 +[2025-07-08 07:17:38] [Rank 0] Group 9 Loss: 4.2672 +[2025-07-08 07:17:38] [Rank 0] Group 10 Loss: 4.2493 +[2025-07-08 07:17:38] [Rank 0] Group 11 Loss: 4.3066 +[2025-07-08 07:17:38] [Rank 0] Group 0 FTA: 0.3082 +[2025-07-08 07:17:38] [Rank 0] Group 1 FTA: 0.1589 +[2025-07-08 07:17:38] [Rank 0] Group 2 FTA: 0.2396 +[2025-07-08 07:17:38] [Rank 0] Group 3 FTA: 0.1198 +[2025-07-08 07:17:38] [Rank 0] Group 4 FTA: 0.1589 +[2025-07-08 07:17:38] [Rank 0] Group 5 FTA: 0.1146 +[2025-07-08 07:17:38] [Rank 0] Group 6 FTA: 0.1615 +[2025-07-08 07:17:38] [Rank 0] Group 7 FTA: 0.2109 +[2025-07-08 07:17:38] [Rank 0] Group 8 FTA: 0.2083 +[2025-07-08 07:17:38] [Rank 0] Group 9 FTA: 0.1719 +[2025-07-08 07:17:38] [Rank 0] Group 10 FTA: 0.1914 +[2025-07-08 07:17:38] [Rank 0] Group 11 FTA: 0.2119 +[2025-07-08 07:17:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_loss_curves.png +[2025-07-08 07:17:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/per_class_acc_curves.png
+[2025-07-08 07:17:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_loss_curve.png +[2025-07-08 07:17:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_43/total_acc_curve.png +[2025-07-08 07:17:39] [Rank 0] step:10001/10000 train_time:807170ms step_avg:80.71ms +[2025-07-08 07:17:39] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 07:17:39 2025 --- +[2025-07-08 07:17:39] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9916 MiB diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..4287b9a0a020d2ef4c3ae503d35b89cad9db488d --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "896addaa-1426-423c-829d-54a8af07dd30", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..505cb3202e7fdd7189c5b4598a3832d099ae32e1 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74c84e01915ecbe15733086d0dddba6a9303cadae3935a319682ec93e69dd81b +size 340764 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..2630e0af8539b81073ab145895e7a232b9dc4e59 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb40758d460613a5e0de05e69495829616f9d0a667b8e780cbb1d51da2806577 +size 252171
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..239dec7c17760a318921af9e95e4bfadcb3a264e --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb1090ff76e173bf761562f90f432a493e2393108af5875109296f812628fe03 +size 90338 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..80e7eb1314d8e40825b03d0e1ac17e91fded6b81 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:235816656c464694042275ea5ff32cd319f957a5976d13e7d858ff27960a50dc +size 100686 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/training_log_896addaa-1426-423c-829d-54a8af07dd30.txt b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/training_log_896addaa-1426-423c-829d-54a8af07dd30.txt new file mode 100644 index 0000000000000000000000000000000000000000..5efd7c75c6e179ed1111ab6de45498d12b77efeb --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/training_log_896addaa-1426-423c-829d-54a8af07dd30.txt @@ -0,0 +1,5132 @@ +[2025-07-07 04:02:35] [Rank 0] PRINT: --- Script Start: Mon Jul 7 04:02:35 2025 --- +[2025-07-07 04:02:35] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-07 04:02:35] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 04:02:35] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-07 04:02:35] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45 +[2025-07-07 04:02:35] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # cycling through the shards enables multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets
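+# A minimal sketch (not part of the original script) of the shard layout that +# _load_data_shard expects, inferred from the asserts above: a 256-entry int32 +# header whose first three fields are the magic number 20240520, version 1, and +# the token count, followed by the tokens as uint16. Handy for writing a tiny +# synthetic train_*.bin to smoke-test the loader; the function name is illustrative. +def write_shard_for_testing(path, token_ids): + import numpy as np # already imported above; repeated so the sketch is self-contained + header = np.zeros(256, dtype=np.int32) + header[0], header[1], header[2] = 20240520, 1, len(token_ids) + with open(path, "wb") as f: + f.write(header.tobytes()) # 256 * 4 bytes, matching the f.seek(256 * 4) in the loader + f.write(np.asarray(token_ids, dtype=np.uint16).tobytes()) # 2 bytes per token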
+ + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn); " + "5: All Adam (No Muon, all applicable matrices to Adam); " + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); " + "7: Muon(VO Attn, MLP)/Adam(QK Attn); " + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
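+# For the run logged here (optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001, seed=45) +# the f-string above resolves to "mode_0_param_qkvo_lr_0.0001_seed_45", matching the run directory +# printed at startup; Python renders 0.0001 as "0.0001" in f-strings, so the lr appears verbatim.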
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Write each entry exactly once; an unconditional second write here used to + # duplicate every line in the log file. + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text
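+    # For example (illustrative input, not from the dataset): cleaning +    # "What is X? Answer: Y" yields "What is X? Y" - the "Answer:" marker, stray +    # colons and repeated whitespace are collapsed, so the prompt/answer split +    # below only has to locate the '?'.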
+    # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + # random, os, json and torch are already imported at module level; the + # redundant in-function imports were dropped. + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups
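+# Worked example for the construction above, assuming m = 11 (consistent with the +# 12 groups, 0-11, reported in this log): group 0 holds 1 class with 2**11 = 2048 +# samples; group g >= 1 holds 2**(g-1) classes with 2**(11-g) samples each, so every +# group past 0 contributes 2**(g-1) * 2**(11-g) = 2**10 = 1024 samples in total and +# rarer classes come in larger numbers. +# counts, groups = generate_powerlaw_selection_counts(11) +# len(counts)   -> 2048 classes in total +# counts[0]     -> 2048 (group 0); counts[2047] -> 1 (group 11)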
+ +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
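+# Together with train_step_count just below, this accumulates the
+# sequence-length-normalized training loss between validation checkpoints;
+# both counters are reset after every validation block, so the logged
+# train_loss is the mean over the preceding interval.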
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 04:02:35] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycling through the shards enables multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
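+                    # Notation: QK = query/key projections, VO = value/output
+                    # projections; W_1/W_2 = the first (c_fc) and second
+                    # (c_proj) MLP matrices.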
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
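+# With the default CLI values (optimizer_mode=0, model_parameterization="whole",
+# adam_lr=0.001, seed=42) the final path component above resolves to
+# mode_0_param_whole_lr_0.001_seed_42.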
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the log file once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
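+        # and the answer is everything after it, e.g. for
+        # "Where was X born? Paris" -> prompt "Where was X born?", answer "Paris".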
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
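+
+# A quick sanity check on the LR and attention-window schedules defined above
+# (an illustrative aside using this run's settings, num_iterations=10000 and
+# cooldown_frac=0.8):
+#   get_lr(0)     -> 1.0    stable phase (x < 1 - cooldown_frac = 0.2)
+#   get_lr(2000)  -> 1.0    cooldown begins exactly here (w = 1)
+#   get_lr(6000)  -> 0.55   halfway through cooldown: 0.5*1.0 + 0.5*0.1
+#   get_lr(10000) -> 0.1    decay floor reached at the final step
+assert abs(get_lr(6000) - 0.55) < 1e-9
+assert get_window_size_blocks(0).item() == 1       # 128-token window at step 0
+assert get_window_size_blocks(10000).item() == 14  # 1792-token window at the end
+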
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 04:02:35] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 04:02:35] [Rank 0] PRINT: Constructing model...
+[2025-07-07 04:02:37] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 04:02:37] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 04:02:37] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 04:02:38] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 04:02:38] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 04:02:38] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 04:02:38] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 04:02:38] [Rank 0] PRINT: Model returns:
+[2025-07-07 04:02:38] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 04:02:38] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-07 04:02:38] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-07 04:02:38] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-07 04:02:38] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-07 04:02:38] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 04:02:38] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 04:02:38] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 04:03:51] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 04:03:51] [Rank 0] PRINT: Starting training...
+[2025-07-07 04:03:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:03:59] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 04:04:01] [Rank 0] step:21/10000 train_time:1732ms step_avg:82.49ms
+[2025-07-07 04:04:02] [Rank 0] step:41/10000 train_time:3185ms step_avg:77.69ms
+[2025-07-07 04:04:04] [Rank 0] step:61/10000 train_time:4639ms step_avg:76.06ms
+[2025-07-07 04:04:05] [Rank 0] step:81/10000 train_time:6095ms step_avg:75.24ms
+[2025-07-07 04:04:07] [Rank 0] step:101/10000 train_time:7789ms step_avg:77.12ms
+[2025-07-07 04:04:08] [Rank 0] step:121/10000 train_time:9245ms step_avg:76.40ms
+[2025-07-07 04:04:10] [Rank 0] step:141/10000 train_time:10699ms step_avg:75.88ms
+[2025-07-07 04:04:11] [Rank 0] step:161/10000 train_time:12156ms step_avg:75.50ms
+[2025-07-07 04:04:13] [Rank 0] step:181/10000 train_time:13868ms step_avg:76.62ms
+[2025-07-07 04:04:15] [Rank 0] step:201/10000 train_time:15470ms step_avg:76.96ms
+[2025-07-07 04:04:16] [Rank 0] step:221/10000 train_time:16926ms step_avg:76.59ms
+[2025-07-07 04:04:17] [Rank 0] step:241/10000 train_time:18383ms step_avg:76.28ms
+[2025-07-07 04:04:19] [Rank 0] step:261/10000 train_time:19840ms step_avg:76.02ms
+[2025-07-07 04:04:21] [Rank 0] step:281/10000 train_time:21950ms step_avg:78.11ms
+[2025-07-07 04:04:22] [Rank 0] step:301/10000 train_time:23407ms step_avg:77.76ms
+[2025-07-07 04:04:24] [Rank 0] step:321/10000 train_time:24863ms step_avg:77.46ms
+[2025-07-07 04:04:25] [Rank 0] step:341/10000 train_time:26320ms step_avg:77.19ms
+[2025-07-07 04:04:27] [Rank 0] step:361/10000 train_time:27832ms step_avg:77.10ms
+[2025-07-07 04:04:29] [Rank 0] step:381/10000 train_time:29881ms step_avg:78.43ms
+[2025-07-07 04:04:30] [Rank 0] step:401/10000 train_time:31341ms step_avg:78.16ms
+[2025-07-07 04:04:32] [Rank 0] step:421/10000 train_time:32803ms step_avg:77.92ms
+[2025-07-07 04:04:33] [Rank 0] step:441/10000 train_time:34268ms step_avg:77.70ms
+[2025-07-07 04:04:35] [Rank 0] step:461/10000 train_time:35971ms step_avg:78.03ms
+[2025-07-07 04:04:37] [Rank 0] step:481/10000 train_time:37431ms step_avg:77.82ms
+[2025-07-07 04:04:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:04:39] [Rank 0] PRINT: step:500/10000 train_loss:9.6495 val_loss:8.5919 train_time:38892ms step_avg:77.78ms
+[2025-07-07 04:04:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:04:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:04:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:10:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:10:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:10:01] [Rank 0] Total Loss: 8.9113
+[2025-07-07 04:10:01] [Rank 0] Total FTA: 0.0020
+[2025-07-07 04:10:01] [Rank 0] Group 0 Loss: 8.9286
+[2025-07-07 04:10:01] [Rank 0] Group 1 Loss: 8.9109
+[2025-07-07 04:10:01] [Rank 0] Group 2 Loss: 8.9518
+[2025-07-07 04:10:01] [Rank 0] Group 3 Loss: 8.8896
+[2025-07-07 04:10:01] [Rank 0] Group 4 Loss: 8.9114
+[2025-07-07 04:10:01] [Rank 0] Group 5 Loss: 8.8925
+[2025-07-07 04:10:01] [Rank 0] Group 6 Loss: 8.9127
+[2025-07-07 04:10:01] [Rank 0] Group 7 Loss: 8.9069
+[2025-07-07 04:10:01] [Rank 0] Group 8 Loss: 8.9008
+[2025-07-07 04:10:01] [Rank 0] Group 9 Loss: 8.9050
+[2025-07-07 04:10:01] [Rank 0] Group 10 Loss: 8.9093
+[2025-07-07 04:10:01] [Rank 0] Group 11 Loss: 8.9062
+[2025-07-07 04:10:01] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:10:01] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:10:01] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 04:10:01] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 04:10:01] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 04:10:01] [Rank 0] Group 5 FTA: 0.0104
+[2025-07-07 04:10:01] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 04:10:01] [Rank 0] Group 7 FTA: 0.0078
+[2025-07-07 04:10:01] [Rank 0] Group 8 FTA: 0.0026
+[2025-07-07 04:10:01] [Rank 0] Group 9 FTA: 0.0039
+[2025-07-07 04:10:01] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 04:10:01] [Rank 0] Group 11 FTA: 0.0020
+[2025-07-07 04:10:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:10:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:10:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:10:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:10:02] [Rank 0] step:501/10000 train_time:38911ms step_avg:77.67ms
+[2025-07-07 04:10:04] [Rank 0] step:521/10000 train_time:40381ms step_avg:77.51ms
+[2025-07-07 04:10:06] [Rank 0] step:541/10000 train_time:41832ms step_avg:77.32ms
+[2025-07-07 04:10:07] [Rank 0] step:561/10000 train_time:43942ms step_avg:78.33ms
+[2025-07-07 04:10:09] [Rank 0] step:581/10000 train_time:45397ms step_avg:78.14ms
+[2025-07-07 04:10:10] [Rank 0] step:601/10000 train_time:46847ms step_avg:77.95ms
+[2025-07-07 04:10:12] [Rank 0] step:621/10000 train_time:48300ms step_avg:77.78ms
+[2025-07-07 04:10:14] [Rank 0] step:641/10000 train_time:50416ms step_avg:78.65ms
+[2025-07-07 04:10:15] [Rank 0] step:661/10000 train_time:51868ms step_avg:78.47ms
+[2025-07-07 04:10:17] [Rank 0] step:681/10000 train_time:53320ms step_avg:78.30ms
+[2025-07-07 04:10:18] [Rank 0] step:701/10000 train_time:54775ms step_avg:78.14ms
+[2025-07-07 04:10:20] [Rank 0] step:721/10000 train_time:56282ms step_avg:78.06ms
+[2025-07-07 04:10:22] [Rank 0] step:741/10000 train_time:58347ms step_avg:78.74ms
+[2025-07-07 04:10:23] [Rank 0] step:761/10000 train_time:59810ms step_avg:78.59ms
+[2025-07-07 04:10:25] [Rank 0] step:781/10000 train_time:61276ms step_avg:78.46ms
+[2025-07-07 04:10:26] [Rank 0] step:801/10000 train_time:62744ms step_avg:78.33ms
+[2025-07-07 04:10:28] [Rank 0] step:821/10000 train_time:64448ms step_avg:78.50ms
+[2025-07-07 04:10:29] [Rank 0] step:841/10000 train_time:65917ms step_avg:78.38ms
+[2025-07-07 04:10:31] [Rank 0] step:861/10000 train_time:67381ms step_avg:78.26ms
+[2025-07-07 04:10:32] [Rank 0] step:881/10000 train_time:68850ms step_avg:78.15ms
+[2025-07-07 04:10:35] [Rank 0] step:901/10000 train_time:70534ms step_avg:78.28ms
+[2025-07-07 04:10:36] [Rank 0] step:921/10000 train_time:72596ms step_avg:78.82ms
+[2025-07-07 04:10:38] [Rank 0] step:941/10000 train_time:74062ms step_avg:78.71ms
+[2025-07-07 04:10:39] [Rank 0] step:961/10000 train_time:75533ms step_avg:78.60ms
+[2025-07-07 04:10:40] [Rank 0] step:981/10000 train_time:77000ms step_avg:78.49ms
+[2025-07-07 04:10:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
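+
+The detailed-evaluation blocks below report "FTA", first-token accuracy: the
+model's greedy next-token prediction at the end of each sampled question is
+compared with the first token of the gold answer. A minimal sketch of such a
+metric, assuming the model returns logits of shape [1, seq_len, vocab] when
+called with target_seq=None (as the forward-function test above showed); this
+is an illustration, not the script's actual run_detailed_evaluation:
+
+import torch
+
+def first_token_accuracy(model, tokenizer, qa_pairs, device):
+    correct = 0
+    for question, answer in qa_pairs:
+        ids = torch.tensor(tokenizer.encode(question), dtype=torch.int32, device=device)
+        with torch.no_grad():
+            logits = model(ids, None, torch.tensor(1, device=device))
+        pred = logits[0, -1].argmax().item()      # greedy prediction for the next token
+        gold = tokenizer.encode(" " + answer)[0]  # assumes the answer begins with a space-joined token
+        correct += int(pred == gold)
+    return correct / max(1, len(qa_pairs))
+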
+[2025-07-07 04:10:43] [Rank 0] PRINT: step:1000/10000 train_loss:7.7952 val_loss:7.0980 train_time:79126ms step_avg:79.13ms
+[2025-07-07 04:10:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:10:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:10:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:16:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:16:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:16:08] [Rank 0] Total Loss: 7.6667
+[2025-07-07 04:16:08] [Rank 0] Total FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 0 Loss: 7.6773
+[2025-07-07 04:16:08] [Rank 0] Group 1 Loss: 7.6229
+[2025-07-07 04:16:08] [Rank 0] Group 2 Loss: 7.7648
+[2025-07-07 04:16:08] [Rank 0] Group 3 Loss: 7.6532
+[2025-07-07 04:16:08] [Rank 0] Group 4 Loss: 7.6830
+[2025-07-07 04:16:08] [Rank 0] Group 5 Loss: 7.6315
+[2025-07-07 04:16:08] [Rank 0] Group 6 Loss: 7.6721
+[2025-07-07 04:16:08] [Rank 0] Group 7 Loss: 7.6718
+[2025-07-07 04:16:08] [Rank 0] Group 8 Loss: 7.6357
+[2025-07-07 04:16:08] [Rank 0] Group 9 Loss: 7.6610
+[2025-07-07 04:16:08] [Rank 0] Group 10 Loss: 7.6610
+[2025-07-07 04:16:08] [Rank 0] Group 11 Loss: 7.6623
+[2025-07-07 04:16:08] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 04:16:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:16:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:16:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:16:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:16:09] [Rank 0] step:1001/10000 train_time:79146ms step_avg:79.07ms
+[2025-07-07 04:16:11] [Rank 0] step:1021/10000 train_time:80619ms step_avg:78.96ms
+[2025-07-07 04:16:12] [Rank 0] step:1041/10000 train_time:82081ms step_avg:78.85ms
+[2025-07-07 04:16:14] [Rank 0] step:1061/10000 train_time:83543ms step_avg:78.74ms
+[2025-07-07 04:16:16] [Rank 0] step:1081/10000 train_time:85679ms step_avg:79.26ms
+[2025-07-07 04:16:17] [Rank 0] step:1101/10000 train_time:87122ms step_avg:79.13ms
+[2025-07-07 04:16:19] [Rank 0] step:1121/10000 train_time:88588ms step_avg:79.03ms
+[2025-07-07 04:16:20] [Rank 0] step:1141/10000 train_time:90052ms step_avg:78.92ms
+[2025-07-07 04:16:21] [Rank 0] step:1161/10000 train_time:91521ms step_avg:78.83ms
+[2025-07-07 04:16:24] [Rank 0] step:1181/10000 train_time:93653ms step_avg:79.30ms
+[2025-07-07 04:16:25] [Rank 0] step:1201/10000 train_time:95123ms step_avg:79.20ms
+[2025-07-07 04:16:27] [Rank 0] step:1221/10000 train_time:96590ms step_avg:79.11ms
+[2025-07-07 04:16:28] [Rank 0] step:1241/10000 train_time:98060ms step_avg:79.02ms
+[2025-07-07 04:16:30] [Rank 0] step:1261/10000 train_time:100207ms step_avg:79.47ms
+[2025-07-07 04:16:32] [Rank 0] step:1281/10000 train_time:101658ms step_avg:79.36ms
+[2025-07-07 04:16:33] [Rank 0] step:1301/10000 train_time:103125ms step_avg:79.27ms
+[2025-07-07 04:16:35] [Rank 0] step:1321/10000 train_time:104595ms step_avg:79.18ms
+[2025-07-07 04:16:36] [Rank 0] step:1341/10000 train_time:106068ms step_avg:79.10ms
+[2025-07-07 04:16:38] [Rank 0] step:1361/10000 train_time:108183ms step_avg:79.49ms
+[2025-07-07 04:16:40] [Rank 0] step:1381/10000 train_time:109656ms step_avg:79.40ms
+[2025-07-07 04:16:41] [Rank 0] step:1401/10000 train_time:111128ms step_avg:79.32ms
+[2025-07-07 04:16:43] [Rank 0] step:1421/10000 train_time:112605ms step_avg:79.24ms
+[2025-07-07 04:16:44] [Rank 0] step:1441/10000 train_time:114336ms step_avg:79.34ms
+[2025-07-07 04:16:46] [Rank 0] step:1461/10000 train_time:115790ms step_avg:79.25ms
+[2025-07-07 04:16:47] [Rank 0] step:1481/10000 train_time:117263ms step_avg:79.18ms
+[2025-07-07 04:16:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:16:50] [Rank 0] PRINT: step:1500/10000 train_loss:6.5309 val_loss:6.0109 train_time:118739ms step_avg:79.16ms
+[2025-07-07 04:16:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:16:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
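+
+"Stratified sampling" here means the ~5000 detailed-evaluation items are drawn
+per class group so that every group stays represented; rounding each group's
+share upward is one way the final count can land at 5633 rather than 5000. A
+minimal sketch of such a draw (illustrative assumptions throughout, not the
+script's own sampler):
+
+import random
+from collections import defaultdict
+
+def stratified_sample(items, target_size, seed=0):
+    # items: list of (group_id, payload) pairs
+    rng = random.Random(seed)
+    by_group = defaultdict(list)
+    for gid, payload in items:
+        by_group[gid].append(payload)
+    total, sample = len(items), []
+    for gid, group in by_group.items():
+        k = -(-len(group) * target_size // total)  # ceil of the group's proportional share
+        sample.extend((gid, p) for p in rng.sample(group, min(k, len(group))))
+    return sample
+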
+[2025-07-07 04:16:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:22:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:22:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:22:14] [Rank 0] Total Loss: 6.7910
+[2025-07-07 04:22:14] [Rank 0] Total FTA: 0.0000
+[2025-07-07 04:22:14] [Rank 0] Group 0 Loss: 6.7391
+[2025-07-07 04:22:14] [Rank 0] Group 1 Loss: 6.8008
+[2025-07-07 04:22:14] [Rank 0] Group 2 Loss: 6.9026
+[2025-07-07 04:22:14] [Rank 0] Group 3 Loss: 6.7404
+[2025-07-07 04:22:14] [Rank 0] Group 4 Loss: 6.8312
+[2025-07-07 04:22:14] [Rank 0] Group 5 Loss: 6.7684
+[2025-07-07 04:22:14] [Rank 0] Group 6 Loss: 6.8048
+[2025-07-07 04:22:14] [Rank 0] Group 7 Loss: 6.8113
+[2025-07-07 04:22:14] [Rank 0] Group 8 Loss: 6.7706
+[2025-07-07 04:22:14] [Rank 0] Group 9 Loss: 6.7845
+[2025-07-07 04:22:14] [Rank 0] Group 10 Loss: 6.7912
+[2025-07-07 04:22:14] [Rank 0] Group 11 Loss: 6.7933
+[2025-07-07 04:22:14] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:22:14] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:22:14] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 04:22:14] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 04:22:14] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 04:22:14] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 04:22:14] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 04:22:14] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 04:22:15] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 04:22:15] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 04:22:15] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 04:22:15] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 04:22:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:22:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:22:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:22:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:22:16] [Rank 0] step:1501/10000 train_time:118760ms step_avg:79.12ms
+[2025-07-07 04:22:17] [Rank 0] step:1521/10000 train_time:120250ms step_avg:79.06ms
+[2025-07-07 04:22:19] [Rank 0] step:1541/10000 train_time:122379ms step_avg:79.42ms
+[2025-07-07 04:22:21] [Rank 0] step:1561/10000 train_time:123845ms step_avg:79.34ms
+[2025-07-07 04:22:22] [Rank 0] step:1581/10000 train_time:125313ms step_avg:79.26ms
+[2025-07-07 04:22:24] [Rank 0] step:1601/10000 train_time:126782ms step_avg:79.19ms
+[2025-07-07 04:22:26] [Rank 0] step:1621/10000 train_time:128300ms step_avg:79.15ms
+[2025-07-07 04:22:27] [Rank 0] step:1641/10000 train_time:130384ms step_avg:79.45ms
+[2025-07-07 04:22:29] [Rank 0] step:1661/10000 train_time:131855ms step_avg:79.38ms
+[2025-07-07 04:22:30] [Rank 0] step:1681/10000 train_time:133324ms step_avg:79.31ms
+[2025-07-07 04:22:32] [Rank 0] step:1701/10000 train_time:134795ms step_avg:79.24ms
+[2025-07-07 04:22:34] [Rank 0] step:1721/10000 train_time:136907ms step_avg:79.55ms
+[2025-07-07 04:22:35] [Rank 0] step:1741/10000 train_time:138379ms step_avg:79.48ms
+[2025-07-07 04:22:37] [Rank 0] step:1761/10000 train_time:139850ms step_avg:79.42ms
+[2025-07-07 04:22:38] [Rank 0] step:1781/10000 train_time:141325ms step_avg:79.35ms
+[2025-07-07 04:22:41] [Rank 0] step:1801/10000 train_time:143052ms step_avg:79.43ms
+[2025-07-07 04:22:42] [Rank 0] step:1821/10000 train_time:144917ms step_avg:79.58ms
+[2025-07-07 04:22:44] [Rank 0] step:1841/10000 train_time:146391ms step_avg:79.52ms
+[2025-07-07 04:22:45] [Rank 0] step:1861/10000 train_time:147868ms step_avg:79.46ms
+[2025-07-07 04:22:46] [Rank 0] step:1881/10000 train_time:149346ms step_avg:79.40ms
+[2025-07-07 04:22:49] [Rank 0] step:1901/10000 train_time:151471ms step_avg:79.68ms
+[2025-07-07 04:22:50] [Rank 0] step:1921/10000 train_time:152946ms step_avg:79.62ms
+[2025-07-07 04:22:52] [Rank 0] step:1941/10000 train_time:154423ms step_avg:79.56ms
+[2025-07-07 04:22:53] [Rank 0] step:1961/10000 train_time:155901ms step_avg:79.50ms
+[2025-07-07 04:22:55] [Rank 0] step:1981/10000 train_time:158053ms step_avg:79.78ms
+[2025-07-07 04:22:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:22:57] [Rank 0] PRINT: step:2000/10000 train_loss:5.5567 val_loss:5.1391 train_time:159509ms step_avg:79.75ms
+[2025-07-07 04:22:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:22:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
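+
+For orientation before the next results block: the curves saved after each
+evaluation are drawn from a nested history dict keyed by strings, which is how
+the plot_curves helper above indexes it. Using group-0 numbers already logged
+above (layout illustration only):
+
+history = {
+    'per_class_loss': {'0': {'500': 8.9286, '1000': 7.6773, '1500': 6.7391}},
+    'per_class_acc':  {'0': {'500': 0.0000, '1000': 0.0000, '1500': 0.0000}},
+    'total_loss':     {'500': 8.9113, '1000': 7.6667, '1500': 6.7910},
+    'total_acc':      {'500': 0.0020, '1000': 0.0000, '1500': 0.0000},
+}
+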
+[2025-07-07 04:22:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:28:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:28:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:28:23] [Rank 0] Total Loss: 6.1524
+[2025-07-07 04:28:23] [Rank 0] Total FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 0 Loss: 6.1118
+[2025-07-07 04:28:23] [Rank 0] Group 1 Loss: 6.2511
+[2025-07-07 04:28:23] [Rank 0] Group 2 Loss: 6.2059
+[2025-07-07 04:28:23] [Rank 0] Group 3 Loss: 6.1439
+[2025-07-07 04:28:23] [Rank 0] Group 4 Loss: 6.1834
+[2025-07-07 04:28:23] [Rank 0] Group 5 Loss: 6.1077
+[2025-07-07 04:28:23] [Rank 0] Group 6 Loss: 6.1810
+[2025-07-07 04:28:23] [Rank 0] Group 7 Loss: 6.1484
+[2025-07-07 04:28:23] [Rank 0] Group 8 Loss: 6.1229
+[2025-07-07 04:28:23] [Rank 0] Group 9 Loss: 6.1658
+[2025-07-07 04:28:23] [Rank 0] Group 10 Loss: 6.1445
+[2025-07-07 04:28:23] [Rank 0] Group 11 Loss: 6.1368
+[2025-07-07 04:28:23] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 04:28:23] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 04:28:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:28:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:28:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:28:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:28:25] [Rank 0] step:2001/10000 train_time:159529ms step_avg:79.72ms
+[2025-07-07 04:28:26] [Rank 0] step:2021/10000 train_time:161024ms step_avg:79.68ms
+[2025-07-07 04:28:28] [Rank 0] step:2041/10000 train_time:162490ms step_avg:79.61ms
+[2025-07-07 04:28:29] [Rank 0] step:2061/10000 train_time:164115ms step_avg:79.63ms
+[2025-07-07 04:28:31] [Rank 0] step:2081/10000 train_time:166249ms step_avg:79.89ms
+[2025-07-07 04:28:33] [Rank 0] step:2101/10000 train_time:167720ms step_avg:79.83ms
+[2025-07-07 04:28:34] [Rank 0] step:2121/10000 train_time:169191ms step_avg:79.77ms
+[2025-07-07 04:28:36] [Rank 0] step:2141/10000 train_time:170663ms step_avg:79.71ms
+[2025-07-07 04:28:38] [Rank 0] step:2161/10000 train_time:172393ms step_avg:79.77ms
+[2025-07-07 04:28:39] [Rank 0] step:2181/10000 train_time:174255ms step_avg:79.90ms
+[2025-07-07 04:28:41] [Rank 0] step:2201/10000 train_time:175729ms step_avg:79.84ms
+[2025-07-07 04:28:42] [Rank 0] step:2221/10000 train_time:177204ms step_avg:79.79ms
+[2025-07-07 04:28:44] [Rank 0] step:2241/10000 train_time:178697ms step_avg:79.74ms
+[2025-07-07 04:28:46] [Rank 0] step:2261/10000 train_time:180854ms step_avg:79.99ms
+[2025-07-07 04:28:48] [Rank 0] step:2281/10000 train_time:182353ms step_avg:79.94ms
+[2025-07-07 04:28:49] [Rank 0] step:2301/10000 train_time:183853ms step_avg:79.90ms
+[2025-07-07 04:28:51] [Rank 0] step:2321/10000 train_time:185353ms step_avg:79.86ms
+[2025-07-07 04:28:53] [Rank 0] step:2341/10000 train_time:187112ms step_avg:79.93ms
+[2025-07-07 04:28:54] [Rank 0] step:2361/10000 train_time:189001ms step_avg:80.05ms
+[2025-07-07 04:28:56] [Rank 0] step:2381/10000 train_time:190503ms step_avg:80.01ms
+[2025-07-07 04:28:57] [Rank 0] step:2401/10000 train_time:192004ms step_avg:79.97ms
+[2025-07-07 04:28:59] [Rank 0] step:2421/10000 train_time:193507ms step_avg:79.93ms
+[2025-07-07 04:29:01] [Rank 0] step:2441/10000 train_time:195674ms step_avg:80.16ms
+[2025-07-07 04:29:02] [Rank 0] step:2461/10000 train_time:197176ms step_avg:80.12ms
+[2025-07-07 04:29:04] [Rank 0] step:2481/10000 train_time:198680ms step_avg:80.08ms
+[2025-07-07 04:29:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:29:06] [Rank 0] PRINT: step:2500/10000 train_loss:4.7815 val_loss:4.4405 train_time:200185ms step_avg:80.07ms
+[2025-07-07 04:29:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:29:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:29:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:34:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:34:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:34:32] [Rank 0] Total Loss: 5.6999
+[2025-07-07 04:34:32] [Rank 0] Total FTA: 0.0694
+[2025-07-07 04:34:32] [Rank 0] Group 0 Loss: 5.6600
+[2025-07-07 04:34:32] [Rank 0] Group 1 Loss: 5.7720
+[2025-07-07 04:34:32] [Rank 0] Group 2 Loss: 5.7594
+[2025-07-07 04:34:32] [Rank 0] Group 3 Loss: 5.6376
+[2025-07-07 04:34:32] [Rank 0] Group 4 Loss: 5.7124
+[2025-07-07 04:34:32] [Rank 0] Group 5 Loss: 5.6637
+[2025-07-07 04:34:32] [Rank 0] Group 6 Loss: 5.6942
+[2025-07-07 04:34:32] [Rank 0] Group 7 Loss: 5.7077
+[2025-07-07 04:34:32] [Rank 0] Group 8 Loss: 5.6853
+[2025-07-07 04:34:32] [Rank 0] Group 9 Loss: 5.6901
+[2025-07-07 04:34:32] [Rank 0] Group 10 Loss: 5.7318
+[2025-07-07 04:34:32] [Rank 0] Group 11 Loss: 5.7041
+[2025-07-07 04:34:32] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-07 04:34:32] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:34:32] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-07 04:34:32] [Rank 0] Group 3 FTA: 0.0286
+[2025-07-07 04:34:32] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 04:34:32] [Rank 0] Group 5 FTA: 0.0312
+[2025-07-07 04:34:32] [Rank 0] Group 6 FTA: 0.0625
+[2025-07-07 04:34:32] [Rank 0] Group 7 FTA: 0.0651
+[2025-07-07 04:34:32] [Rank 0] Group 8 FTA: 0.0573
+[2025-07-07 04:34:32] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-07 04:34:32] [Rank 0] Group 10 FTA: 0.0742
+[2025-07-07 04:34:32] [Rank 0] Group 11 FTA: 0.0625
+[2025-07-07 04:34:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:34:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:34:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:34:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:34:33] [Rank 0] step:2501/10000 train_time:200205ms step_avg:80.05ms
+[2025-07-07 04:34:35] [Rank 0] step:2521/10000 train_time:201763ms step_avg:80.03ms
+[2025-07-07 04:34:37] [Rank 0] step:2541/10000 train_time:203856ms step_avg:80.23ms
+[2025-07-07 04:34:38] [Rank 0] step:2561/10000 train_time:205345ms step_avg:80.18ms
+[2025-07-07 04:34:40] [Rank 0] step:2581/10000 train_time:206837ms step_avg:80.14ms
+[2025-07-07 04:34:41] [Rank 0] step:2601/10000 train_time:208333ms step_avg:80.10ms
+[2025-07-07 04:34:43] [Rank 0] step:2621/10000 train_time:210065ms step_avg:80.15ms
+[2025-07-07 04:34:45] [Rank 0] step:2641/10000 train_time:211562ms step_avg:80.11ms
+[2025-07-07 04:34:46] [Rank 0] step:2661/10000 train_time:213058ms step_avg:80.07ms
+[2025-07-07 04:34:48] [Rank 0] step:2681/10000 train_time:214648ms step_avg:80.06ms
+[2025-07-07 04:34:50] [Rank 0] step:2701/10000 train_time:216264ms step_avg:80.07ms
+[2025-07-07 04:34:51] [Rank 0] step:2721/10000 train_time:218358ms step_avg:80.25ms
+[2025-07-07 04:34:53] [Rank 0] step:2741/10000 train_time:219858ms step_avg:80.21ms
+[2025-07-07 04:34:54] [Rank 0] step:2761/10000 train_time:221360ms step_avg:80.17ms
+[2025-07-07 04:34:56] [Rank 0] step:2781/10000 train_time:222860ms step_avg:80.14ms
+[2025-07-07 04:34:58] [Rank 0] step:2801/10000 train_time:224599ms step_avg:80.19ms
+[2025-07-07 04:34:59] [Rank 0] step:2821/10000 train_time:226102ms step_avg:80.15ms
+[2025-07-07 04:35:01] [Rank 0] step:2841/10000 train_time:227605ms step_avg:80.11ms
+[2025-07-07 04:35:02] [Rank 0] step:2861/10000 train_time:229110ms step_avg:80.08ms
+[2025-07-07 04:35:04] [Rank 0] step:2881/10000 train_time:230668ms step_avg:80.07ms
+[2025-07-07 04:35:06] [Rank 0] step:2901/10000 train_time:232761ms step_avg:80.23ms
+[2025-07-07 04:35:07] [Rank 0] step:2921/10000 train_time:234264ms step_avg:80.20ms
+[2025-07-07 04:35:09] [Rank 0] step:2941/10000 train_time:235769ms step_avg:80.17ms
+[2025-07-07 04:35:10] [Rank 0] step:2961/10000 train_time:237276ms step_avg:80.13ms
+[2025-07-07 04:35:12] [Rank 0] step:2981/10000 train_time:239430ms step_avg:80.32ms
+[2025-07-07 04:35:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:35:15] [Rank 0] PRINT: step:3000/10000 train_loss:4.1197 val_loss:3.8112 train_time:240937ms step_avg:80.31ms
+[2025-07-07 04:35:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:35:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:35:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:40:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:40:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:40:39] [Rank 0] Total Loss: 5.2774
+[2025-07-07 04:40:39] [Rank 0] Total FTA: 0.0843
+[2025-07-07 04:40:39] [Rank 0] Group 0 Loss: 5.2368
+[2025-07-07 04:40:39] [Rank 0] Group 1 Loss: 5.3864
+[2025-07-07 04:40:39] [Rank 0] Group 2 Loss: 5.3032
+[2025-07-07 04:40:39] [Rank 0] Group 3 Loss: 5.2036
+[2025-07-07 04:40:39] [Rank 0] Group 4 Loss: 5.2844
+[2025-07-07 04:40:39] [Rank 0] Group 5 Loss: 5.2427
+[2025-07-07 04:40:39] [Rank 0] Group 6 Loss: 5.2722
+[2025-07-07 04:40:39] [Rank 0] Group 7 Loss: 5.3194
+[2025-07-07 04:40:39] [Rank 0] Group 8 Loss: 5.2816
+[2025-07-07 04:40:39] [Rank 0] Group 9 Loss: 5.2938
+[2025-07-07 04:40:39] [Rank 0] Group 10 Loss: 5.3063
+[2025-07-07 04:40:39] [Rank 0] Group 11 Loss: 5.2616
+[2025-07-07 04:40:39] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 04:40:39] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:40:39] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 04:40:39] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 04:40:39] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-07 04:40:39] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-07 04:40:39] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 04:40:39] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-07 04:40:39] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-07 04:40:39] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 04:40:39] [Rank 0] Group 10 FTA: 0.1055
+[2025-07-07 04:40:39] [Rank 0] Group 11 FTA: 0.0869
+[2025-07-07 04:40:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:40:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:40:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:40:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:40:41] [Rank 0] step:3001/10000 train_time:240957ms step_avg:80.29ms
+[2025-07-07 04:40:42] [Rank 0] step:3021/10000 train_time:242450ms step_avg:80.26ms
+[2025-07-07 04:40:44] [Rank 0] step:3041/10000 train_time:243944ms step_avg:80.22ms
+[2025-07-07 04:40:46] [Rank 0] step:3061/10000 train_time:245494ms step_avg:80.20ms
+[2025-07-07 04:40:47] [Rank 0] step:3081/10000 train_time:247600ms step_avg:80.36ms
+[2025-07-07 04:40:49] [Rank 0] step:3101/10000 train_time:249095ms step_avg:80.33ms
+[2025-07-07 04:40:50] [Rank 0] step:3121/10000 train_time:250591ms step_avg:80.29ms
+[2025-07-07 04:40:52] [Rank 0] step:3141/10000 train_time:252086ms step_avg:80.26ms
+[2025-07-07 04:40:54] [Rank 0] step:3161/10000 train_time:254244ms step_avg:80.43ms
+[2025-07-07 04:40:56] [Rank 0] step:3181/10000 train_time:255742ms step_avg:80.40ms
+[2025-07-07 04:40:57] [Rank 0] step:3201/10000 train_time:257241ms step_avg:80.36ms
+[2025-07-07 04:40:59] [Rank 0] step:3221/10000 train_time:258741ms step_avg:80.33ms
+[2025-07-07 04:41:01] [Rank 0] step:3241/10000 train_time:260297ms step_avg:80.31ms
+[2025-07-07 04:41:02] [Rank 0] step:3261/10000 train_time:262389ms step_avg:80.46ms
+[2025-07-07 04:41:04] [Rank 0] step:3281/10000 train_time:263893ms step_avg:80.43ms
+[2025-07-07 04:41:05] [Rank 0] step:3301/10000 train_time:265394ms step_avg:80.40ms
+[2025-07-07 04:41:07] [Rank 0] step:3321/10000 train_time:266896ms step_avg:80.37ms
+[2025-07-07 04:41:08] [Rank 0] step:3341/10000 train_time:268575ms step_avg:80.39ms
+[2025-07-07 04:41:10] [Rank 0] step:3361/10000 train_time:270078ms step_avg:80.36ms
+[2025-07-07 04:41:11] [Rank 0] step:3381/10000 train_time:271585ms step_avg:80.33ms
+[2025-07-07 04:41:13] [Rank 0] step:3401/10000 train_time:273090ms step_avg:80.30ms
+[2025-07-07 04:41:15] [Rank 0] step:3421/10000 train_time:274849ms step_avg:80.34ms
+[2025-07-07 04:41:17] [Rank 0] step:3441/10000 train_time:276761ms step_avg:80.43ms
+[2025-07-07 04:41:18] [Rank 0] step:3461/10000 train_time:278266ms step_avg:80.40ms
+[2025-07-07 04:41:20] [Rank 0] step:3481/10000 train_time:279773ms step_avg:80.37ms
+[2025-07-07 04:41:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:41:22] [Rank 0] PRINT: step:3500/10000 train_loss:3.5379 val_loss:3.2798 train_time:281279ms step_avg:80.37ms
+[2025-07-07 04:41:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:41:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:41:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:46:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:46:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:46:46] [Rank 0] Total Loss: 4.9578
+[2025-07-07 04:46:46] [Rank 0] Total FTA: 0.0932
+[2025-07-07 04:46:46] [Rank 0] Group 0 Loss: 5.0287
+[2025-07-07 04:46:46] [Rank 0] Group 1 Loss: 4.9894
+[2025-07-07 04:46:46] [Rank 0] Group 2 Loss: 4.8808
+[2025-07-07 04:46:46] [Rank 0] Group 3 Loss: 4.9374
+[2025-07-07 04:46:46] [Rank 0] Group 4 Loss: 4.9774
+[2025-07-07 04:46:46] [Rank 0] Group 5 Loss: 4.8986
+[2025-07-07 04:46:46] [Rank 0] Group 6 Loss: 4.9652
+[2025-07-07 04:46:46] [Rank 0] Group 7 Loss: 4.9669
+[2025-07-07 04:46:46] [Rank 0] Group 8 Loss: 4.9000
+[2025-07-07 04:46:46] [Rank 0] Group 9 Loss: 4.9445
+[2025-07-07 04:46:46] [Rank 0] Group 10 Loss: 4.9620
+[2025-07-07 04:46:46] [Rank 0] Group 11 Loss: 4.9606
+[2025-07-07 04:46:46] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 04:46:46] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:46:46] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-07 04:46:46] [Rank 0] Group 3 FTA: 0.0443
+[2025-07-07 04:46:47] [Rank 0] Group 4 FTA: 0.0573
+[2025-07-07 04:46:47] [Rank 0] Group 5 FTA: 0.0469
+[2025-07-07 04:46:47] [Rank 0] Group 6 FTA: 0.0833
+[2025-07-07 04:46:47] [Rank 0] Group 7 FTA: 0.1094
+[2025-07-07 04:46:47] [Rank 0] Group 8 FTA: 0.1484
+[2025-07-07 04:46:47] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 04:46:47] [Rank 0] Group 10 FTA: 0.1035
+[2025-07-07 04:46:47] [Rank 0] Group 11 FTA: 0.1045
+[2025-07-07 04:46:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:46:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:46:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:46:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:46:48] [Rank 0] step:3501/10000 train_time:281299ms step_avg:80.35ms
+[2025-07-07 04:46:50] [Rank 0] step:3521/10000 train_time:283451ms step_avg:80.50ms
+[2025-07-07 04:46:52] [Rank 0] step:3541/10000 train_time:284943ms step_avg:80.47ms
+[2025-07-07 04:46:53] [Rank 0] step:3561/10000 train_time:286436ms step_avg:80.44ms
+[2025-07-07 04:46:55] [Rank 0] step:3581/10000 train_time:287930ms step_avg:80.40ms
+[2025-07-07 04:46:56] [Rank 0] step:3601/10000 train_time:289685ms step_avg:80.45ms
+[2025-07-07 04:46:58] [Rank 0] step:3621/10000 train_time:291160ms step_avg:80.41ms
+[2025-07-07 04:46:59] [Rank 0] step:3641/10000 train_time:292655ms step_avg:80.38ms
+[2025-07-07 04:47:01] [Rank 0] step:3661/10000 train_time:294151ms step_avg:80.35ms
+[2025-07-07 04:47:02] [Rank 0] step:3681/10000 train_time:295650ms step_avg:80.32ms
+[2025-07-07 04:47:04] [Rank 0] step:3701/10000 train_time:297387ms step_avg:80.35ms
+[2025-07-07 04:47:06] [Rank 0] step:3721/10000 train_time:298883ms step_avg:80.32ms
+[2025-07-07 04:47:07] [Rank 0] step:3741/10000 train_time:300383ms step_avg:80.29ms
+[2025-07-07 04:47:09] [Rank 0] step:3761/10000 train_time:301886ms step_avg:80.27ms
+[2025-07-07 04:47:11] [Rank 0] step:3781/10000 train_time:304073ms step_avg:80.42ms
+[2025-07-07 04:47:12] [Rank 0] step:3801/10000 train_time:305553ms step_avg:80.39ms
+[2025-07-07 04:47:14] [Rank 0] step:3821/10000 train_time:307056ms step_avg:80.36ms
+[2025-07-07 04:47:15] [Rank 0] step:3841/10000 train_time:308559ms step_avg:80.33ms
+[2025-07-07 04:47:17] [Rank 0] step:3861/10000 train_time:310062ms step_avg:80.31ms
+[2025-07-07 04:47:19] [Rank 0] step:3881/10000 train_time:312225ms step_avg:80.45ms
+[2025-07-07 04:47:20] [Rank 0] step:3901/10000 train_time:313727ms step_avg:80.42ms
+[2025-07-07 04:47:22] [Rank 0] step:3921/10000 train_time:315233ms step_avg:80.40ms
+[2025-07-07 04:47:23] [Rank 0] step:3941/10000 train_time:316738ms step_avg:80.37ms
+[2025-07-07 04:47:26] [Rank 0] step:3961/10000 train_time:318963ms step_avg:80.53ms
+[2025-07-07 04:47:27] [Rank 0] step:3981/10000 train_time:320579ms step_avg:80.53ms
+[2025-07-07 04:47:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:47:30] [Rank 0] PRINT: step:4000/10000 train_loss:3.0552 val_loss:2.8471 train_time:322084ms step_avg:80.52ms
+[2025-07-07 04:47:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:47:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:47:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:52:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:52:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:52:55] [Rank 0] Total Loss: 4.6748
+[2025-07-07 04:52:55] [Rank 0] Total FTA: 0.1225
+[2025-07-07 04:52:55] [Rank 0] Group 0 Loss: 4.7670
+[2025-07-07 04:52:55] [Rank 0] Group 1 Loss: 4.7554
+[2025-07-07 04:52:55] [Rank 0] Group 2 Loss: 4.6217
+[2025-07-07 04:52:55] [Rank 0] Group 3 Loss: 4.6184
+[2025-07-07 04:52:55] [Rank 0] Group 4 Loss: 4.6178
+[2025-07-07 04:52:55] [Rank 0] Group 5 Loss: 4.5977
+[2025-07-07 04:52:55] [Rank 0] Group 6 Loss: 4.6562
+[2025-07-07 04:52:55] [Rank 0] Group 7 Loss: 4.6465
+[2025-07-07 04:52:55] [Rank 0] Group 8 Loss: 4.6651
+[2025-07-07 04:52:55] [Rank 0] Group 9 Loss: 4.6855
+[2025-07-07 04:52:55] [Rank 0] Group 10 Loss: 4.6811
+[2025-07-07 04:52:55] [Rank 0] Group 11 Loss: 4.6818
+[2025-07-07 04:52:55] [Rank 0] Group 0 FTA: 0.1899
+[2025-07-07 04:52:55] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 04:52:55] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 04:52:55] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-07 04:52:55] [Rank 0] Group 4 FTA: 0.0729
+[2025-07-07 04:52:55] [Rank 0] Group 5 FTA: 0.0521
+[2025-07-07 04:52:55] [Rank 0] Group 6 FTA: 0.1302
+[2025-07-07 04:52:55] [Rank 0] Group 7 FTA: 0.1276
+[2025-07-07 04:52:55] [Rank 0] Group 8 FTA: 0.1354
+[2025-07-07 04:52:56] [Rank 0] Group 9 FTA: 0.1133
+[2025-07-07 04:52:56] [Rank 0] Group 10 FTA: 0.1387
+[2025-07-07 04:52:56] [Rank 0] Group 11 FTA: 0.1357
+[2025-07-07 04:52:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:52:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:52:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:52:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:52:57] [Rank 0] step:4001/10000 train_time:322106ms step_avg:80.51ms
+[2025-07-07 04:52:58] [Rank 0] step:4021/10000 train_time:323606ms step_avg:80.48ms
+[2025-07-07 04:53:00] [Rank 0] step:4041/10000 train_time:325095ms step_avg:80.45ms
+[2025-07-07 04:53:02] [Rank 0] step:4061/10000 train_time:327262ms step_avg:80.59ms
+[2025-07-07 04:53:04] [Rank 0] step:4081/10000 train_time:328751ms step_avg:80.56ms
+[2025-07-07 04:53:05] [Rank 0] step:4101/10000 train_time:330246ms step_avg:80.53ms
+[2025-07-07 04:53:07] [Rank 0] step:4121/10000 train_time:331741ms step_avg:80.50ms
+[2025-07-07 04:53:08] [Rank 0] step:4141/10000 train_time:333286ms step_avg:80.48ms
+[2025-07-07 04:53:10] [Rank 0] step:4161/10000 train_time:334965ms step_avg:80.50ms
+[2025-07-07 04:53:11] [Rank 0] step:4181/10000 train_time:336461ms step_avg:80.47ms
+[2025-07-07 04:53:13] [Rank 0] step:4201/10000 train_time:337958ms step_avg:80.45ms
+[2025-07-07 04:53:14] [Rank 0] step:4221/10000 train_time:339459ms step_avg:80.42ms
+[2025-07-07 04:53:16] [Rank 0] step:4241/10000 train_time:341598ms step_avg:80.55ms
+[2025-07-07 04:53:18] [Rank 0] step:4261/10000 train_time:343095ms step_avg:80.52ms
+[2025-07-07 04:53:19] [Rank 0] step:4281/10000 train_time:344596ms step_avg:80.49ms
+[2025-07-07 04:53:21] [Rank 0] step:4301/10000 train_time:346098ms step_avg:80.47ms
+[2025-07-07 04:53:23] [Rank 0] step:4321/10000 train_time:347602ms step_avg:80.44ms
+[2025-07-07 04:53:25] [Rank 0] step:4341/10000 train_time:349744ms step_avg:80.57ms
+[2025-07-07 04:53:26] [Rank 0] step:4361/10000 train_time:351245ms step_avg:80.54ms
+[2025-07-07 04:53:28] [Rank 0] step:4381/10000 train_time:352748ms step_avg:80.52ms
+[2025-07-07 04:53:29] [Rank 0] step:4401/10000 train_time:354252ms step_avg:80.49ms
+[2025-07-07 04:53:31] [Rank 0] step:4421/10000 train_time:356417ms step_avg:80.62ms
+[2025-07-07 04:53:33] [Rank 0] step:4441/10000 train_time:357921ms step_avg:80.59ms
+[2025-07-07 04:53:34] [Rank 0] step:4461/10000 train_time:359423ms step_avg:80.57ms
+[2025-07-07 04:53:36] [Rank 0] step:4481/10000 train_time:360927ms step_avg:80.55ms
+[2025-07-07 04:53:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:53:38] [Rank 0] PRINT: step:4500/10000 train_loss:2.6707 val_loss:2.5107 train_time:362431ms step_avg:80.54ms
+[2025-07-07 04:53:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:53:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:53:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:59:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:59:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:59:04] [Rank 0] Total Loss: 4.5008
+[2025-07-07 04:59:04] [Rank 0] Total FTA: 0.1349
+[2025-07-07 04:59:04] [Rank 0] Group 0 Loss: 4.7249
+[2025-07-07 04:59:04] [Rank 0] Group 1 Loss: 4.6267
+[2025-07-07 04:59:04] [Rank 0] Group 2 Loss: 4.4365
+[2025-07-07 04:59:04] [Rank 0] Group 3 Loss: 4.4615
+[2025-07-07 04:59:04] [Rank 0] Group 4 Loss: 4.4240
+[2025-07-07 04:59:04] [Rank 0] Group 5 Loss: 4.3750
+[2025-07-07 04:59:04] [Rank 0] Group 6 Loss: 4.4481
+[2025-07-07 04:59:04] [Rank 0] Group 7 Loss: 4.4583
+[2025-07-07 04:59:04] [Rank 0] Group 8 Loss: 4.5034
+[2025-07-07 04:59:04] [Rank 0] Group 9 Loss: 4.4286
+[2025-07-07 04:59:04] [Rank 0] Group 10 Loss: 4.4694
+[2025-07-07 04:59:04] [Rank 0] Group 11 Loss: 4.4684
+[2025-07-07 04:59:04] [Rank 0] Group 0 FTA: 0.1782
+[2025-07-07 04:59:04] [Rank 0] Group 1 FTA: 0.1641
+[2025-07-07 04:59:04] [Rank 0] Group 2 FTA: 0.1641
+[2025-07-07 04:59:04] [Rank 0] Group 3 FTA: 0.0859
+[2025-07-07 04:59:04] [Rank 0] Group 4 FTA: 0.1016
+[2025-07-07 04:59:04] [Rank 0] Group 5 FTA: 0.1016
+[2025-07-07 04:59:04] [Rank 0] Group 6 FTA: 0.1432
+[2025-07-07 04:59:04] [Rank 0] Group 7 FTA: 0.1328
+[2025-07-07 04:59:04] [Rank 0] Group 8 FTA: 0.1484
+[2025-07-07 04:59:04] [Rank 0] Group 9 FTA: 0.1250
+[2025-07-07 04:59:04] [Rank 0] Group 10 FTA: 0.1328
+[2025-07-07 04:59:04] [Rank 0] Group 11 FTA: 0.1201
+[2025-07-07 04:59:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 04:59:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 04:59:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 04:59:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 04:59:07] [Rank 0] step:4501/10000 train_time:362459ms step_avg:80.53ms
+[2025-07-07 04:59:08] [Rank 0] step:4521/10000 train_time:364641ms step_avg:80.65ms
+[2025-07-07 04:59:10] [Rank 0] step:4541/10000 train_time:366135ms step_avg:80.63ms
+[2025-07-07 04:59:11] [Rank 0] step:4561/10000 train_time:367626ms step_avg:80.60ms
+[2025-07-07 04:59:13] [Rank 0] step:4581/10000 train_time:369119ms step_avg:80.58ms
+[2025-07-07 04:59:15] [Rank 0] step:4601/10000 train_time:371252ms step_avg:80.69ms
+[2025-07-07 04:59:16] [Rank 0] step:4621/10000 train_time:372747ms step_avg:80.66ms
+[2025-07-07 04:59:18] [Rank 0] step:4641/10000 train_time:374243ms step_avg:80.64ms
+[2025-07-07 04:59:19] [Rank 0] step:4661/10000 train_time:375738ms step_avg:80.61ms
+[2025-07-07 04:59:21] [Rank 0] step:4681/10000 train_time:377282ms step_avg:80.60ms
+[2025-07-07 04:59:22] [Rank 0] step:4701/10000 train_time:378969ms step_avg:80.61ms
+[2025-07-07 04:59:24] [Rank 0] step:4721/10000 train_time:380467ms step_avg:80.59ms
+[2025-07-07 04:59:25] [Rank 0] step:4741/10000 train_time:381965ms step_avg:80.57ms
+[2025-07-07 04:59:27] [Rank 0] step:4761/10000 train_time:383465ms step_avg:80.54ms
+[2025-07-07 04:59:29] [Rank 0] step:4781/10000 train_time:385622ms step_avg:80.66ms
+[2025-07-07 04:59:31] [Rank 0] step:4801/10000 train_time:387124ms step_avg:80.63ms
+[2025-07-07 04:59:32] [Rank 0] step:4821/10000 train_time:388627ms step_avg:80.61ms
+[2025-07-07 04:59:34] [Rank 0] step:4841/10000 train_time:390129ms step_avg:80.59ms
+[2025-07-07 04:59:36] [Rank 0] step:4861/10000 train_time:392320ms step_avg:80.71ms
+[2025-07-07 04:59:37] [Rank 0] step:4881/10000 train_time:393804ms step_avg:80.68ms
+[2025-07-07 04:59:39] [Rank 0] step:4901/10000 train_time:395305ms step_avg:80.66ms
+[2025-07-07 04:59:40] [Rank 0] step:4921/10000 train_time:396805ms step_avg:80.64ms
+[2025-07-07 04:59:42] [Rank 0] step:4941/10000 train_time:398310ms step_avg:80.61ms
+[2025-07-07 04:59:44] [Rank 0] step:4961/10000 train_time:400474ms step_avg:80.72ms
+[2025-07-07 04:59:45] [Rank 0] step:4981/10000 train_time:401977ms step_avg:80.70ms
+[2025-07-07 04:59:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:59:48] [Rank 0] PRINT: step:5000/10000 train_loss:2.3797 val_loss:2.2616 train_time:403483ms step_avg:80.70ms
+[2025-07-07 04:59:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:59:48] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:59:48] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:05:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:05:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:05:14] [Rank 0] Total Loss: 4.3749
+[2025-07-07 05:05:14] [Rank 0] Total FTA: 0.1408
+[2025-07-07 05:05:14] [Rank 0] Group 0 Loss: 4.7266
+[2025-07-07 05:05:14] [Rank 0] Group 1 Loss: 4.3100
+[2025-07-07 05:05:14] [Rank 0] Group 2 Loss: 4.1518
+[2025-07-07 05:05:14] [Rank 0] Group 3 Loss: 4.3762
+[2025-07-07 05:05:14] [Rank 0] Group 4 Loss: 4.3069
+[2025-07-07 05:05:14] [Rank 0] Group 5 Loss: 4.3272
+[2025-07-07 05:05:14] [Rank 0] Group 6 Loss: 4.3002
+[2025-07-07 05:05:14] [Rank 0] Group 7 Loss: 4.3620
+[2025-07-07 05:05:14] [Rank 0] Group 8 Loss: 4.3568
+[2025-07-07 05:05:14] [Rank 0] Group 9 Loss: 4.3474
+[2025-07-07 05:05:14] [Rank 0] Group 10 Loss: 4.3304
+[2025-07-07 05:05:14] [Rank 0] Group 11 Loss: 4.3304
+[2025-07-07 05:05:14] [Rank 0] Group 0 FTA: 0.1756
+[2025-07-07 05:05:14] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 05:05:14] [Rank 0] Group 2 FTA: 0.1719
+[2025-07-07 05:05:14] [Rank 0] Group 3 FTA: 0.0990
+[2025-07-07 05:05:14] [Rank 0] Group 4 FTA: 0.0781
+[2025-07-07 05:05:14] [Rank 0] Group 5 FTA: 0.1172
+[2025-07-07 05:05:14] [Rank 0] Group 6 FTA: 0.1849
+[2025-07-07 05:05:14] [Rank 0] Group 7 FTA: 0.1276
+[2025-07-07 05:05:14] [Rank 0] Group 8 FTA: 0.1224
+[2025-07-07 05:05:14] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 05:05:14] [Rank 0] Group 10 FTA: 0.1523
+[2025-07-07 05:05:14] [Rank 0] Group 11 FTA: 0.1357
+[2025-07-07 05:05:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 05:05:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 05:05:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 05:05:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 05:05:16] [Rank 0] step:5001/10000 train_time:403504ms step_avg:80.68ms
+[2025-07-07 05:05:17] [Rank 0] step:5021/10000 train_time:405023ms step_avg:80.67ms
+[2025-07-07 05:05:20] [Rank 0] step:5041/10000 train_time:407189ms step_avg:80.78ms
+[2025-07-07 05:05:21] [Rank 0] step:5061/10000 train_time:408661ms step_avg:80.75ms
+[2025-07-07 05:05:23] [Rank 0] step:5081/10000 train_time:410289ms step_avg:80.75ms
+[2025-07-07 05:05:24] [Rank 0] step:5101/10000 train_time:411782ms step_avg:80.73ms
+[2025-07-07 05:05:26] [Rank 0] step:5121/10000 train_time:413276ms step_avg:80.70ms
+[2025-07-07 05:05:28] [Rank 0] step:5141/10000 train_time:415436ms step_avg:80.81ms
+[2025-07-07 05:05:29] [Rank 0] step:5161/10000 train_time:416931ms step_avg:80.78ms
+[2025-07-07 05:05:31] [Rank 0] step:5181/10000 train_time:418429ms step_avg:80.76ms
+[2025-07-07 05:05:32] [Rank 0] step:5201/10000 train_time:419926ms step_avg:80.74ms
+[2025-07-07 05:05:34] [Rank 0] step:5221/10000 train_time:421477ms step_avg:80.73ms
+[2025-07-07 05:05:36] [Rank 0] step:5241/10000 train_time:423581ms step_avg:80.82ms
+[2025-07-07 05:05:37] [Rank 0] step:5261/10000 train_time:425079ms step_avg:80.80ms
+[2025-07-07 05:05:39] [Rank 0] step:5281/10000 train_time:426581ms step_avg:80.78ms
+[2025-07-07 05:05:40] [Rank 0] step:5301/10000 train_time:428083ms step_avg:80.76ms
+[2025-07-07 05:05:40] [Rank
0] step:5301/10000 train_time:428083ms step_avg:80.76ms +[2025-07-07 05:05:43] [Rank 0] step:5321/10000 train_time:430254ms step_avg:80.86ms +[2025-07-07 05:05:43] [Rank 0] step:5321/10000 train_time:430254ms step_avg:80.86ms +[2025-07-07 05:05:44] [Rank 0] step:5341/10000 train_time:431754ms step_avg:80.84ms +[2025-07-07 05:05:44] [Rank 0] step:5341/10000 train_time:431754ms step_avg:80.84ms +[2025-07-07 05:05:46] [Rank 0] step:5361/10000 train_time:433258ms step_avg:80.82ms +[2025-07-07 05:05:46] [Rank 0] step:5361/10000 train_time:433258ms step_avg:80.82ms +[2025-07-07 05:05:47] [Rank 0] step:5381/10000 train_time:434762ms step_avg:80.80ms +[2025-07-07 05:05:47] [Rank 0] step:5381/10000 train_time:434762ms step_avg:80.80ms +[2025-07-07 05:05:49] [Rank 0] step:5401/10000 train_time:436949ms step_avg:80.90ms +[2025-07-07 05:05:49] [Rank 0] step:5401/10000 train_time:436949ms step_avg:80.90ms +[2025-07-07 05:05:51] [Rank 0] step:5421/10000 train_time:438433ms step_avg:80.88ms +[2025-07-07 05:05:51] [Rank 0] step:5421/10000 train_time:438433ms step_avg:80.88ms +[2025-07-07 05:05:52] [Rank 0] step:5441/10000 train_time:439936ms step_avg:80.86ms +[2025-07-07 05:05:52] [Rank 0] step:5441/10000 train_time:439936ms step_avg:80.86ms +[2025-07-07 05:05:54] [Rank 0] step:5461/10000 train_time:441440ms step_avg:80.84ms +[2025-07-07 05:05:54] [Rank 0] step:5461/10000 train_time:441440ms step_avg:80.84ms +[2025-07-07 05:05:55] [Rank 0] step:5481/10000 train_time:442945ms step_avg:80.81ms +[2025-07-07 05:05:55] [Rank 0] step:5481/10000 train_time:442945ms step_avg:80.81ms +[2025-07-07 05:05:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:05:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:05:58] [Rank 0] PRINT: step:5500/10000 train_loss:2.1661 val_loss:2.0796 train_time:445101ms step_avg:80.93ms +[2025-07-07 05:05:58] [Rank 0] PRINT: step:5500/10000 train_loss:2.1661 val_loss:2.0796 train_time:445101ms step_avg:80.93ms +[2025-07-07 05:05:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:05:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:05:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 05:05:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 05:05:59] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:05:59] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:11:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:11:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:11:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:11:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:11:26] [Rank 0] Total Loss: 4.2634 +[2025-07-07 05:11:26] [Rank 0] Total Loss: 4.2634 +[2025-07-07 05:11:26] [Rank 0] Total FTA: 0.1676 +[2025-07-07 05:11:26] [Rank 0] Total FTA: 0.1676 +[2025-07-07 05:11:26] [Rank 0] Group 0 Loss: 4.4440 +[2025-07-07 05:11:26] [Rank 0] Group 0 Loss: 4.4440 +[2025-07-07 05:11:26] [Rank 0] Group 1 Loss: 4.2061 +[2025-07-07 05:11:26] [Rank 0] Group 1 Loss: 4.2061 +[2025-07-07 05:11:26] [Rank 0] Group 2 Loss: 4.1516 +[2025-07-07 05:11:26] [Rank 0] Group 2 Loss: 4.1516 +[2025-07-07 05:11:26] [Rank 0] Group 3 Loss: 4.3410 +[2025-07-07 05:11:26] [Rank 0] Group 3 Loss: 4.3410 +[2025-07-07 05:11:26] [Rank 0] Group 4 Loss: 4.1961 +[2025-07-07 05:11:26] [Rank 0] Group 4 Loss: 4.1961 +[2025-07-07 05:11:26] [Rank 0] Group 5 Loss: 4.2063 +[2025-07-07 05:11:26] [Rank 0] Group 5 Loss: 4.2063 +[2025-07-07 05:11:26] [Rank 0] Group 6 Loss: 4.2131 +[2025-07-07 05:11:26] [Rank 0] Group 6 Loss: 4.2131 +[2025-07-07 05:11:26] [Rank 0] Group 7 Loss: 4.2367 +[2025-07-07 05:11:26] [Rank 0] Group 7 Loss: 4.2367 +[2025-07-07 05:11:26] [Rank 0] Group 8 Loss: 4.2492 +[2025-07-07 05:11:26] [Rank 0] Group 8 Loss: 4.2492 +[2025-07-07 05:11:26] [Rank 0] Group 9 Loss: 4.2155 +[2025-07-07 05:11:26] [Rank 0] Group 9 Loss: 4.2155 +[2025-07-07 05:11:26] [Rank 0] Group 10 Loss: 4.2449 +[2025-07-07 05:11:26] [Rank 0] Group 10 Loss: 4.2449 +[2025-07-07 05:11:26] [Rank 0] Group 11 Loss: 4.2641 +[2025-07-07 05:11:26] [Rank 0] Group 11 Loss: 4.2641 +[2025-07-07 05:11:26] [Rank 0] Group 0 FTA: 0.1678 +[2025-07-07 05:11:26] [Rank 0] Group 0 FTA: 0.1678 +[2025-07-07 05:11:26] [Rank 0] Group 1 FTA: 0.1615 +[2025-07-07 05:11:26] [Rank 0] Group 1 FTA: 0.1615 +[2025-07-07 05:11:26] [Rank 0] Group 2 FTA: 0.2656 +[2025-07-07 05:11:26] [Rank 0] Group 2 FTA: 0.2656 +[2025-07-07 05:11:26] [Rank 0] Group 3 FTA: 0.0964 +[2025-07-07 05:11:26] [Rank 0] Group 3 FTA: 0.0964 +[2025-07-07 05:11:26] [Rank 0] Group 4 FTA: 0.1250 +[2025-07-07 05:11:26] [Rank 0] Group 4 FTA: 0.1250 +[2025-07-07 05:11:26] [Rank 0] Group 5 FTA: 0.1380 +[2025-07-07 05:11:26] [Rank 0] Group 5 FTA: 0.1380 +[2025-07-07 05:11:26] [Rank 0] Group 6 FTA: 0.1406 +[2025-07-07 05:11:26] [Rank 0] Group 6 FTA: 0.1406 +[2025-07-07 05:11:26] [Rank 0] Group 7 FTA: 0.1562 +[2025-07-07 05:11:26] [Rank 0] Group 7 FTA: 0.1562 +[2025-07-07 05:11:26] [Rank 0] Group 8 FTA: 0.1927 +[2025-07-07 05:11:26] [Rank 0] Group 8 FTA: 0.1927 +[2025-07-07 05:11:26] [Rank 0] Group 9 FTA: 0.1953 +[2025-07-07 05:11:26] [Rank 0] Group 9 FTA: 0.1953 +[2025-07-07 05:11:26] [Rank 0] Group 10 FTA: 0.1934 +[2025-07-07 05:11:26] [Rank 0] Group 10 FTA: 0.1934 +[2025-07-07 05:11:26] [Rank 0] Group 11 FTA: 0.1719 +[2025-07-07 05:11:26] [Rank 0] Group 11 FTA: 0.1719 +[2025-07-07 05:11:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:11:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:11:27] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:11:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:11:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:11:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:11:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:11:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:11:27] [Rank 0] step:5501/10000 train_time:445121ms step_avg:80.92ms +[2025-07-07 05:11:27] [Rank 0] step:5501/10000 train_time:445121ms step_avg:80.92ms +[2025-07-07 05:11:29] [Rank 0] step:5521/10000 train_time:446630ms step_avg:80.90ms +[2025-07-07 05:11:29] [Rank 0] step:5521/10000 train_time:446630ms step_avg:80.90ms +[2025-07-07 05:11:30] [Rank 0] step:5541/10000 train_time:448122ms step_avg:80.87ms +[2025-07-07 05:11:30] [Rank 0] step:5541/10000 train_time:448122ms step_avg:80.87ms +[2025-07-07 05:11:32] [Rank 0] step:5561/10000 train_time:449614ms step_avg:80.85ms +[2025-07-07 05:11:32] [Rank 0] step:5561/10000 train_time:449614ms step_avg:80.85ms +[2025-07-07 05:11:34] [Rank 0] step:5581/10000 train_time:451163ms step_avg:80.84ms +[2025-07-07 05:11:34] [Rank 0] step:5581/10000 train_time:451163ms step_avg:80.84ms +[2025-07-07 05:11:35] [Rank 0] step:5601/10000 train_time:453272ms step_avg:80.93ms +[2025-07-07 05:11:35] [Rank 0] step:5601/10000 train_time:453272ms step_avg:80.93ms +[2025-07-07 05:11:37] [Rank 0] step:5621/10000 train_time:454763ms step_avg:80.90ms +[2025-07-07 05:11:37] [Rank 0] step:5621/10000 train_time:454763ms step_avg:80.90ms +[2025-07-07 05:11:38] [Rank 0] step:5641/10000 train_time:456257ms step_avg:80.88ms +[2025-07-07 05:11:38] [Rank 0] step:5641/10000 train_time:456257ms step_avg:80.88ms +[2025-07-07 05:11:40] [Rank 0] step:5661/10000 train_time:457752ms step_avg:80.86ms +[2025-07-07 05:11:40] [Rank 0] step:5661/10000 train_time:457752ms step_avg:80.86ms +[2025-07-07 05:11:42] [Rank 0] step:5681/10000 train_time:460071ms step_avg:80.98ms +[2025-07-07 05:11:42] [Rank 0] step:5681/10000 train_time:460071ms step_avg:80.98ms +[2025-07-07 05:11:44] [Rank 0] step:5701/10000 train_time:461567ms step_avg:80.96ms +[2025-07-07 05:11:44] [Rank 0] step:5701/10000 train_time:461567ms step_avg:80.96ms +[2025-07-07 05:11:45] [Rank 0] step:5721/10000 train_time:463064ms step_avg:80.94ms +[2025-07-07 05:11:45] [Rank 0] step:5721/10000 train_time:463064ms step_avg:80.94ms +[2025-07-07 05:11:47] [Rank 0] step:5741/10000 train_time:464564ms step_avg:80.92ms +[2025-07-07 05:11:47] [Rank 0] step:5741/10000 train_time:464564ms step_avg:80.92ms +[2025-07-07 05:11:49] [Rank 0] step:5761/10000 train_time:466320ms step_avg:80.94ms +[2025-07-07 05:11:49] [Rank 0] step:5761/10000 train_time:466320ms step_avg:80.94ms +[2025-07-07 05:11:50] [Rank 0] step:5781/10000 train_time:468220ms step_avg:80.99ms +[2025-07-07 05:11:50] [Rank 0] step:5781/10000 train_time:468220ms step_avg:80.99ms +[2025-07-07 05:11:52] [Rank 0] step:5801/10000 train_time:469719ms step_avg:80.97ms +[2025-07-07 05:11:52] [Rank 
0] step:5801/10000 train_time:469719ms step_avg:80.97ms +[2025-07-07 05:11:53] [Rank 0] step:5821/10000 train_time:471221ms step_avg:80.95ms +[2025-07-07 05:11:53] [Rank 0] step:5821/10000 train_time:471221ms step_avg:80.95ms +[2025-07-07 05:11:55] [Rank 0] step:5841/10000 train_time:472723ms step_avg:80.93ms +[2025-07-07 05:11:55] [Rank 0] step:5841/10000 train_time:472723ms step_avg:80.93ms +[2025-07-07 05:11:57] [Rank 0] step:5861/10000 train_time:474888ms step_avg:81.03ms +[2025-07-07 05:11:57] [Rank 0] step:5861/10000 train_time:474888ms step_avg:81.03ms +[2025-07-07 05:11:58] [Rank 0] step:5881/10000 train_time:476392ms step_avg:81.01ms +[2025-07-07 05:11:58] [Rank 0] step:5881/10000 train_time:476392ms step_avg:81.01ms +[2025-07-07 05:12:00] [Rank 0] step:5901/10000 train_time:477896ms step_avg:80.99ms +[2025-07-07 05:12:00] [Rank 0] step:5901/10000 train_time:477896ms step_avg:80.99ms +[2025-07-07 05:12:01] [Rank 0] step:5921/10000 train_time:479396ms step_avg:80.97ms +[2025-07-07 05:12:01] [Rank 0] step:5921/10000 train_time:479396ms step_avg:80.97ms +[2025-07-07 05:12:04] [Rank 0] step:5941/10000 train_time:481588ms step_avg:81.06ms +[2025-07-07 05:12:04] [Rank 0] step:5941/10000 train_time:481588ms step_avg:81.06ms +[2025-07-07 05:12:05] [Rank 0] step:5961/10000 train_time:483067ms step_avg:81.04ms +[2025-07-07 05:12:05] [Rank 0] step:5961/10000 train_time:483067ms step_avg:81.04ms +[2025-07-07 05:12:07] [Rank 0] step:5981/10000 train_time:484567ms step_avg:81.02ms +[2025-07-07 05:12:07] [Rank 0] step:5981/10000 train_time:484567ms step_avg:81.02ms +[2025-07-07 05:12:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:12:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:12:09] [Rank 0] PRINT: step:6000/10000 train_loss:2.0092 val_loss:1.9459 train_time:486070ms step_avg:81.01ms +[2025-07-07 05:12:09] [Rank 0] PRINT: step:6000/10000 train_loss:2.0092 val_loss:1.9459 train_time:486070ms step_avg:81.01ms +[2025-07-07 05:12:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:12:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:12:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 05:12:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 05:12:09] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:12:09] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:17:36] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:17:36] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:17:36] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:17:36] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:17:36] [Rank 0] Total Loss: 4.2820 +[2025-07-07 05:17:36] [Rank 0] Total Loss: 4.2820 +[2025-07-07 05:17:36] [Rank 0] Total FTA: 0.1674 +[2025-07-07 05:17:36] [Rank 0] Total FTA: 0.1674 +[2025-07-07 05:17:36] [Rank 0] Group 0 Loss: 4.6929 +[2025-07-07 05:17:36] [Rank 0] Group 0 Loss: 4.6929 +[2025-07-07 05:17:36] [Rank 0] Group 1 Loss: 4.3932 +[2025-07-07 05:17:36] [Rank 0] Group 1 Loss: 4.3932 +[2025-07-07 05:17:36] [Rank 0] Group 2 Loss: 4.1925 +[2025-07-07 05:17:36] [Rank 0] Group 2 Loss: 4.1925 +[2025-07-07 05:17:36] [Rank 0] Group 3 Loss: 4.2353 +[2025-07-07 05:17:36] [Rank 0] Group 3 Loss: 4.2353 +[2025-07-07 05:17:36] [Rank 0] Group 4 Loss: 4.1735 +[2025-07-07 05:17:36] [Rank 0] Group 4 Loss: 4.1735 +[2025-07-07 05:17:36] [Rank 0] Group 5 Loss: 4.1608 +[2025-07-07 05:17:36] [Rank 0] Group 5 Loss: 4.1608 +[2025-07-07 05:17:36] [Rank 0] Group 6 Loss: 4.1642 +[2025-07-07 05:17:36] [Rank 0] Group 6 Loss: 4.1642 +[2025-07-07 05:17:36] [Rank 0] Group 7 Loss: 4.2227 +[2025-07-07 05:17:36] [Rank 0] Group 7 Loss: 4.2227 +[2025-07-07 05:17:36] [Rank 0] Group 8 Loss: 4.1939 +[2025-07-07 05:17:36] [Rank 0] Group 8 Loss: 4.1939 +[2025-07-07 05:17:36] [Rank 0] Group 9 Loss: 4.1939 +[2025-07-07 05:17:36] [Rank 0] Group 9 Loss: 4.1939 +[2025-07-07 05:17:36] [Rank 0] Group 10 Loss: 4.1932 +[2025-07-07 05:17:36] [Rank 0] Group 10 Loss: 4.1932 +[2025-07-07 05:17:36] [Rank 0] Group 11 Loss: 4.2349 +[2025-07-07 05:17:36] [Rank 0] Group 11 Loss: 4.2349 +[2025-07-07 05:17:36] [Rank 0] Group 0 FTA: 0.1782 +[2025-07-07 05:17:36] [Rank 0] Group 0 FTA: 0.1782 +[2025-07-07 05:17:36] [Rank 0] Group 1 FTA: 0.1823 +[2025-07-07 05:17:36] [Rank 0] Group 1 FTA: 0.1823 +[2025-07-07 05:17:36] [Rank 0] Group 2 FTA: 0.2526 +[2025-07-07 05:17:36] [Rank 0] Group 2 FTA: 0.2526 +[2025-07-07 05:17:36] [Rank 0] Group 3 FTA: 0.0599 +[2025-07-07 05:17:36] [Rank 0] Group 3 FTA: 0.0599 +[2025-07-07 05:17:36] [Rank 0] Group 4 FTA: 0.1536 +[2025-07-07 05:17:36] [Rank 0] Group 4 FTA: 0.1536 +[2025-07-07 05:17:36] [Rank 0] Group 5 FTA: 0.1484 +[2025-07-07 05:17:36] [Rank 0] Group 5 FTA: 0.1484 +[2025-07-07 05:17:36] [Rank 0] Group 6 FTA: 0.1667 +[2025-07-07 05:17:36] [Rank 0] Group 6 FTA: 0.1667 +[2025-07-07 05:17:36] [Rank 0] Group 7 FTA: 0.1771 +[2025-07-07 05:17:36] [Rank 0] Group 7 FTA: 0.1771 +[2025-07-07 05:17:36] [Rank 0] Group 8 FTA: 0.1589 +[2025-07-07 05:17:36] [Rank 0] Group 8 FTA: 0.1589 +[2025-07-07 05:17:36] [Rank 0] Group 9 FTA: 0.1875 +[2025-07-07 05:17:36] [Rank 0] Group 9 FTA: 0.1875 +[2025-07-07 05:17:36] [Rank 0] Group 10 FTA: 0.1621 +[2025-07-07 05:17:36] [Rank 0] Group 10 FTA: 0.1621 +[2025-07-07 05:17:36] [Rank 0] Group 11 FTA: 0.1719 +[2025-07-07 05:17:36] [Rank 0] Group 11 FTA: 0.1719 +[2025-07-07 05:17:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:17:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:17:37] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:17:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:17:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:17:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:17:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:17:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:17:37] [Rank 0] step:6001/10000 train_time:486091ms step_avg:81.00ms +[2025-07-07 05:17:37] [Rank 0] step:6001/10000 train_time:486091ms step_avg:81.00ms +[2025-07-07 05:17:39] [Rank 0] step:6021/10000 train_time:487595ms step_avg:80.98ms +[2025-07-07 05:17:39] [Rank 0] step:6021/10000 train_time:487595ms step_avg:80.98ms +[2025-07-07 05:17:41] [Rank 0] step:6041/10000 train_time:489739ms step_avg:81.07ms +[2025-07-07 05:17:41] [Rank 0] step:6041/10000 train_time:489739ms step_avg:81.07ms +[2025-07-07 05:17:43] [Rank 0] step:6061/10000 train_time:491231ms step_avg:81.05ms +[2025-07-07 05:17:43] [Rank 0] step:6061/10000 train_time:491231ms step_avg:81.05ms +[2025-07-07 05:17:44] [Rank 0] step:6081/10000 train_time:492722ms step_avg:81.03ms +[2025-07-07 05:17:44] [Rank 0] step:6081/10000 train_time:492722ms step_avg:81.03ms +[2025-07-07 05:17:46] [Rank 0] step:6101/10000 train_time:494213ms step_avg:81.01ms +[2025-07-07 05:17:46] [Rank 0] step:6101/10000 train_time:494213ms step_avg:81.01ms +[2025-07-07 05:17:48] [Rank 0] step:6121/10000 train_time:495707ms step_avg:80.98ms +[2025-07-07 05:17:48] [Rank 0] step:6121/10000 train_time:495707ms step_avg:80.98ms +[2025-07-07 05:17:49] [Rank 0] step:6141/10000 train_time:497850ms step_avg:81.07ms +[2025-07-07 05:17:49] [Rank 0] step:6141/10000 train_time:497850ms step_avg:81.07ms +[2025-07-07 05:17:51] [Rank 0] step:6161/10000 train_time:499347ms step_avg:81.05ms +[2025-07-07 05:17:51] [Rank 0] step:6161/10000 train_time:499347ms step_avg:81.05ms +[2025-07-07 05:17:52] [Rank 0] step:6181/10000 train_time:500843ms step_avg:81.03ms +[2025-07-07 05:17:52] [Rank 0] step:6181/10000 train_time:500843ms step_avg:81.03ms +[2025-07-07 05:17:54] [Rank 0] step:6201/10000 train_time:502341ms step_avg:81.01ms +[2025-07-07 05:17:54] [Rank 0] step:6201/10000 train_time:502341ms step_avg:81.01ms +[2025-07-07 05:17:56] [Rank 0] step:6221/10000 train_time:504492ms step_avg:81.10ms +[2025-07-07 05:17:56] [Rank 0] step:6221/10000 train_time:504492ms step_avg:81.10ms +[2025-07-07 05:17:57] [Rank 0] step:6241/10000 train_time:505990ms step_avg:81.08ms +[2025-07-07 05:17:57] [Rank 0] step:6241/10000 train_time:505990ms step_avg:81.08ms +[2025-07-07 05:17:59] [Rank 0] step:6261/10000 train_time:507490ms step_avg:81.06ms +[2025-07-07 05:17:59] [Rank 0] step:6261/10000 train_time:507490ms step_avg:81.06ms +[2025-07-07 05:18:00] [Rank 0] step:6281/10000 train_time:508989ms step_avg:81.04ms +[2025-07-07 05:18:00] [Rank 0] step:6281/10000 train_time:508989ms step_avg:81.04ms +[2025-07-07 05:18:03] [Rank 0] step:6301/10000 train_time:510555ms step_avg:81.03ms +[2025-07-07 05:18:03] [Rank 
0] step:6301/10000 train_time:510555ms step_avg:81.03ms +[2025-07-07 05:18:04] [Rank 0] step:6321/10000 train_time:512703ms step_avg:81.11ms +[2025-07-07 05:18:04] [Rank 0] step:6321/10000 train_time:512703ms step_avg:81.11ms +[2025-07-07 05:18:06] [Rank 0] step:6341/10000 train_time:514204ms step_avg:81.09ms +[2025-07-07 05:18:06] [Rank 0] step:6341/10000 train_time:514204ms step_avg:81.09ms +[2025-07-07 05:18:07] [Rank 0] step:6361/10000 train_time:515706ms step_avg:81.07ms +[2025-07-07 05:18:07] [Rank 0] step:6361/10000 train_time:515706ms step_avg:81.07ms +[2025-07-07 05:18:09] [Rank 0] step:6381/10000 train_time:517209ms step_avg:81.05ms +[2025-07-07 05:18:09] [Rank 0] step:6381/10000 train_time:517209ms step_avg:81.05ms +[2025-07-07 05:18:11] [Rank 0] step:6401/10000 train_time:519375ms step_avg:81.14ms +[2025-07-07 05:18:11] [Rank 0] step:6401/10000 train_time:519375ms step_avg:81.14ms +[2025-07-07 05:18:12] [Rank 0] step:6421/10000 train_time:520877ms step_avg:81.12ms +[2025-07-07 05:18:12] [Rank 0] step:6421/10000 train_time:520877ms step_avg:81.12ms +[2025-07-07 05:18:14] [Rank 0] step:6441/10000 train_time:522379ms step_avg:81.10ms +[2025-07-07 05:18:14] [Rank 0] step:6441/10000 train_time:522379ms step_avg:81.10ms +[2025-07-07 05:18:15] [Rank 0] step:6461/10000 train_time:523881ms step_avg:81.08ms +[2025-07-07 05:18:15] [Rank 0] step:6461/10000 train_time:523881ms step_avg:81.08ms +[2025-07-07 05:18:17] [Rank 0] step:6481/10000 train_time:525638ms step_avg:81.10ms +[2025-07-07 05:18:17] [Rank 0] step:6481/10000 train_time:525638ms step_avg:81.10ms +[2025-07-07 05:18:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:18:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:18:20] [Rank 0] PRINT: step:6500/10000 train_loss:1.8920 val_loss:1.8445 train_time:527548ms step_avg:81.16ms +[2025-07-07 05:18:20] [Rank 0] PRINT: step:6500/10000 train_loss:1.8920 val_loss:1.8445 train_time:527548ms step_avg:81.16ms +[2025-07-07 05:18:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:18:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:18:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 05:18:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 05:18:20] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:18:20] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:23:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:23:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:23:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:23:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:23:44] [Rank 0] Total Loss: 4.2461 +[2025-07-07 05:23:44] [Rank 0] Total Loss: 4.2461 +[2025-07-07 05:23:44] [Rank 0] Total FTA: 0.1740 +[2025-07-07 05:23:44] [Rank 0] Total FTA: 0.1740 +[2025-07-07 05:23:44] [Rank 0] Group 0 Loss: 4.5685 +[2025-07-07 05:23:44] [Rank 0] Group 0 Loss: 4.5685 +[2025-07-07 05:23:44] [Rank 0] Group 1 Loss: 4.4588 +[2025-07-07 05:23:44] [Rank 0] Group 1 Loss: 4.4588 +[2025-07-07 05:23:44] [Rank 0] Group 2 Loss: 4.1202 +[2025-07-07 05:23:44] [Rank 0] Group 2 Loss: 4.1202 +[2025-07-07 05:23:44] [Rank 0] Group 3 Loss: 4.2918 +[2025-07-07 05:23:44] [Rank 0] Group 3 Loss: 4.2918 +[2025-07-07 05:23:44] [Rank 0] Group 4 Loss: 4.1635 +[2025-07-07 05:23:44] [Rank 0] Group 4 Loss: 4.1635 +[2025-07-07 05:23:44] [Rank 0] Group 5 Loss: 4.1049 +[2025-07-07 05:23:44] [Rank 0] Group 5 Loss: 4.1049 +[2025-07-07 05:23:44] [Rank 0] Group 6 Loss: 4.1178 +[2025-07-07 05:23:44] [Rank 0] Group 6 Loss: 4.1178 +[2025-07-07 05:23:44] [Rank 0] Group 7 Loss: 4.1952 +[2025-07-07 05:23:44] [Rank 0] Group 7 Loss: 4.1952 +[2025-07-07 05:23:44] [Rank 0] Group 8 Loss: 4.1700 +[2025-07-07 05:23:44] [Rank 0] Group 8 Loss: 4.1700 +[2025-07-07 05:23:44] [Rank 0] Group 9 Loss: 4.1361 +[2025-07-07 05:23:44] [Rank 0] Group 9 Loss: 4.1361 +[2025-07-07 05:23:44] [Rank 0] Group 10 Loss: 4.1979 +[2025-07-07 05:23:44] [Rank 0] Group 10 Loss: 4.1979 +[2025-07-07 05:23:44] [Rank 0] Group 11 Loss: 4.1853 +[2025-07-07 05:23:44] [Rank 0] Group 11 Loss: 4.1853 +[2025-07-07 05:23:44] [Rank 0] Group 0 FTA: 0.1534 +[2025-07-07 05:23:44] [Rank 0] Group 0 FTA: 0.1534 +[2025-07-07 05:23:44] [Rank 0] Group 1 FTA: 0.1615 +[2025-07-07 05:23:44] [Rank 0] Group 1 FTA: 0.1615 +[2025-07-07 05:23:44] [Rank 0] Group 2 FTA: 0.1719 +[2025-07-07 05:23:44] [Rank 0] Group 2 FTA: 0.1719 +[2025-07-07 05:23:44] [Rank 0] Group 3 FTA: 0.1302 +[2025-07-07 05:23:44] [Rank 0] Group 3 FTA: 0.1302 +[2025-07-07 05:23:44] [Rank 0] Group 4 FTA: 0.2057 +[2025-07-07 05:23:44] [Rank 0] Group 4 FTA: 0.2057 +[2025-07-07 05:23:44] [Rank 0] Group 5 FTA: 0.1536 +[2025-07-07 05:23:44] [Rank 0] Group 5 FTA: 0.1536 +[2025-07-07 05:23:44] [Rank 0] Group 6 FTA: 0.1797 +[2025-07-07 05:23:44] [Rank 0] Group 6 FTA: 0.1797 +[2025-07-07 05:23:44] [Rank 0] Group 7 FTA: 0.2057 +[2025-07-07 05:23:44] [Rank 0] Group 7 FTA: 0.2057 +[2025-07-07 05:23:44] [Rank 0] Group 8 FTA: 0.1927 +[2025-07-07 05:23:44] [Rank 0] Group 8 FTA: 0.1927 +[2025-07-07 05:23:44] [Rank 0] Group 9 FTA: 0.1875 +[2025-07-07 05:23:44] [Rank 0] Group 9 FTA: 0.1875 +[2025-07-07 05:23:44] [Rank 0] Group 10 FTA: 0.1895 +[2025-07-07 05:23:44] [Rank 0] Group 10 FTA: 0.1895 +[2025-07-07 05:23:44] [Rank 0] Group 11 FTA: 0.1748 +[2025-07-07 05:23:44] [Rank 0] Group 11 FTA: 0.1748 +[2025-07-07 05:23:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:23:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:23:45] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:23:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:23:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:23:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:23:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:23:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:23:46] [Rank 0] step:6501/10000 train_time:527569ms step_avg:81.15ms +[2025-07-07 05:23:46] [Rank 0] step:6501/10000 train_time:527569ms step_avg:81.15ms +[2025-07-07 05:23:47] [Rank 0] step:6521/10000 train_time:529079ms step_avg:81.13ms +[2025-07-07 05:23:47] [Rank 0] step:6521/10000 train_time:529079ms step_avg:81.13ms +[2025-07-07 05:23:49] [Rank 0] step:6541/10000 train_time:530568ms step_avg:81.11ms +[2025-07-07 05:23:49] [Rank 0] step:6541/10000 train_time:530568ms step_avg:81.11ms +[2025-07-07 05:23:50] [Rank 0] step:6561/10000 train_time:532059ms step_avg:81.09ms +[2025-07-07 05:23:50] [Rank 0] step:6561/10000 train_time:532059ms step_avg:81.09ms +[2025-07-07 05:23:52] [Rank 0] step:6581/10000 train_time:534215ms step_avg:81.18ms +[2025-07-07 05:23:52] [Rank 0] step:6581/10000 train_time:534215ms step_avg:81.18ms +[2025-07-07 05:23:54] [Rank 0] step:6601/10000 train_time:535708ms step_avg:81.16ms +[2025-07-07 05:23:54] [Rank 0] step:6601/10000 train_time:535708ms step_avg:81.16ms +[2025-07-07 05:23:55] [Rank 0] step:6621/10000 train_time:537201ms step_avg:81.14ms +[2025-07-07 05:23:55] [Rank 0] step:6621/10000 train_time:537201ms step_avg:81.14ms +[2025-07-07 05:23:57] [Rank 0] step:6641/10000 train_time:538695ms step_avg:81.12ms +[2025-07-07 05:23:57] [Rank 0] step:6641/10000 train_time:538695ms step_avg:81.12ms +[2025-07-07 05:23:59] [Rank 0] step:6661/10000 train_time:540851ms step_avg:81.20ms +[2025-07-07 05:23:59] [Rank 0] step:6661/10000 train_time:540851ms step_avg:81.20ms +[2025-07-07 05:24:00] [Rank 0] step:6681/10000 train_time:542327ms step_avg:81.17ms +[2025-07-07 05:24:00] [Rank 0] step:6681/10000 train_time:542327ms step_avg:81.17ms +[2025-07-07 05:24:02] [Rank 0] step:6701/10000 train_time:543822ms step_avg:81.16ms +[2025-07-07 05:24:02] [Rank 0] step:6701/10000 train_time:543822ms step_avg:81.16ms +[2025-07-07 05:24:03] [Rank 0] step:6721/10000 train_time:545319ms step_avg:81.14ms +[2025-07-07 05:24:03] [Rank 0] step:6721/10000 train_time:545319ms step_avg:81.14ms +[2025-07-07 05:24:05] [Rank 0] step:6741/10000 train_time:546817ms step_avg:81.12ms +[2025-07-07 05:24:05] [Rank 0] step:6741/10000 train_time:546817ms step_avg:81.12ms +[2025-07-07 05:24:07] [Rank 0] step:6761/10000 train_time:548970ms step_avg:81.20ms +[2025-07-07 05:24:07] [Rank 0] step:6761/10000 train_time:548970ms step_avg:81.20ms +[2025-07-07 05:24:08] [Rank 0] step:6781/10000 train_time:550468ms step_avg:81.18ms +[2025-07-07 05:24:08] [Rank 0] step:6781/10000 train_time:550468ms step_avg:81.18ms +[2025-07-07 05:24:10] [Rank 0] step:6801/10000 train_time:551967ms step_avg:81.16ms +[2025-07-07 05:24:10] [Rank 
0] step:6801/10000 train_time:551967ms step_avg:81.16ms +[2025-07-07 05:24:11] [Rank 0] step:6821/10000 train_time:553468ms step_avg:81.14ms +[2025-07-07 05:24:11] [Rank 0] step:6821/10000 train_time:553468ms step_avg:81.14ms +[2025-07-07 05:24:14] [Rank 0] step:6841/10000 train_time:555635ms step_avg:81.22ms +[2025-07-07 05:24:14] [Rank 0] step:6841/10000 train_time:555635ms step_avg:81.22ms +[2025-07-07 05:24:15] [Rank 0] step:6861/10000 train_time:557115ms step_avg:81.20ms +[2025-07-07 05:24:15] [Rank 0] step:6861/10000 train_time:557115ms step_avg:81.20ms +[2025-07-07 05:24:17] [Rank 0] step:6881/10000 train_time:558616ms step_avg:81.18ms +[2025-07-07 05:24:17] [Rank 0] step:6881/10000 train_time:558616ms step_avg:81.18ms +[2025-07-07 05:24:18] [Rank 0] step:6901/10000 train_time:560118ms step_avg:81.16ms +[2025-07-07 05:24:18] [Rank 0] step:6901/10000 train_time:560118ms step_avg:81.16ms +[2025-07-07 05:24:20] [Rank 0] step:6921/10000 train_time:561620ms step_avg:81.15ms +[2025-07-07 05:24:20] [Rank 0] step:6921/10000 train_time:561620ms step_avg:81.15ms +[2025-07-07 05:24:21] [Rank 0] step:6941/10000 train_time:563355ms step_avg:81.16ms +[2025-07-07 05:24:21] [Rank 0] step:6941/10000 train_time:563355ms step_avg:81.16ms +[2025-07-07 05:24:23] [Rank 0] step:6961/10000 train_time:564858ms step_avg:81.15ms +[2025-07-07 05:24:23] [Rank 0] step:6961/10000 train_time:564858ms step_avg:81.15ms +[2025-07-07 05:24:24] [Rank 0] step:6981/10000 train_time:566360ms step_avg:81.13ms +[2025-07-07 05:24:24] [Rank 0] step:6981/10000 train_time:566360ms step_avg:81.13ms +[2025-07-07 05:24:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:24:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:24:27] [Rank 0] PRINT: step:7000/10000 train_loss:1.8040 val_loss:1.7682 train_time:567863ms step_avg:81.12ms +[2025-07-07 05:24:27] [Rank 0] PRINT: step:7000/10000 train_loss:1.8040 val_loss:1.7682 train_time:567863ms step_avg:81.12ms +[2025-07-07 05:24:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:24:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:24:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 05:24:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 05:24:27] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:24:27] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:29:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:29:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:29:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:29:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:29:55] [Rank 0] Total Loss: 4.2453 +[2025-07-07 05:29:55] [Rank 0] Total Loss: 4.2453 +[2025-07-07 05:29:55] [Rank 0] Total FTA: 0.1752 +[2025-07-07 05:29:55] [Rank 0] Total FTA: 0.1752 +[2025-07-07 05:29:55] [Rank 0] Group 0 Loss: 4.6747 +[2025-07-07 05:29:55] [Rank 0] Group 0 Loss: 4.6747 +[2025-07-07 05:29:55] [Rank 0] Group 1 Loss: 4.4379 +[2025-07-07 05:29:55] [Rank 0] Group 1 Loss: 4.4379 +[2025-07-07 05:29:55] [Rank 0] Group 2 Loss: 4.0438 +[2025-07-07 05:29:55] [Rank 0] Group 2 Loss: 4.0438 +[2025-07-07 05:29:55] [Rank 0] Group 3 Loss: 4.1996 +[2025-07-07 05:29:55] [Rank 0] Group 3 Loss: 4.1996 +[2025-07-07 05:29:55] [Rank 0] Group 4 Loss: 4.1141 +[2025-07-07 05:29:55] [Rank 0] Group 4 Loss: 4.1141 +[2025-07-07 05:29:55] [Rank 0] Group 5 Loss: 4.0924 +[2025-07-07 05:29:55] [Rank 0] Group 5 Loss: 4.0924 +[2025-07-07 05:29:55] [Rank 0] Group 6 Loss: 4.1293 +[2025-07-07 05:29:55] [Rank 0] Group 6 Loss: 4.1293 +[2025-07-07 05:29:55] [Rank 0] Group 7 Loss: 4.1842 +[2025-07-07 05:29:55] [Rank 0] Group 7 Loss: 4.1842 +[2025-07-07 05:29:55] [Rank 0] Group 8 Loss: 4.1793 +[2025-07-07 05:29:55] [Rank 0] Group 8 Loss: 4.1793 +[2025-07-07 05:29:55] [Rank 0] Group 9 Loss: 4.1615 +[2025-07-07 05:29:55] [Rank 0] Group 9 Loss: 4.1615 +[2025-07-07 05:29:55] [Rank 0] Group 10 Loss: 4.2098 +[2025-07-07 05:29:55] [Rank 0] Group 10 Loss: 4.2098 +[2025-07-07 05:29:55] [Rank 0] Group 11 Loss: 4.1797 +[2025-07-07 05:29:55] [Rank 0] Group 11 Loss: 4.1797 +[2025-07-07 05:29:55] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-07 05:29:55] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-07 05:29:55] [Rank 0] Group 1 FTA: 0.1458 +[2025-07-07 05:29:55] [Rank 0] Group 1 FTA: 0.1458 +[2025-07-07 05:29:55] [Rank 0] Group 2 FTA: 0.1589 +[2025-07-07 05:29:55] [Rank 0] Group 2 FTA: 0.1589 +[2025-07-07 05:29:55] [Rank 0] Group 3 FTA: 0.1172 +[2025-07-07 05:29:55] [Rank 0] Group 3 FTA: 0.1172 +[2025-07-07 05:29:55] [Rank 0] Group 4 FTA: 0.0964 +[2025-07-07 05:29:55] [Rank 0] Group 4 FTA: 0.0964 +[2025-07-07 05:29:55] [Rank 0] Group 5 FTA: 0.1406 +[2025-07-07 05:29:55] [Rank 0] Group 5 FTA: 0.1406 +[2025-07-07 05:29:55] [Rank 0] Group 6 FTA: 0.2266 +[2025-07-07 05:29:55] [Rank 0] Group 6 FTA: 0.2266 +[2025-07-07 05:29:55] [Rank 0] Group 7 FTA: 0.1875 +[2025-07-07 05:29:55] [Rank 0] Group 7 FTA: 0.1875 +[2025-07-07 05:29:55] [Rank 0] Group 8 FTA: 0.2188 +[2025-07-07 05:29:55] [Rank 0] Group 8 FTA: 0.2188 +[2025-07-07 05:29:55] [Rank 0] Group 9 FTA: 0.2070 +[2025-07-07 05:29:55] [Rank 0] Group 9 FTA: 0.2070 +[2025-07-07 05:29:55] [Rank 0] Group 10 FTA: 0.1914 +[2025-07-07 05:29:55] [Rank 0] Group 10 FTA: 0.1914 +[2025-07-07 05:29:55] [Rank 0] Group 11 FTA: 0.1973 +[2025-07-07 05:29:55] [Rank 0] Group 11 FTA: 0.1973 +[2025-07-07 05:29:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:29:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:29:56] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:29:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:29:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:29:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:29:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:29:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:29:56] [Rank 0] step:7001/10000 train_time:567883ms step_avg:81.11ms +[2025-07-07 05:29:56] [Rank 0] step:7001/10000 train_time:567883ms step_avg:81.11ms +[2025-07-07 05:29:59] [Rank 0] step:7021/10000 train_time:569397ms step_avg:81.10ms +[2025-07-07 05:29:59] [Rank 0] step:7021/10000 train_time:569397ms step_avg:81.10ms +[2025-07-07 05:30:00] [Rank 0] step:7041/10000 train_time:571561ms step_avg:81.18ms +[2025-07-07 05:30:00] [Rank 0] step:7041/10000 train_time:571561ms step_avg:81.18ms +[2025-07-07 05:30:02] [Rank 0] step:7061/10000 train_time:573052ms step_avg:81.16ms +[2025-07-07 05:30:02] [Rank 0] step:7061/10000 train_time:573052ms step_avg:81.16ms +[2025-07-07 05:30:03] [Rank 0] step:7081/10000 train_time:574545ms step_avg:81.14ms +[2025-07-07 05:30:03] [Rank 0] step:7081/10000 train_time:574545ms step_avg:81.14ms +[2025-07-07 05:30:05] [Rank 0] step:7101/10000 train_time:576039ms step_avg:81.12ms +[2025-07-07 05:30:05] [Rank 0] step:7101/10000 train_time:576039ms step_avg:81.12ms +[2025-07-07 05:30:07] [Rank 0] step:7121/10000 train_time:578205ms step_avg:81.20ms +[2025-07-07 05:30:07] [Rank 0] step:7121/10000 train_time:578205ms step_avg:81.20ms +[2025-07-07 05:30:08] [Rank 0] step:7141/10000 train_time:579699ms step_avg:81.18ms +[2025-07-07 05:30:08] [Rank 0] step:7141/10000 train_time:579699ms step_avg:81.18ms +[2025-07-07 05:30:10] [Rank 0] step:7161/10000 train_time:581193ms step_avg:81.16ms +[2025-07-07 05:30:10] [Rank 0] step:7161/10000 train_time:581193ms step_avg:81.16ms +[2025-07-07 05:30:11] [Rank 0] step:7181/10000 train_time:582690ms step_avg:81.14ms +[2025-07-07 05:30:11] [Rank 0] step:7181/10000 train_time:582690ms step_avg:81.14ms +[2025-07-07 05:30:13] [Rank 0] step:7201/10000 train_time:584871ms step_avg:81.22ms +[2025-07-07 05:30:13] [Rank 0] step:7201/10000 train_time:584871ms step_avg:81.22ms +[2025-07-07 05:30:15] [Rank 0] step:7221/10000 train_time:586346ms step_avg:81.20ms +[2025-07-07 05:30:15] [Rank 0] step:7221/10000 train_time:586346ms step_avg:81.20ms +[2025-07-07 05:30:16] [Rank 0] step:7241/10000 train_time:587843ms step_avg:81.18ms +[2025-07-07 05:30:16] [Rank 0] step:7241/10000 train_time:587843ms step_avg:81.18ms +[2025-07-07 05:30:18] [Rank 0] step:7261/10000 train_time:589341ms step_avg:81.17ms +[2025-07-07 05:30:18] [Rank 0] step:7261/10000 train_time:589341ms step_avg:81.17ms +[2025-07-07 05:30:19] [Rank 0] step:7281/10000 train_time:590840ms step_avg:81.15ms +[2025-07-07 05:30:19] [Rank 0] step:7281/10000 train_time:590840ms step_avg:81.15ms +[2025-07-07 05:30:22] [Rank 0] step:7301/10000 train_time:592988ms step_avg:81.22ms +[2025-07-07 05:30:22] [Rank 
0] step:7301/10000 train_time:592988ms step_avg:81.22ms +[2025-07-07 05:30:23] [Rank 0] step:7321/10000 train_time:594488ms step_avg:81.20ms +[2025-07-07 05:30:23] [Rank 0] step:7321/10000 train_time:594488ms step_avg:81.20ms +[2025-07-07 05:30:25] [Rank 0] step:7341/10000 train_time:595988ms step_avg:81.19ms +[2025-07-07 05:30:25] [Rank 0] step:7341/10000 train_time:595988ms step_avg:81.19ms +[2025-07-07 05:30:26] [Rank 0] step:7361/10000 train_time:597491ms step_avg:81.17ms +[2025-07-07 05:30:26] [Rank 0] step:7361/10000 train_time:597491ms step_avg:81.17ms +[2025-07-07 05:30:28] [Rank 0] step:7381/10000 train_time:599247ms step_avg:81.19ms +[2025-07-07 05:30:28] [Rank 0] step:7381/10000 train_time:599247ms step_avg:81.19ms +[2025-07-07 05:30:30] [Rank 0] step:7401/10000 train_time:601135ms step_avg:81.22ms +[2025-07-07 05:30:30] [Rank 0] step:7401/10000 train_time:601135ms step_avg:81.22ms +[2025-07-07 05:30:31] [Rank 0] step:7421/10000 train_time:602635ms step_avg:81.21ms +[2025-07-07 05:30:31] [Rank 0] step:7421/10000 train_time:602635ms step_avg:81.21ms +[2025-07-07 05:30:33] [Rank 0] step:7441/10000 train_time:604137ms step_avg:81.19ms +[2025-07-07 05:30:33] [Rank 0] step:7441/10000 train_time:604137ms step_avg:81.19ms +[2025-07-07 05:30:34] [Rank 0] step:7461/10000 train_time:605639ms step_avg:81.17ms +[2025-07-07 05:30:34] [Rank 0] step:7461/10000 train_time:605639ms step_avg:81.17ms +[2025-07-07 05:30:36] [Rank 0] step:7481/10000 train_time:607809ms step_avg:81.25ms +[2025-07-07 05:30:36] [Rank 0] step:7481/10000 train_time:607809ms step_avg:81.25ms +[2025-07-07 05:30:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:30:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:30:39] [Rank 0] PRINT: step:7500/10000 train_loss:1.7377 val_loss:1.7109 train_time:609310ms step_avg:81.24ms +[2025-07-07 05:30:39] [Rank 0] PRINT: step:7500/10000 train_loss:1.7377 val_loss:1.7109 train_time:609310ms step_avg:81.24ms +[2025-07-07 05:30:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:30:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:30:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 05:30:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 05:30:39] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:30:39] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:36:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:36:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:36:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:36:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:36:06] [Rank 0] Total Loss: 4.2370 +[2025-07-07 05:36:06] [Rank 0] Total Loss: 4.2370 +[2025-07-07 05:36:06] [Rank 0] Total FTA: 0.2027 +[2025-07-07 05:36:06] [Rank 0] Total FTA: 0.2027 +[2025-07-07 05:36:06] [Rank 0] Group 0 Loss: 4.4888 +[2025-07-07 05:36:06] [Rank 0] Group 0 Loss: 4.4888 +[2025-07-07 05:36:06] [Rank 0] Group 1 Loss: 4.5007 +[2025-07-07 05:36:06] [Rank 0] Group 1 Loss: 4.5007 +[2025-07-07 05:36:06] [Rank 0] Group 2 Loss: 4.1104 +[2025-07-07 05:36:06] [Rank 0] Group 2 Loss: 4.1104 +[2025-07-07 05:36:06] [Rank 0] Group 3 Loss: 4.2406 +[2025-07-07 05:36:06] [Rank 0] Group 3 Loss: 4.2406 +[2025-07-07 05:36:06] [Rank 0] Group 4 Loss: 4.1559 +[2025-07-07 05:36:06] [Rank 0] Group 4 Loss: 4.1559 +[2025-07-07 05:36:06] [Rank 0] Group 5 Loss: 4.0881 +[2025-07-07 05:36:06] [Rank 0] Group 5 Loss: 4.0881 +[2025-07-07 05:36:06] [Rank 0] Group 6 Loss: 4.1573 +[2025-07-07 05:36:06] [Rank 0] Group 6 Loss: 4.1573 +[2025-07-07 05:36:06] [Rank 0] Group 7 Loss: 4.1890 +[2025-07-07 05:36:06] [Rank 0] Group 7 Loss: 4.1890 +[2025-07-07 05:36:06] [Rank 0] Group 8 Loss: 4.1857 +[2025-07-07 05:36:06] [Rank 0] Group 8 Loss: 4.1857 +[2025-07-07 05:36:06] [Rank 0] Group 9 Loss: 4.1657 +[2025-07-07 05:36:06] [Rank 0] Group 9 Loss: 4.1657 +[2025-07-07 05:36:06] [Rank 0] Group 10 Loss: 4.1919 +[2025-07-07 05:36:06] [Rank 0] Group 10 Loss: 4.1919 +[2025-07-07 05:36:06] [Rank 0] Group 11 Loss: 4.1889 +[2025-07-07 05:36:06] [Rank 0] Group 11 Loss: 4.1889 +[2025-07-07 05:36:06] [Rank 0] Group 0 FTA: 0.1521 +[2025-07-07 05:36:06] [Rank 0] Group 0 FTA: 0.1521 +[2025-07-07 05:36:06] [Rank 0] Group 1 FTA: 0.1172 +[2025-07-07 05:36:06] [Rank 0] Group 1 FTA: 0.1172 +[2025-07-07 05:36:06] [Rank 0] Group 2 FTA: 0.3464 +[2025-07-07 05:36:06] [Rank 0] Group 2 FTA: 0.3464 +[2025-07-07 05:36:06] [Rank 0] Group 3 FTA: 0.1250 +[2025-07-07 05:36:06] [Rank 0] Group 3 FTA: 0.1250 +[2025-07-07 05:36:06] [Rank 0] Group 4 FTA: 0.1927 +[2025-07-07 05:36:06] [Rank 0] Group 4 FTA: 0.1927 +[2025-07-07 05:36:06] [Rank 0] Group 5 FTA: 0.1562 +[2025-07-07 05:36:06] [Rank 0] Group 5 FTA: 0.1562 +[2025-07-07 05:36:06] [Rank 0] Group 6 FTA: 0.2448 +[2025-07-07 05:36:06] [Rank 0] Group 6 FTA: 0.2448 +[2025-07-07 05:36:06] [Rank 0] Group 7 FTA: 0.2604 +[2025-07-07 05:36:06] [Rank 0] Group 7 FTA: 0.2604 +[2025-07-07 05:36:06] [Rank 0] Group 8 FTA: 0.2161 +[2025-07-07 05:36:06] [Rank 0] Group 8 FTA: 0.2161 +[2025-07-07 05:36:06] [Rank 0] Group 9 FTA: 0.2539 +[2025-07-07 05:36:06] [Rank 0] Group 9 FTA: 0.2539 +[2025-07-07 05:36:06] [Rank 0] Group 10 FTA: 0.2031 +[2025-07-07 05:36:06] [Rank 0] Group 10 FTA: 0.2031 +[2025-07-07 05:36:06] [Rank 0] Group 11 FTA: 0.2139 +[2025-07-07 05:36:06] [Rank 0] Group 11 FTA: 0.2139 +[2025-07-07 05:36:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:36:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 05:36:07] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:36:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 05:36:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:36:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 05:36:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:36:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 05:36:07] [Rank 0] step:7501/10000 train_time:609331ms step_avg:81.23ms +[2025-07-07 05:36:07] [Rank 0] step:7501/10000 train_time:609331ms step_avg:81.23ms +[2025-07-07 05:36:09] [Rank 0] step:7521/10000 train_time:610819ms step_avg:81.22ms +[2025-07-07 05:36:09] [Rank 0] step:7521/10000 train_time:610819ms step_avg:81.22ms +[2025-07-07 05:36:10] [Rank 0] step:7541/10000 train_time:612309ms step_avg:81.20ms +[2025-07-07 05:36:10] [Rank 0] step:7541/10000 train_time:612309ms step_avg:81.20ms +[2025-07-07 05:36:13] [Rank 0] step:7561/10000 train_time:614482ms step_avg:81.27ms +[2025-07-07 05:36:13] [Rank 0] step:7561/10000 train_time:614482ms step_avg:81.27ms +[2025-07-07 05:36:14] [Rank 0] step:7581/10000 train_time:615953ms step_avg:81.25ms +[2025-07-07 05:36:14] [Rank 0] step:7581/10000 train_time:615953ms step_avg:81.25ms +[2025-07-07 05:36:16] [Rank 0] step:7601/10000 train_time:617444ms step_avg:81.23ms +[2025-07-07 05:36:16] [Rank 0] step:7601/10000 train_time:617444ms step_avg:81.23ms +[2025-07-07 05:36:17] [Rank 0] step:7621/10000 train_time:618937ms step_avg:81.21ms +[2025-07-07 05:36:17] [Rank 0] step:7621/10000 train_time:618937ms step_avg:81.21ms +[2025-07-07 05:36:19] [Rank 0] step:7641/10000 train_time:620432ms step_avg:81.20ms +[2025-07-07 05:36:19] [Rank 0] step:7641/10000 train_time:620432ms step_avg:81.20ms +[2025-07-07 05:36:21] [Rank 0] step:7661/10000 train_time:622569ms step_avg:81.26ms +[2025-07-07 05:36:21] [Rank 0] step:7661/10000 train_time:622569ms step_avg:81.26ms +[2025-07-07 05:36:22] [Rank 0] step:7681/10000 train_time:624065ms step_avg:81.25ms +[2025-07-07 05:36:22] [Rank 0] step:7681/10000 train_time:624065ms step_avg:81.25ms +[2025-07-07 05:36:24] [Rank 0] step:7701/10000 train_time:625561ms step_avg:81.23ms +[2025-07-07 05:36:24] [Rank 0] step:7701/10000 train_time:625561ms step_avg:81.23ms +[2025-07-07 05:36:25] [Rank 0] step:7721/10000 train_time:627059ms step_avg:81.21ms +[2025-07-07 05:36:25] [Rank 0] step:7721/10000 train_time:627059ms step_avg:81.21ms +[2025-07-07 05:36:27] [Rank 0] step:7741/10000 train_time:628610ms step_avg:81.21ms +[2025-07-07 05:36:27] [Rank 0] step:7741/10000 train_time:628610ms step_avg:81.21ms +[2025-07-07 05:36:29] [Rank 0] step:7761/10000 train_time:630713ms step_avg:81.27ms +[2025-07-07 05:36:29] [Rank 0] step:7761/10000 train_time:630713ms step_avg:81.27ms +[2025-07-07 05:36:30] [Rank 0] step:7781/10000 train_time:632214ms step_avg:81.25ms +[2025-07-07 05:36:30] [Rank 0] step:7781/10000 train_time:632214ms step_avg:81.25ms +[2025-07-07 05:36:32] [Rank 0] step:7801/10000 train_time:633717ms step_avg:81.24ms +[2025-07-07 05:36:32] [Rank 
0] step:7801/10000 train_time:633717ms step_avg:81.24ms
+[2025-07-07 05:36:33] [Rank 0] step:7821/10000 train_time:635219ms step_avg:81.22ms
+[2025-07-07 05:36:36] [Rank 0] step:7841/10000 train_time:637385ms step_avg:81.29ms
+[2025-07-07 05:36:37] [Rank 0] step:7861/10000 train_time:638886ms step_avg:81.27ms
+[2025-07-07 05:36:39] [Rank 0] step:7881/10000 train_time:640386ms step_avg:81.26ms
+[2025-07-07 05:36:40] [Rank 0] step:7901/10000 train_time:641890ms step_avg:81.24ms
+[2025-07-07 05:36:42] [Rank 0] step:7921/10000 train_time:643393ms step_avg:81.23ms
+[2025-07-07 05:36:44] [Rank 0] step:7941/10000 train_time:645533ms step_avg:81.29ms
+[2025-07-07 05:36:45] [Rank 0] step:7961/10000 train_time:647036ms step_avg:81.28ms
+[2025-07-07 05:36:47] [Rank 0] step:7981/10000 train_time:648541ms step_avg:81.26ms
+[2025-07-07 05:36:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:36:49] [Rank 0] PRINT: step:8000/10000 train_loss:1.6871 val_loss:1.6671 train_time:650045ms step_avg:81.26ms
+[2025-07-07 05:36:49] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:36:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:36:49] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:42:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:42:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:42:15] [Rank 0] Total Loss: 4.2230
+[2025-07-07 05:42:15] [Rank 0] Total FTA: 0.2075
+[2025-07-07 05:42:15] [Rank 0] Group 0 Loss: 4.4635
+[2025-07-07 05:42:15] [Rank 0] Group 1 Loss: 4.3618
+[2025-07-07 05:42:15] [Rank 0] Group 2 Loss: 4.0644
+[2025-07-07 05:42:15] [Rank 0] Group 3 Loss: 4.2174
+[2025-07-07 05:42:15] [Rank 0] Group 4 Loss: 4.1457
+[2025-07-07 05:42:15] [Rank 0] Group 5 Loss: 4.1484
+[2025-07-07 05:42:15] [Rank 0] Group 6 Loss: 4.1469
+[2025-07-07 05:42:15] [Rank 0] Group 7 Loss: 4.2190
+[2025-07-07 05:42:15] [Rank 0] Group 8 Loss: 4.1859
+[2025-07-07 05:42:15] [Rank 0] Group 9 Loss: 4.1860
+[2025-07-07 05:42:15] [Rank 0] Group 10 Loss: 4.1765
+[2025-07-07 05:42:15] [Rank 0] Group 11 Loss: 4.1853
+[2025-07-07 05:42:15] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 05:42:15] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 05:42:15] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 05:42:15] [Rank 0] Group 3 FTA: 0.1094
+[2025-07-07 05:42:15] [Rank 0] Group 4 FTA: 0.1693
+[2025-07-07 05:42:16] [Rank 0] Group 5 FTA: 0.2031
+[2025-07-07 05:42:16] [Rank 0] Group 6 FTA: 0.2396
+[2025-07-07 05:42:16] [Rank 0] Group 7 FTA: 0.2839
+[2025-07-07 05:42:16] [Rank 0] Group 8 FTA: 0.2839
+[2025-07-07 05:42:16] [Rank 0] Group 9 FTA: 0.2695
+[2025-07-07 05:42:16] [Rank 0] Group 10 FTA: 0.2363
+[2025-07-07 05:42:16] [Rank 0] Group 11 FTA: 0.2236
+[2025-07-07 05:42:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 05:42:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 05:42:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 05:42:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 05:42:17] [Rank 0] step:8001/10000 train_time:650067ms step_avg:81.25ms
+[2025-07-07 05:42:19] [Rank 0] step:8021/10000 train_time:652199ms step_avg:81.31ms
+[2025-07-07 05:42:21] [Rank 0] step:8041/10000 train_time:653689ms step_avg:81.29ms
+[2025-07-07 05:42:22] [Rank 0] step:8061/10000 train_time:655179ms step_avg:81.28ms
+[2025-07-07 05:42:24] [Rank 0] step:8081/10000 train_time:656671ms step_avg:81.26ms
+[2025-07-07 05:42:26] [Rank 0] step:8101/10000 train_time:658164ms step_avg:81.24ms
+[2025-07-07 05:42:27] [Rank 0] step:8121/10000 train_time:660312ms step_avg:81.31ms
+[2025-07-07 05:42:29] [Rank 0] step:8141/10000 train_time:661805ms step_avg:81.29ms
+[2025-07-07 05:42:30] [Rank 0] step:8161/10000 train_time:663299ms step_avg:81.28ms
+[2025-07-07 05:42:32] [Rank 0] step:8181/10000 train_time:664797ms step_avg:81.26ms
+[2025-07-07 05:42:34] [Rank 0] step:8201/10000 train_time:666953ms step_avg:81.33ms
+[2025-07-07 05:42:35] [Rank 0] step:8221/10000 train_time:668450ms step_avg:81.31ms
+[2025-07-07 05:42:37] [Rank 0] step:8241/10000 train_time:669948ms step_avg:81.29ms
+[2025-07-07 05:42:38] [Rank 0] step:8261/10000 train_time:671447ms step_avg:81.28ms
+[2025-07-07 05:42:41] [Rank 0] step:8281/10000 train_time:672996ms step_avg:81.27ms
+[2025-07-07 05:42:42] [Rank 0] step:8301/10000 train_time:675101ms step_avg:81.33ms
+[2025-07-07 05:42:44] [Rank 0] step:8321/10000 train_time:676600ms step_avg:81.31ms
+[2025-07-07 05:42:45] [Rank 0] step:8341/10000 train_time:678100ms step_avg:81.30ms
+[2025-07-07 05:42:47] [Rank 0] step:8361/10000 train_time:679601ms step_avg:81.28ms
+[2025-07-07 05:42:49] [Rank 0] step:8381/10000 train_time:681739ms step_avg:81.34ms
+[2025-07-07 05:42:50] [Rank 0] step:8401/10000 train_time:683237ms step_avg:81.33ms
+[2025-07-07 05:42:52] [Rank 0] step:8421/10000 train_time:684738ms step_avg:81.31ms
+[2025-07-07 05:42:53] [Rank 0] step:8441/10000 train_time:686238ms step_avg:81.30ms
+[2025-07-07 05:42:55] [Rank 0] step:8461/10000 train_time:688404ms step_avg:81.36ms
+[2025-07-07 05:42:57] [Rank 0] step:8481/10000 train_time:689885ms step_avg:81.34ms
+[2025-07-07 05:42:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:42:59] [Rank 0] PRINT: step:8500/10000 train_loss:1.6480 val_loss:1.6329 train_time:691388ms step_avg:81.34ms
+[2025-07-07 05:42:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:42:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:42:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:48:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:48:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:48:26] [Rank 0] Total Loss: 4.2599
+[2025-07-07 05:48:26] [Rank 0] Total FTA: 0.2008
+[2025-07-07 05:48:26] [Rank 0] Group 0 Loss: 4.6107
+[2025-07-07 05:48:26] [Rank 0] Group 1 Loss: 4.5012
+[2025-07-07 05:48:26] [Rank 0] Group 2 Loss: 3.9978
+[2025-07-07 05:48:26] [Rank 0] Group 3 Loss: 4.2122
+[2025-07-07 05:48:26] [Rank 0] Group 4 Loss: 4.1339
+[2025-07-07 05:48:26] [Rank 0] Group 5 Loss: 4.1175
+[2025-07-07 05:48:26] [Rank 0] Group 6 Loss: 4.1969
+[2025-07-07 05:48:26] [Rank 0] Group 7 Loss: 4.2192
+[2025-07-07 05:48:26] [Rank 0] Group 8 Loss: 4.1902
+[2025-07-07 05:48:26] [Rank 0] Group 9 Loss: 4.1615
+[2025-07-07 05:48:26] [Rank 0] Group 10 Loss: 4.2008
+[2025-07-07 05:48:26] [Rank 0] Group 11 Loss: 4.2421
+[2025-07-07 05:48:26] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 05:48:27] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 05:48:27] [Rank 0] Group 2 FTA: 0.3203
+[2025-07-07 05:48:27] [Rank 0] Group 3 FTA: 0.1719
+[2025-07-07 05:48:27] [Rank 0] Group 4 FTA: 0.2604
+[2025-07-07 05:48:27] [Rank 0] Group 5 FTA: 0.2708
+[2025-07-07 05:48:27] [Rank 0] Group 6 FTA: 0.2526
+[2025-07-07 05:48:27] [Rank 0] Group 7 FTA: 0.2708
+[2025-07-07 05:48:27] [Rank 0] Group 8 FTA: 0.2526
+[2025-07-07 05:48:27] [Rank 0] Group 9 FTA: 0.2344
+[2025-07-07 05:48:27] [Rank 0] Group 10 FTA: 0.2305
+[2025-07-07 05:48:27] [Rank 0] Group 11 FTA: 0.2559
+[2025-07-07 05:48:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 05:48:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 05:48:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 05:48:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 05:48:28] [Rank 0] step:8501/10000 train_time:691411ms step_avg:81.33ms
+[2025-07-07 05:48:30] [Rank 0] step:8521/10000 train_time:692907ms step_avg:81.32ms
+[2025-07-07 05:48:31] [Rank 0] step:8541/10000 train_time:694401ms step_avg:81.30ms
+[2025-07-07 05:48:33] [Rank 0] step:8561/10000 train_time:696560ms step_avg:81.36ms
+[2025-07-07 05:48:35] [Rank 0] step:8581/10000 train_time:698052ms step_avg:81.35ms
+[2025-07-07 05:48:36] [Rank 0] step:8601/10000 train_time:699610ms step_avg:81.34ms
+[2025-07-07 05:48:38] [Rank 0] step:8621/10000 train_time:701103ms step_avg:81.33ms
+[2025-07-07 05:48:39] [Rank 0] step:8641/10000 train_time:702850ms step_avg:81.34ms
+[2025-07-07 05:48:41] [Rank 0] step:8661/10000 train_time:704326ms step_avg:81.32ms
+[2025-07-07 05:48:42] [Rank 0] step:8681/10000 train_time:705821ms step_avg:81.31ms
+[2025-07-07 05:48:44] [Rank 0] step:8701/10000 train_time:707320ms step_avg:81.29ms
+[2025-07-07 05:48:45] [Rank 0] step:8721/10000 train_time:708818ms step_avg:81.28ms
+[2025-07-07 05:48:47] [Rank 0] step:8741/10000 train_time:710549ms step_avg:81.29ms
+[2025-07-07 05:48:49] [Rank 0] step:8761/10000 train_time:712048ms step_avg:81.27ms
+[2025-07-07 05:48:50] [Rank 0] step:8781/10000 train_time:713547ms step_avg:81.26ms
+[2025-07-07 05:48:52] [Rank 0] step:8801/10000 train_time:715047ms step_avg:81.25ms
+[2025-07-07 05:48:54] [Rank 0] step:8821/10000 train_time:716598ms step_avg:81.24ms
+[2025-07-07 05:48:55] [Rank 0] step:8841/10000 train_time:718690ms step_avg:81.29ms
+[2025-07-07 05:48:57] [Rank 0] step:8861/10000 train_time:720190ms step_avg:81.28ms
+[2025-07-07 05:48:58] [Rank 0] step:8881/10000 train_time:721691ms step_avg:81.26ms
+[2025-07-07 05:49:00] [Rank 0] step:8901/10000 train_time:723190ms step_avg:81.25ms
+[2025-07-07 05:49:02] [Rank 0] step:8921/10000 train_time:725331ms step_avg:81.31ms
+[2025-07-07 05:49:03] [Rank 0] step:8941/10000 train_time:726830ms step_avg:81.29ms
+[2025-07-07 05:49:05] [Rank 0] step:8961/10000 train_time:728338ms step_avg:81.28ms
+[2025-07-07 05:49:06] [Rank 0] step:8981/10000 train_time:729839ms step_avg:81.26ms
+[2025-07-07 05:49:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:49:09] [Rank 0] PRINT: step:9000/10000 train_loss:1.6183 val_loss:1.6074 train_time:731339ms step_avg:81.26ms
+[2025-07-07 05:49:09] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:49:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:49:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:54:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:54:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:54:34] [Rank 0] Total Loss: 4.3099
+[2025-07-07 05:54:34] [Rank 0] Total FTA: 0.2592
+[2025-07-07 05:54:34] [Rank 0] Group 0 Loss: 4.7039
+[2025-07-07 05:54:34] [Rank 0] Group 1 Loss: 4.7806
+[2025-07-07 05:54:34] [Rank 0] Group 2 Loss: 4.0902
+[2025-07-07 05:54:34] [Rank 0] Group 3 Loss: 4.2597
+[2025-07-07 05:54:34] [Rank 0] Group 4 Loss: 4.1354
+[2025-07-07 05:54:34] [Rank 0] Group 5 Loss: 4.1656
+[2025-07-07 05:54:34] [Rank 0] Group 6 Loss: 4.1690
+[2025-07-07 05:54:34] [Rank 0] Group 7 Loss: 4.2408
+[2025-07-07 05:54:34] [Rank 0] Group 8 Loss: 4.2459
+[2025-07-07 05:54:34] [Rank 0] Group 9 Loss: 4.1517
+[2025-07-07 05:54:34] [Rank 0] Group 10 Loss: 4.2533
+[2025-07-07 05:54:34] [Rank 0] Group 11 Loss: 4.2287
+[2025-07-07 05:54:34] [Rank 0] Group 0 FTA: 0.3667
+[2025-07-07 05:54:34] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-07 05:54:34] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 05:54:34] [Rank 0] Group 3 FTA: 0.2552
+[2025-07-07 05:54:34] [Rank 0] Group 4 FTA: 0.2786
+[2025-07-07 05:54:34] [Rank 0] Group 5 FTA: 0.2370
+[2025-07-07 05:54:34] [Rank 0] Group 6 FTA: 0.2578
+[2025-07-07 05:54:34] [Rank 0] Group 7 FTA: 0.2760
+[2025-07-07 05:54:34] [Rank 0] Group 8 FTA: 0.2891
+[2025-07-07 05:54:34] [Rank 0] Group 9 FTA: 0.2656
+[2025-07-07 05:54:34] [Rank 0] Group 10 FTA: 0.2539
+[2025-07-07 05:54:34] [Rank 0] Group 11 FTA: 0.2412
+[2025-07-07 05:54:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 05:54:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 05:54:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 05:54:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 05:54:37] [Rank 0] step:9001/10000 train_time:732103ms step_avg:81.34ms
+[2025-07-07 05:54:38] [Rank 0] step:9021/10000 train_time:733610ms step_avg:81.32ms
+[2025-07-07 05:54:40] [Rank 0] step:9041/10000 train_time:735101ms step_avg:81.31ms
+[2025-07-07 05:54:41] [Rank 0] step:9061/10000 train_time:736593ms step_avg:81.29ms
+[2025-07-07 05:54:43] [Rank 0] step:9081/10000 train_time:738089ms step_avg:81.28ms
+[2025-07-07 05:54:45] [Rank 0] step:9101/10000 train_time:740248ms step_avg:81.34ms
+[2025-07-07 05:54:46] [Rank 0] step:9121/10000 train_time:741745ms step_avg:81.32ms
+[2025-07-07 05:54:48] [Rank 0] step:9141/10000 train_time:743239ms step_avg:81.31ms
+[2025-07-07 05:54:49] [Rank 0] step:9161/10000 train_time:744737ms step_avg:81.29ms
+[2025-07-07 05:54:51] [Rank 0] step:9181/10000 train_time:746238ms step_avg:81.28ms
+[2025-07-07 05:54:53] [Rank 0] step:9201/10000 train_time:748394ms step_avg:81.34ms
+[2025-07-07 05:54:55] [Rank 0] step:9221/10000 train_time:750028ms step_avg:81.34ms
+[2025-07-07 05:54:56] [Rank 0] step:9241/10000 train_time:751530ms step_avg:81.33ms
+[2025-07-07 05:54:58] [Rank 0] step:9261/10000 train_time:753027ms step_avg:81.31ms
+[2025-07-07 05:54:59] [Rank 0] step:9281/10000 train_time:754758ms step_avg:81.32ms
+[2025-07-07 05:55:01] [Rank 0] step:9301/10000 train_time:756255ms step_avg:81.31ms
+[2025-07-07 05:55:02] [Rank 0] step:9321/10000 train_time:757752ms step_avg:81.30ms
+[2025-07-07 05:55:04] [Rank 0] step:9341/10000 train_time:759251ms step_avg:81.28ms
+[2025-07-07 05:55:06] [Rank 0] step:9361/10000 train_time:761003ms step_avg:81.30ms
+[2025-07-07 05:55:07] [Rank 0] step:9381/10000 train_time:762485ms step_avg:81.28ms
+[2025-07-07 05:55:09] [Rank 0] step:9401/10000 train_time:763985ms step_avg:81.27ms
+[2025-07-07 05:55:10] [Rank 0] step:9421/10000 train_time:765485ms step_avg:81.25ms
+[2025-07-07 05:55:12] [Rank 0] step:9441/10000 train_time:766986ms step_avg:81.24ms
+[2025-07-07 05:55:14] [Rank 0] step:9461/10000 train_time:769152ms step_avg:81.30ms
+[2025-07-07 05:55:15] [Rank 0] step:9481/10000 train_time:770655ms step_avg:81.28ms
+[2025-07-07 05:55:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:55:18] [Rank 0] PRINT: step:9500/10000 train_loss:1.5960 val_loss:1.5884 train_time:772156ms step_avg:81.28ms
+[2025-07-07 05:55:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:55:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:55:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:00:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:00:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:00:43] [Rank 0] Total Loss: 4.2568
+[2025-07-07 06:00:43] [Rank 0] Total FTA: 0.2792
+[2025-07-07 06:00:43] [Rank 0] Group 0 Loss: 4.5199
+[2025-07-07 06:00:43] [Rank 0] Group 1 Loss: 4.5641
+[2025-07-07 06:00:43] [Rank 0] Group 2 Loss: 4.1134
+[2025-07-07 06:00:43] [Rank 0] Group 3 Loss: 4.2103
+[2025-07-07 06:00:43] [Rank 0] Group 4 Loss: 4.1287
+[2025-07-07 06:00:43] [Rank 0] Group 5 Loss: 4.1633
+[2025-07-07 06:00:43] [Rank 0] Group 6 Loss: 4.1628
+[2025-07-07 06:00:43] [Rank 0] Group 7 Loss: 4.2209
+[2025-07-07 06:00:43] [Rank 0] Group 8 Loss: 4.1720
+[2025-07-07 06:00:43] [Rank 0] Group 9 Loss: 4.2433
+[2025-07-07 06:00:43] [Rank 0] Group 10 Loss: 4.2364
+[2025-07-07 06:00:43] [Rank 0] Group 11 Loss: 4.1922
+[2025-07-07 06:00:43] [Rank 0] Group 0 FTA: 0.5137
+[2025-07-07 06:00:43] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:00:43] [Rank 0] Group 2 FTA: 0.2474
+[2025-07-07 06:00:43] [Rank 0] Group 3 FTA: 0.1745
+[2025-07-07 06:00:43] [Rank 0] Group 4 FTA: 0.2526
+[2025-07-07 06:00:43] [Rank 0] Group 5 FTA: 0.2708
+[2025-07-07 06:00:43] [Rank 0] Group 6 FTA: 0.2500
+[2025-07-07 06:00:43] [Rank 0] Group 7 FTA: 0.2995
+[2025-07-07 06:00:43] [Rank 0] Group 8 FTA: 0.2344
+[2025-07-07 06:00:43] [Rank 0] Group 9 FTA: 0.2461
+[2025-07-07 06:00:43] [Rank 0] Group 10 FTA: 0.3008
+[2025-07-07 06:00:43] [Rank 0] Group 11 FTA: 0.2900
+[2025-07-07 06:00:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 06:00:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 06:00:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 06:00:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 06:00:44] [Rank 0] step:9501/10000 train_time:772178ms step_avg:81.27ms
+[2025-07-07 06:00:46] [Rank 0] step:9521/10000 train_time:773668ms step_avg:81.26ms
+[2025-07-07 06:00:48] [Rank 0] step:9541/10000 train_time:775209ms step_avg:81.25ms
+[2025-07-07 06:00:49] [Rank 0] step:9561/10000 train_time:777295ms step_avg:81.30ms
+[2025-07-07 06:00:51] [Rank 0] step:9581/10000 train_time:778788ms step_avg:81.28ms
+[2025-07-07 06:00:52] [Rank 0] step:9601/10000 train_time:780281ms step_avg:81.27ms
+[2025-07-07 06:00:54] [Rank 0] step:9621/10000 train_time:781775ms step_avg:81.26ms
+[2025-07-07 06:00:56] [Rank 0] step:9641/10000 train_time:783939ms step_avg:81.31ms
+[2025-07-07 06:00:58] [Rank 0] step:9661/10000 train_time:785433ms step_avg:81.30ms
+[2025-07-07 06:00:59] [Rank 0] step:9681/10000 train_time:786930ms step_avg:81.29ms
+[2025-07-07 06:01:01] [Rank 0] step:9701/10000 train_time:788428ms step_avg:81.27ms
+[2025-07-07 06:01:03] [Rank 0] step:9721/10000 train_time:789973ms step_avg:81.26ms
+[2025-07-07 06:01:04] [Rank 0] step:9741/10000 train_time:792085ms step_avg:81.31ms
+[2025-07-07 06:01:06] [Rank 0] step:9761/10000 train_time:793584ms step_avg:81.30ms
+[2025-07-07 06:01:07] [Rank 0] step:9781/10000 train_time:795081ms step_avg:81.29ms
+[2025-07-07 06:01:09] [Rank 0] step:9801/10000 train_time:796581ms step_avg:81.28ms
+[2025-07-07 06:01:11] [Rank 0] step:9821/10000 train_time:798724ms step_avg:81.33ms
+[2025-07-07 06:01:12] [Rank 0] step:9841/10000 train_time:800224ms step_avg:81.32ms
+[2025-07-07 06:01:14] [Rank 0] step:9861/10000 train_time:801723ms step_avg:81.30ms
+[2025-07-07 06:01:15] [Rank 0] step:9881/10000 train_time:803374ms step_avg:81.30ms
+[2025-07-07 06:01:18] [Rank 0] step:9901/10000 train_time:804925ms step_avg:81.30ms
+[2025-07-07 06:01:19] [Rank 0] step:9921/10000 train_time:807033ms step_avg:81.35ms
+[2025-07-07 06:01:21] [Rank 0] step:9941/10000 train_time:808533ms step_avg:81.33ms
+[2025-07-07 06:01:22] [Rank 0] step:9961/10000 train_time:810034ms step_avg:81.32ms
+[2025-07-07 06:01:24] [Rank 0] step:9981/10000 train_time:811537ms step_avg:81.31ms
+[2025-07-07 06:01:26] [Rank 0] step:10000/10000 train_time:813626ms step_avg:81.36ms
+[2025-07-07 06:01:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:01:27] [Rank 0] PRINT: step:10000/10000 train_loss:1.5803 val_loss:1.5757 train_time:813707ms step_avg:81.37ms
+[2025-07-07 06:01:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:01:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:01:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:06:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:06:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:06:52] [Rank 0] Total Loss: 4.2852
+[2025-07-07 06:06:52] [Rank 0] Total FTA: 0.2789
+[2025-07-07 06:06:52] [Rank 0] Group 0 Loss: 4.6813
+[2025-07-07 06:06:52] [Rank 0] Group 1 Loss: 4.4982
+[2025-07-07 06:06:52] [Rank 0] Group 2 Loss: 4.0800
+[2025-07-07 06:06:52] [Rank 0] Group 3 Loss: 4.1901
+[2025-07-07 06:06:52] [Rank 0] Group 4 Loss: 4.1430
+[2025-07-07 06:06:52] [Rank 0] Group 5 Loss: 4.1086
+[2025-07-07 06:06:52] [Rank 0] Group 6 Loss: 4.1748
+[2025-07-07 06:06:52] [Rank 0] Group 7 Loss: 4.3035
+[2025-07-07 06:06:52] [Rank 0] Group 8 Loss: 4.1858
+[2025-07-07 06:06:52] [Rank 0] Group 9 Loss: 4.2216
+[2025-07-07 06:06:52] [Rank 0] Group 10 Loss: 4.2637
+[2025-07-07 06:06:52] [Rank 0] Group 11 Loss: 4.2387
+[2025-07-07 06:06:52] [Rank 0] Group 0 FTA: 0.3329
+[2025-07-07 06:06:52] [Rank 0] Group 1 FTA: 0.3464
+[2025-07-07 06:06:52] [Rank 0] Group 2 FTA: 0.2214
+[2025-07-07 06:06:52] [Rank 0] Group 3 FTA: 0.1927
+[2025-07-07 06:06:52] [Rank 0] Group 4 FTA: 0.1901
+[2025-07-07 06:06:52] [Rank 0] Group 5 FTA: 0.2604
+[2025-07-07 06:06:52] [Rank 0] Group 6 FTA: 0.2917
+[2025-07-07 06:06:52] [Rank 0] Group 7 FTA: 0.3073
+[2025-07-07 06:06:52] [Rank 0] Group 8 FTA: 0.2682
+[2025-07-07 06:06:52] [Rank 0] Group 9 FTA: 0.2539
+[2025-07-07 06:06:52] [Rank 0] Group 10 FTA: 0.2930
+[2025-07-07 06:06:52] [Rank 0] Group 11 FTA: 0.2949
+[2025-07-07 06:06:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 06:06:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 06:06:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 06:06:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 06:06:53] [Rank 0] step:10001/10000 train_time:813727ms step_avg:81.36ms
+[2025-07-07 06:06:53] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 06:06:53 2025 ---
+[2025-07-07 06:06:53] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9956 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..cec4fb20bfaa7bfd972eff9ebc7c7cdb4e58bc8f
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 48,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "5943336e-e338-474e-b76f-5095c0802128",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..16bcb0fcf50f787abae88f8afd9157f50e4e0863
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:99dc5db1e86265aee9544797b39dcc8a7815be6c7be0e2324f3e780a05b98485
+size 339448
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..e5d38820d84b2f5e7d6cc6346f86a6cca375d402
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56a24d86ab4668de5f0904d7c21789ccc989cf387a4006952ced9939f986da92
+size 243718
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..77fd9db0f862819a1a53a16a07680a3f9c8dae9b
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d47bf3d1aaa7366724d07078d06a5ba2ce7607dab706bfb1d6228de29edbc486
+size 93919
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..da0ebb481e9f973e559e337cb5f41ccd33afa9b9
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae138e6adce4017b5de71e01728aa4be580dfc3c10660f1a1146879cb8b4fbb6
+size 98646
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/training_log_5943336e-e338-474e-b76f-5095c0802128.txt b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/training_log_5943336e-e338-474e-b76f-5095c0802128.txt
new file mode 100644
index 0000000000000000000000000000000000000000..d5352e4615432a3e155042cd0f4183b300110bb2
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/training_log_5943336e-e338-474e-b76f-5095c0802128.txt
@@ -0,0 +1,5132 @@
+[2025-07-07 04:09:05] [Rank 0] PRINT: --- Script Start: Mon Jul 7 04:09:05 2025 ---
+[2025-07-07 04:09:05] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0001)
+[2025-07-07 04:09:05] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 04:09:05] [Rank 0] PRINT: Using fixed seed: 48
+[2025-07-07 04:09:05] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48
+[2025-07-07 04:09:05] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so training can run past one epoch
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
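+# Illustrative note on the shard format assumed by _load_data_shard above: each
+# .bin file begins with a 256-int32 header (header[0] = 20240520 magic,
+# header[1] = 1 version, header[2] = token count), followed by the tokens as
+# uint16 values starting at byte offset 256 * 4 = 1024. Each rank then reads
+# only its own local_batch_size slice of every global batch.
+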
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
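+# Illustrative note: with the CLI args recorded in config.json above
+# (optimizer_mode=0, model_parameterization="qkvo", adam_lr=0.0001, seed=48),
+# run_dir_path_str resolves to
+# "/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48".
+# Likewise, train_seq_len = 12*1024 = 12288 and val_seq_len = 4*16*1024 = 65536,
+# the values serialized into "hyperparameters" in config.json.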
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file exactly once (a duplicated write
+        # here would double every line in the training log)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
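+        # Illustrative example (hypothetical data): a cleaned text like
+        # "What company does Alice work for? Meta" splits into
+        # prompt = "What company does Alice work for?" and answer = "Meta".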
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # keep only the first num_samples cached samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            result = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # The model may return a (loss, logits) tuple; take the last element
+            # as the logits, consistent with compute_first_token_accuracy above.
+            logits = result[-1] if isinstance(result, tuple) else result
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if 0 < prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
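+    # Note on expected_token: it is the first BPE token of ' ' + answer. The
+    # leading space matters with the GPT-2 tokenizer, which encodes " Paris"
+    # and "Paris" as different tokens; in the QA text the answer typically
+    # follows "Answer:" plus a space, so the space-prefixed token is the one
+    # the model should predict first.
+
+    # 4. 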
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    # History is keyed by training step (see the main loop), so label the x-axis accordingly
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
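+# Worked example of the schedules above (illustrative comment, not executed):
+# with num_iterations=10000 and cooldown_frac=0.8, get_lr holds the LR
+# multiplier at 1.0 for the first 2000 steps, then decays linearly toward 0.1:
+#   get_lr(1000)  -> 1.0                       (x=0.1, stable phase)
+#   get_lr(6000)  -> 0.5*1.0 + 0.5*0.1 = 0.55  (x=0.6, w=(1-0.6)/0.8=0.5)
+#   get_lr(10000) -> 0.1                       (x=1.0, w=0)
+# get_window_size_blocks similarly ramps the attention window with progress,
+# from 128 tokens at step 0 up to next_multiple_of_n(1728, n=128) = 1792
+# tokens at the final step. In the loop below, Muon's momentum is also warmed
+# up linearly from 0.85 to 0.95 over the first 300 steps.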
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 04:09:05] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
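+# Each run writes its artifacts under
+# logs_bios/qa_0704/mode_{optimizer_mode}_param_{parameterization}_lr_{adam_lr}_seed_{seed}/:
+# config.json, training_log_{uuid}.txt, and the four curve plots
+# (per_class_loss_curves.png, per_class_acc_curves.png, total_loss_curve.png,
+# total_acc_curve.png). A typical launch looks like this (script name and GPU
+# count are placeholders, not fixed by this file):
+#   torchrun --nproc_per_node=8 <train_script>.py --optimizer_mode 0 \
+#       --model_parameterization qkvo --adam_lr 1e-4 --seed 42
+# torchrun sets the RANK/LOCAL_RANK/WORLD_SIZE environment variables that the
+# DDP setup above reads.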
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            result = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # The model may return a (loss, logits) tuple; take the last element
+            # as the logits, consistent with compute_first_token_accuracy above.
+            logits = result[-1] if isinstance(result, tuple) else result
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if 0 < prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    # History is keyed by training step (see the main loop), so label the x-axis accordingly
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    # 1. Load the original QA data.
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    # 2. Optionally subsample it, stratified by class, down to ~num_samples items.
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 3. Map each class_id to its power-law group.
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            # Pad to a multiple of the attention block size, capped at max_eval_len.
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            # Targets are the inputs shifted left by one; padded positions are ignored via -100.
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curves from aggregated history data."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty.
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
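+# A worked example of the stable-then-decay schedule defined in get_lr() above
+# (a sketch, assuming this run's config: num_iterations=10000, cooldown_frac=0.8).
+# get_lr() returns the multiplier applied to each group's initial_lr:
+#   step  2000: x=0.2, w=1.0 -> 1.00  (end of the constant-LR phase)
+#   step  6000: x=0.6, w=0.5 -> 0.5*1.0 + 0.5*0.1 = 0.55
+#   step 10000: x=1.0, w=0.0 -> 0.10  (decay floors at 10% of the initial LR)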
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 04:09:05] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 04:09:05] [Rank 0] PRINT: Constructing model...
+[2025-07-07 04:09:07] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 04:09:07] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 04:09:07] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 04:09:08] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 04:09:08] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 04:09:08] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 04:09:08] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 04:09:08] [Rank 0] PRINT: Model returns:
+[2025-07-07 04:09:08] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 04:09:08] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-07 04:09:08] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-07 04:09:08] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-07 04:09:08] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-07 04:09:08] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 04:09:08] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 04:09:08] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 04:10:14] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 04:10:14] [Rank 0] PRINT: Starting training...
+[2025-07-07 04:10:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
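+A quick check of the divisibility warning above (a sketch; world_size=4 is
+inferred from val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144,
+and is not stated in the log itself):
+    val_tokens, val_batch_size = 1966080, 4 * 65536
+    val_tokens / val_batch_size       # 7.5 -> only 7 full validation steps run
+    val_tokens - 7 * val_batch_size   # 131072 tokens are skipped per validation pass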
+[2025-07-07 04:10:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 04:10:23] [Rank 0] step:21/10000 train_time:1748ms step_avg:83.26ms
+[2025-07-07 04:10:24] [Rank 0] step:41/10000 train_time:3201ms step_avg:78.07ms
+[2025-07-07 04:10:26] [Rank 0] step:61/10000 train_time:4653ms step_avg:76.28ms
+[2025-07-07 04:10:27] [Rank 0] step:81/10000 train_time:6107ms step_avg:75.40ms
+[2025-07-07 04:10:29] [Rank 0] step:101/10000 train_time:7807ms step_avg:77.29ms
+[2025-07-07 04:10:30] [Rank 0] step:121/10000 train_time:9258ms step_avg:76.51ms
+[2025-07-07 04:10:32] [Rank 0] step:141/10000 train_time:10712ms step_avg:75.97ms
+[2025-07-07 04:10:33] [Rank 0] step:161/10000 train_time:12168ms step_avg:75.58ms
+[2025-07-07 04:10:36] [Rank 0] step:181/10000 train_time:13620ms step_avg:75.25ms
+[2025-07-07 04:10:37] [Rank 0] step:201/10000 train_time:15742ms step_avg:78.32ms
+[2025-07-07 04:10:38] [Rank 0] step:221/10000 train_time:17197ms step_avg:77.81ms
+[2025-07-07 04:10:40] [Rank 0] step:241/10000 train_time:18655ms step_avg:77.41ms
+[2025-07-07 04:10:41] [Rank 0] step:261/10000 train_time:20109ms step_avg:77.04ms
+[2025-07-07 04:10:43] [Rank 0] step:281/10000 train_time:22215ms step_avg:79.06ms
+[2025-07-07 04:10:45] [Rank 0] step:301/10000 train_time:23672ms step_avg:78.64ms
+[2025-07-07 04:10:46] [Rank 0] step:321/10000 train_time:25133ms step_avg:78.30ms
+[2025-07-07 04:10:48] [Rank 0] step:341/10000 train_time:26591ms step_avg:77.98ms
+[2025-07-07 04:10:50] [Rank 0] step:361/10000 train_time:28310ms step_avg:78.42ms
+[2025-07-07 04:10:51] [Rank 0] step:381/10000 train_time:29751ms step_avg:78.09ms
+[2025-07-07 04:10:52] [Rank 0] step:401/10000 train_time:31213ms step_avg:77.84ms
+[2025-07-07 04:10:54] [Rank 0] step:421/10000 train_time:32673ms step_avg:77.61ms
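+The steps above also span the Muon momentum warmup applied in the training loop
+(frac = min(step / 300, 1); momentum = (1 - frac) * 0.85 + frac * 0.95), so from
+step 300 onward the momentum is constant; a minimal sketch of the same rule:
+    def muon_momentum(step: int) -> float:
+        frac = min(step / 300, 1)
+        return (1 - frac) * 0.85 + frac * 0.95
+    # muon_momentum(0) == 0.85, muon_momentum(150) == 0.90, muon_momentum(300) == 0.95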
+[2025-07-07 04:10:55] [Rank 0] step:441/10000 train_time:34135ms step_avg:77.40ms
+[2025-07-07 04:10:57] [Rank 0] step:461/10000 train_time:36265ms step_avg:78.67ms
+[2025-07-07 04:10:59] [Rank 0] step:481/10000 train_time:37727ms step_avg:78.43ms
+[2025-07-07 04:11:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:11:01] [Rank 0] PRINT: step:500/10000 train_loss:9.6494 val_loss:8.5918 train_time:39187ms step_avg:78.37ms
+[2025-07-07 04:11:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:11:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:11:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:16:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:16:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:16:18] [Rank 0] Total Loss: 8.9117
+[2025-07-07 04:16:18] [Rank 0] Total FTA: 0.0023
+[2025-07-07 04:16:18] [Rank 0] Group 0 Loss: 8.9311
+[2025-07-07 04:16:18] [Rank 0] Group 1 Loss: 8.9123
+[2025-07-07 04:16:18] [Rank 0] Group 2 Loss: 8.9507
+[2025-07-07 04:16:18] [Rank 0] Group 3 Loss: 8.8947
+[2025-07-07 04:16:18] [Rank 0] Group 4 Loss: 8.9058
+[2025-07-07 04:16:18] [Rank 0] Group 5 Loss: 8.8913
+[2025-07-07 04:16:18] [Rank 0] Group 6 Loss: 8.9126
+[2025-07-07 04:16:18] [Rank 0] Group 7 Loss: 8.9073
+[2025-07-07 04:16:18] [Rank 0] Group 8 Loss: 8.9053
+[2025-07-07 04:16:18] [Rank 0] Group 9 Loss: 8.9090
+[2025-07-07 04:16:18] [Rank 0] Group 10 Loss: 8.9030
+[2025-07-07 04:16:18] [Rank 0] Group 11 Loss: 8.9071
+[2025-07-07 04:16:18] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:16:18] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:16:18] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 04:16:18] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 04:16:18] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 04:16:18] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 04:16:18] [Rank 0] Group 6 FTA: 0.0078
+[2025-07-07 04:16:18] [Rank 0] Group 7 FTA: 0.0078
+[2025-07-07 04:16:18] [Rank 0] Group 8 FTA: 0.0078
+[2025-07-07 04:16:18] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 04:16:18] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 04:16:18] [Rank 0] Group 11 FTA: 0.0039
+[2025-07-07 04:16:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 04:16:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 04:16:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 04:16:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 04:16:20] [Rank 0] step:501/10000 train_time:39208ms step_avg:78.26ms
+[2025-07-07 04:16:21] [Rank 0] step:521/10000 train_time:40679ms step_avg:78.08ms
+[2025-07-07 04:16:23] [Rank 0] step:541/10000 train_time:42186ms step_avg:77.98ms
+[2025-07-07 04:16:25] [Rank 0] step:561/10000 train_time:44234ms step_avg:78.85ms
+[2025-07-07 04:16:26] [Rank 0] step:581/10000 train_time:45951ms step_avg:79.09ms
+[2025-07-07 04:16:28] [Rank 0] step:601/10000 train_time:47552ms step_avg:79.12ms
+[2025-07-07 04:16:29] [Rank 0] step:621/10000 train_time:49009ms step_avg:78.92ms
+[2025-07-07 04:16:32] [Rank 0] step:641/10000 train_time:51118ms step_avg:79.75ms
+[2025-07-07 04:16:33] [Rank 0] step:661/10000 train_time:52576ms step_avg:79.54ms
+[2025-07-07 04:16:34] [Rank 0] step:681/10000 train_time:54031ms step_avg:79.34ms
+[2025-07-07 04:16:36] [Rank 0] step:701/10000 train_time:55487ms step_avg:79.15ms
+[2025-07-07 04:16:38] [Rank 0] step:721/10000 train_time:57202ms step_avg:79.34ms
+[2025-07-07 04:16:39] [Rank 0] step:741/10000 train_time:59055ms step_avg:79.70ms
+[2025-07-07 04:16:41] [Rank 0] step:761/10000 train_time:60518ms step_avg:79.52ms
+[2025-07-07 04:16:42] [Rank 0] step:781/10000 train_time:61986ms step_avg:79.37ms
+[2025-07-07 04:16:44] [Rank 0] step:801/10000 train_time:63457ms step_avg:79.22ms
+[2025-07-07 04:16:46] [Rank 0] step:821/10000 train_time:65590ms step_avg:79.89ms
+[2025-07-07 04:16:47] [Rank 0] step:841/10000 train_time:67059ms step_avg:79.74ms
+[2025-07-07 04:16:49] [Rank 0] step:861/10000 train_time:68528ms step_avg:79.59ms
+[2025-07-07 04:16:50] [Rank 0] step:881/10000 train_time:69997ms step_avg:79.45ms
+[2025-07-07 04:16:53] [Rank 0] step:901/10000 train_time:71467ms step_avg:79.32ms
+[2025-07-07 04:16:54] [Rank 0] step:921/10000 train_time:73592ms step_avg:79.90ms
+[2025-07-07 04:16:55] [Rank 0] step:941/10000 train_time:75061ms step_avg:79.77ms
+[2025-07-07 04:16:57] [Rank 0] step:961/10000 train_time:76533ms step_avg:79.64ms
+[2025-07-07 04:16:58] [Rank 0] step:981/10000 train_time:78002ms step_avg:79.51ms
+[2025-07-07 04:17:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
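+"FTA" in the evaluation blocks is first-token accuracy, computed as in the
+script logged above: the QA text is split at "Answer:", and the argmax
+prediction at the last prompt position is compared with the first token of
+" " + answer. A minimal sketch (variable names are illustrative):
+    match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', text, re.IGNORECASE)
+    prompt, answer = (s.strip() for s in match.groups())
+    expected = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+    pos = len(tokenizer.encode(prompt, add_special_tokens=False)) - 1
+    correct = torch.argmax(logits.squeeze(0)[pos, :]).item() == expected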
+[2025-07-07 04:17:01] [Rank 0] PRINT: step:1000/10000 train_loss:7.7951 val_loss:7.0966 train_time:80120ms step_avg:80.12ms
+[2025-07-07 04:17:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:17:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:17:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:22:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:22:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:22:20] [Rank 0] Total Loss: 7.6662
+[2025-07-07 04:22:20] [Rank 0] Total FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 0 Loss: 7.6817
+[2025-07-07 04:22:20] [Rank 0] Group 1 Loss: 7.6286
+[2025-07-07 04:22:20] [Rank 0] Group 2 Loss: 7.7698
+[2025-07-07 04:22:20] [Rank 0] Group 3 Loss: 7.6345
+[2025-07-07 04:22:20] [Rank 0] Group 4 Loss: 7.6957
+[2025-07-07 04:22:20] [Rank 0] Group 5 Loss: 7.6191
+[2025-07-07 04:22:20] [Rank 0] Group 6 Loss: 7.6706
+[2025-07-07 04:22:20] [Rank 0] Group 7 Loss: 7.6746
+[2025-07-07 04:22:20] [Rank 0] Group 8 Loss: 7.6295
+[2025-07-07 04:22:20] [Rank 0] Group 9 Loss: 7.6594
+[2025-07-07 04:22:20] [Rank 0] Group 10 Loss: 7.6647
+[2025-07-07 04:22:20] [Rank 0] Group 11 Loss: 7.6599
+[2025-07-07 04:22:20] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 04:22:20] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 04:22:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 04:22:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 04:22:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 04:22:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 04:22:22] [Rank 0] step:1001/10000 train_time:80140ms step_avg:80.06ms
+[2025-07-07 04:22:23] [Rank 0] step:1021/10000 train_time:81630ms step_avg:79.95ms
+[2025-07-07 04:22:25] [Rank 0] step:1041/10000 train_time:83095ms step_avg:79.82ms
+[2025-07-07 04:22:26] [Rank 0] step:1061/10000 train_time:84558ms step_avg:79.70ms
+[2025-07-07 04:22:28] [Rank 0] step:1081/10000 train_time:86713ms step_avg:80.22ms
+[2025-07-07 04:22:30] [Rank 0] step:1101/10000 train_time:88160ms step_avg:80.07ms
+[2025-07-07 04:22:31] [Rank 0] step:1121/10000 train_time:89625ms step_avg:79.95ms
+[2025-07-07 04:22:33] [Rank 0] step:1141/10000 train_time:91092ms step_avg:79.83ms
+[2025-07-07 04:22:34] [Rank 0] step:1161/10000 train_time:92562ms step_avg:79.73ms
+[2025-07-07 04:22:36] [Rank 0] step:1181/10000 train_time:94266ms step_avg:79.82ms
+[2025-07-07 04:22:37] [Rank 0] step:1201/10000 train_time:95741ms step_avg:79.72ms
+[2025-07-07 04:22:39] [Rank 0] step:1221/10000 train_time:97211ms step_avg:79.62ms
+[2025-07-07 04:22:40] [Rank 0] step:1241/10000 train_time:98682ms step_avg:79.52ms
+[2025-07-07 04:22:42] [Rank 0] step:1261/10000 train_time:100153ms step_avg:79.42ms
+[2025-07-07 04:22:44] [Rank 0] step:1281/10000 train_time:101865ms step_avg:79.52ms
+[2025-07-07 04:22:45] [Rank 0] step:1301/10000 train_time:103579ms step_avg:79.61ms
+[2025-07-07 04:22:47] [Rank 0] step:1321/10000 train_time:105116ms step_avg:79.57ms
+[2025-07-07 04:22:48] [Rank 0] step:1341/10000 train_time:106639ms step_avg:79.52ms
+[2025-07-07 04:22:50] [Rank 0] step:1361/10000 train_time:108768ms step_avg:79.92ms
+[2025-07-07 04:22:52] [Rank 0] step:1381/10000 train_time:110246ms step_avg:79.83ms
+[2025-07-07 04:22:53] [Rank 0] step:1401/10000 train_time:111720ms step_avg:79.74ms
+[2025-07-07 04:22:55] [Rank 0] step:1421/10000 train_time:113195ms step_avg:79.66ms
+[2025-07-07 04:22:57] [Rank 0] step:1441/10000 train_time:114673ms step_avg:79.58ms
+[2025-07-07 04:22:58] [Rank 0] step:1461/10000 train_time:116810ms step_avg:79.95ms
+[2025-07-07 04:23:00] [Rank 0] step:1481/10000 train_time:118286ms step_avg:79.87ms
+[2025-07-07 04:23:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:23:02] [Rank 0] PRINT: step:1500/10000 train_loss:6.5303 val_loss:6.0112 train_time:119763ms step_avg:79.84ms
+[2025-07-07 04:23:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:23:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
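+The sampled size reported next (5633, above the ~5000 target) follows from the
+stratified sampler in the script: each class contributes
+max(1, int(len(items) * ratio)) items, so every small class still yields at
+least one sample; a minimal sketch:
+    ratio = 5000 / len(qa_data)
+    sample = [it for items in data_by_class.values()
+              for it in random.sample(items, min(len(items), max(1, int(len(items) * ratio))))]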
+[2025-07-07 04:23:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:28:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:28:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:28:18] [Rank 0] Total Loss: 6.7971
+[2025-07-07 04:28:18] [Rank 0] Total FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 0 Loss: 6.7567
+[2025-07-07 04:28:18] [Rank 0] Group 1 Loss: 6.8100
+[2025-07-07 04:28:18] [Rank 0] Group 2 Loss: 6.9168
+[2025-07-07 04:28:18] [Rank 0] Group 3 Loss: 6.7452
+[2025-07-07 04:28:18] [Rank 0] Group 4 Loss: 6.8310
+[2025-07-07 04:28:18] [Rank 0] Group 5 Loss: 6.7784
+[2025-07-07 04:28:18] [Rank 0] Group 6 Loss: 6.8137
+[2025-07-07 04:28:18] [Rank 0] Group 7 Loss: 6.8083
+[2025-07-07 04:28:18] [Rank 0] Group 8 Loss: 6.7748
+[2025-07-07 04:28:18] [Rank 0] Group 9 Loss: 6.8140
+[2025-07-07 04:28:18] [Rank 0] Group 10 Loss: 6.7957
+[2025-07-07 04:28:18] [Rank 0] Group 11 Loss: 6.7861
+[2025-07-07 04:28:18] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 04:28:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 04:28:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 04:28:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 04:28:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 04:28:19] [Rank 0] step:1501/10000 train_time:119783ms step_avg:79.80ms
+[2025-07-07 04:28:21] [Rank 0] step:1521/10000 train_time:121252ms step_avg:79.72ms
+[2025-07-07 04:28:23] [Rank 0] step:1541/10000 train_time:123393ms step_avg:80.07ms
+[2025-07-07 04:28:24] [Rank 0] step:1561/10000 train_time:124862ms step_avg:79.99ms
+[2025-07-07 04:28:26] [Rank 0] step:1581/10000 train_time:126334ms step_avg:79.91ms
+[2025-07-07 04:28:27] [Rank 0] step:1601/10000 train_time:127803ms step_avg:79.83ms
+[2025-07-07 04:28:29] [Rank 0] step:1621/10000 train_time:129537ms step_avg:79.91ms
+[2025-07-07 04:28:31] [Rank 0] step:1641/10000 train_time:130990ms step_avg:79.82ms
+[2025-07-07 04:28:32] [Rank 0] step:1661/10000 train_time:132464ms step_avg:79.75ms
+[2025-07-07 04:28:34] [Rank 0] step:1681/10000 train_time:133937ms step_avg:79.68ms
+[2025-07-07 04:28:35] [Rank 0] step:1701/10000 train_time:135413ms step_avg:79.61ms
+[2025-07-07 04:28:37] [Rank 0] step:1721/10000 train_time:137543ms step_avg:79.92ms
+[2025-07-07 04:28:39] [Rank 0] step:1741/10000 train_time:139017ms step_avg:79.85ms
+[2025-07-07 04:28:40] [Rank 0] step:1761/10000 train_time:140494ms step_avg:79.78ms
+[2025-07-07 04:28:42] [Rank 0] step:1781/10000 train_time:141971ms step_avg:79.71ms
+[2025-07-07 04:28:44] [Rank 0] step:1801/10000 train_time:143447ms step_avg:79.65ms
+[2025-07-07 04:28:45] [Rank 0] step:1821/10000 train_time:145578ms step_avg:79.94ms
+[2025-07-07 04:28:47] [Rank 0] step:1841/10000 train_time:147054ms step_avg:79.88ms
+[2025-07-07 04:28:48] [Rank 0] step:1861/10000 train_time:148533ms step_avg:79.81ms
+[2025-07-07 04:28:50] [Rank 0] step:1881/10000 train_time:150009ms step_avg:79.75ms
+[2025-07-07 04:28:52] [Rank 0] step:1901/10000 train_time:152133ms step_avg:80.03ms
+[2025-07-07 04:28:53] [Rank 0] step:1921/10000 train_time:153606ms step_avg:79.96ms
+[2025-07-07 04:28:55] [Rank 0] step:1941/10000 train_time:155083ms step_avg:79.90ms
+[2025-07-07 04:28:56] [Rank 0] step:1961/10000 train_time:156563ms step_avg:79.84ms
+[2025-07-07 04:28:58] [Rank 0] step:1981/10000 train_time:158092ms step_avg:79.80ms
+[2025-07-07 04:29:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:29:01] [Rank 0] PRINT: step:2000/10000 train_loss:5.5567 val_loss:5.1388 train_time:160168ms step_avg:80.08ms
+[2025-07-07 04:29:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:29:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:29:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:34:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:34:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:34:19] [Rank 0] Total Loss: 6.1530
+[2025-07-07 04:34:19] [Rank 0] Total FTA: 0.0037
+[2025-07-07 04:34:19] [Rank 0] Group 0 Loss: 6.1013
+[2025-07-07 04:34:19] [Rank 0] Group 1 Loss: 6.2661
+[2025-07-07 04:34:19] [Rank 0] Group 2 Loss: 6.2559
+[2025-07-07 04:34:19] [Rank 0] Group 3 Loss: 6.0914
+[2025-07-07 04:34:19] [Rank 0] Group 4 Loss: 6.1769
+[2025-07-07 04:34:19] [Rank 0] Group 5 Loss: 6.1267
+[2025-07-07 04:34:19] [Rank 0] Group 6 Loss: 6.1728
+[2025-07-07 04:34:19] [Rank 0] Group 7 Loss: 6.1736
+[2025-07-07 04:34:19] [Rank 0] Group 8 Loss: 6.1133
+[2025-07-07 04:34:19] [Rank 0] Group 9 Loss: 6.1218
+[2025-07-07 04:34:19] [Rank 0] Group 10 Loss: 6.1337
+[2025-07-07 04:34:19] [Rank 0] Group 11 Loss: 6.1519
+[2025-07-07 04:34:19] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:34:19] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:34:19] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 04:34:19] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 04:34:19] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 04:34:19] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 04:34:19] [Rank 0] Group 6 FTA: 0.0208
+[2025-07-07 04:34:19] [Rank 0] Group 7 FTA: 0.0104
+[2025-07-07 04:34:19] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 04:34:19] [Rank 0] Group 9 FTA: 0.0039
+[2025-07-07 04:34:19] [Rank 0] Group 10 FTA: 0.0039
+[2025-07-07 04:34:19] [Rank 0] Group 11 FTA: 0.0059
+[2025-07-07 04:34:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 04:34:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 04:34:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 04:34:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 04:34:20] [Rank 0] step:2001/10000 train_time:160188ms step_avg:80.05ms
+[2025-07-07 04:34:22] [Rank 0] step:2021/10000 train_time:161717ms step_avg:80.02ms
+[2025-07-07 04:34:24] [Rank 0] step:2041/10000 train_time:163348ms step_avg:80.03ms
+[2025-07-07 04:34:25] [Rank 0] step:2061/10000 train_time:164817ms step_avg:79.97ms
+[2025-07-07 04:34:27] [Rank 0] step:2081/10000 train_time:166953ms step_avg:80.23ms
+[2025-07-07 04:34:29] [Rank 0] step:2101/10000 train_time:168423ms step_avg:80.16ms
+[2025-07-07 04:34:30] [Rank 0] step:2121/10000 train_time:169896ms step_avg:80.10ms
+[2025-07-07 04:34:32] [Rank 0] step:2141/10000 train_time:171371ms step_avg:80.04ms
+[2025-07-07 04:34:34] [Rank 0] step:2161/10000 train_time:173102ms step_avg:80.10ms
+[2025-07-07 04:34:35] [Rank 0] step:2181/10000 train_time:174989ms step_avg:80.23ms
+[2025-07-07 04:34:37] [Rank 0] step:2201/10000 train_time:176466ms step_avg:80.18ms
+[2025-07-07 04:34:38] [Rank 0] step:2221/10000 train_time:177939ms step_avg:80.12ms
+[2025-07-07 04:34:40] [Rank 0] step:2241/10000 train_time:179435ms step_avg:80.07ms
+[2025-07-07 04:34:42] [Rank 0] step:2261/10000 train_time:181590ms step_avg:80.31ms
+[2025-07-07 04:34:43] [Rank 0] step:2281/10000 train_time:183089ms step_avg:80.27ms
+[2025-07-07 04:34:45] [Rank 0] step:2301/10000 train_time:184594ms step_avg:80.22ms
+[2025-07-07 04:34:46] [Rank 0] step:2321/10000 train_time:186098ms step_avg:80.18ms
+[2025-07-07 04:34:49] [Rank 0] step:2341/10000 train_time:188286ms step_avg:80.43ms
+[2025-07-07 04:34:50] [Rank 0] step:2361/10000 train_time:189771ms step_avg:80.38ms
+[2025-07-07 04:34:52] [Rank 0] step:2381/10000 train_time:191275ms step_avg:80.33ms
+[2025-07-07 04:34:53] [Rank 0] step:2401/10000 train_time:192781ms step_avg:80.29ms
+[2025-07-07 04:34:55] [Rank 0] step:2421/10000 train_time:194287ms step_avg:80.25ms
+[2025-07-07 04:34:57] [Rank 0] step:2441/10000 train_time:196443ms step_avg:80.48ms
+[2025-07-07 04:34:58] [Rank 0] step:2461/10000 train_time:197947ms step_avg:80.43ms
+[2025-07-07 04:35:00] [Rank 0] step:2481/10000 train_time:199451ms step_avg:80.39ms
+[2025-07-07 04:35:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:35:02] [Rank 0] PRINT: step:2500/10000 train_loss:4.7834 val_loss:4.4441 train_time:200958ms step_avg:80.38ms
+[2025-07-07 04:35:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:35:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:35:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:40:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:40:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:40:18] [Rank 0] Total Loss: 5.6936
+[2025-07-07 04:40:18] [Rank 0] Total FTA: 0.0509
+[2025-07-07 04:40:18] [Rank 0] Group 0 Loss: 5.6339
+[2025-07-07 04:40:18] [Rank 0] Group 1 Loss: 5.7905
+[2025-07-07 04:40:18] [Rank 0] Group 2 Loss: 5.7399
+[2025-07-07 04:40:18] [Rank 0] Group 3 Loss: 5.6454
+[2025-07-07 04:40:18] [Rank 0] Group 4 Loss: 5.7063
+[2025-07-07 04:40:18] [Rank 0] Group 5 Loss: 5.7019
+[2025-07-07 04:40:18] [Rank 0] Group 6 Loss: 5.6866
+[2025-07-07 04:40:18] [Rank 0] Group 7 Loss: 5.6922
+[2025-07-07 04:40:18] [Rank 0] Group 8 Loss: 5.6916
+[2025-07-07 04:40:18] [Rank 0] Group 9 Loss: 5.6877
+[2025-07-07 04:40:18] [Rank 0] Group 10 Loss: 5.7014
+[2025-07-07 04:40:18] [Rank 0] Group 11 Loss: 5.6964
+[2025-07-07 04:40:18] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 04:40:18] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:40:18] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 04:40:18] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 04:40:18] [Rank 0] Group 4 FTA: 0.0417
+[2025-07-07 04:40:18] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-07 04:40:18] [Rank 0] Group 6 FTA: 0.0729
+[2025-07-07 04:40:18] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-07 04:40:18] [Rank 0] Group 8 FTA: 0.0547
+[2025-07-07 04:40:18] [Rank 0] Group 9 FTA: 0.0547
+[2025-07-07 04:40:18] [Rank 0] Group 10 FTA: 0.0684
+[2025-07-07 04:40:18] [Rank 0] Group 11 FTA: 0.0654
+[2025-07-07 04:40:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 04:40:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 04:40:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 04:40:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 04:40:20] [Rank 0] step:2501/10000 train_time:200977ms step_avg:80.36ms
+[2025-07-07 04:40:22] [Rank 0] step:2521/10000 train_time:203176ms step_avg:80.59ms
+[2025-07-07 04:40:24] [Rank 0] step:2541/10000 train_time:204651ms step_avg:80.54ms
+[2025-07-07 04:40:25] [Rank 0] step:2561/10000 train_time:206142ms step_avg:80.49ms
+[2025-07-07 04:40:27] [Rank 0] step:2581/10000 train_time:207636ms step_avg:80.45ms
+[2025-07-07 04:40:28] [Rank 0] step:2601/10000 train_time:209132ms step_avg:80.40ms
+[2025-07-07 04:40:30] [Rank 0] step:2621/10000 train_time:211295ms step_avg:80.62ms
+[2025-07-07 04:40:32] [Rank 0] step:2641/10000 train_time:212796ms step_avg:80.57ms
+[2025-07-07 04:40:33] [Rank 0] step:2661/10000 train_time:214294ms step_avg:80.53ms
+[2025-07-07 04:40:35] [Rank 0] step:2681/10000 train_time:215794ms step_avg:80.49ms
+[2025-07-07 04:40:36] [Rank 0] step:2701/10000 train_time:217294ms step_avg:80.45ms
+[2025-07-07 04:40:38] [Rank 0] step:2721/10000 train_time:219030ms step_avg:80.50ms
+[2025-07-07 04:40:40] [Rank 0] step:2741/10000 train_time:220586ms step_avg:80.48ms
+[2025-07-07 04:40:41] [Rank 0] step:2761/10000 train_time:222204ms step_avg:80.48ms
+[2025-07-07 04:40:43] [Rank 0] step:2781/10000 train_time:223776ms step_avg:80.47ms
+[2025-07-07 04:40:45] [Rank 0] step:2801/10000 train_time:225921ms step_avg:80.66ms
+[2025-07-07 04:40:46] [Rank 0] step:2821/10000 train_time:227423ms step_avg:80.62ms
+[2025-07-07 04:40:48] [Rank 0] step:2841/10000 train_time:228928ms step_avg:80.58ms
+[2025-07-07 04:40:49] [Rank 0] step:2861/10000 train_time:230432ms step_avg:80.54ms
+[2025-07-07 04:40:52] [Rank 0] step:2881/10000 train_time:231937ms step_avg:80.51ms
+[2025-07-07 04:40:53] [Rank 0] step:2901/10000 train_time:234095ms step_avg:80.69ms
+[2025-07-07 04:40:55] [Rank 0] step:2921/10000 train_time:235596ms step_avg:80.66ms
+[2025-07-07 04:40:56] [Rank 0] step:2941/10000 train_time:237100ms step_avg:80.62ms
+[2025-07-07 04:40:58] [Rank 0] step:2961/10000 train_time:238603ms step_avg:80.58ms
+[2025-07-07 04:41:00] [Rank 0] step:2981/10000 train_time:240770ms step_avg:80.77ms
+[2025-07-07 04:41:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:41:02] [Rank 0] PRINT: step:3000/10000 train_loss:4.1238 val_loss:3.8186 train_time:242273ms step_avg:80.76ms
+[2025-07-07 04:41:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:41:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:41:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:46:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:46:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:46:19] [Rank 0] Total Loss: 5.3200
+[2025-07-07 04:46:19] [Rank 0] Total FTA: 0.0813
+[2025-07-07 04:46:19] [Rank 0] Group 0 Loss: 5.3113
+[2025-07-07 04:46:19] [Rank 0] Group 1 Loss: 5.4450
+[2025-07-07 04:46:19] [Rank 0] Group 2 Loss: 5.2992
+[2025-07-07 04:46:19] [Rank 0] Group 3 Loss: 5.2342
+[2025-07-07 04:46:19] [Rank 0] Group 4 Loss: 5.3241
+[2025-07-07 04:46:19] [Rank 0] Group 5 Loss: 5.3270
+[2025-07-07 04:46:19] [Rank 0] Group 6 Loss: 5.2937
+[2025-07-07 04:46:19] [Rank 0] Group 7 Loss: 5.3234
+[2025-07-07 04:46:19] [Rank 0] Group 8 Loss: 5.3062
+[2025-07-07 04:46:19] [Rank 0] Group 9 Loss: 5.3241
+[2025-07-07 04:46:19] [Rank 0] Group 10 Loss: 5.3053
+[2025-07-07 04:46:19] [Rank 0] Group 11 Loss: 5.3356
+[2025-07-07 04:46:19] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 04:46:19] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:46:19] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-07 04:46:19] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-07 04:46:19] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 04:46:19] [Rank 0] Group 5 FTA: 0.0286
+[2025-07-07 04:46:19] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-07 04:46:19] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-07 04:46:19] [Rank 0] Group 8 FTA: 0.0755
+[2025-07-07 04:46:19] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-07 04:46:19] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-07 04:46:19] [Rank 0] Group 11 FTA: 0.0850
+[2025-07-07 04:46:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 04:46:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 04:46:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 04:46:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 04:46:21] [Rank 0] step:3001/10000 train_time:242292ms step_avg:80.74ms
+[2025-07-07 04:46:22] [Rank 0] step:3021/10000 train_time:243788ms step_avg:80.70ms
+[2025-07-07 04:46:24] [Rank 0] step:3041/10000 train_time:245282ms step_avg:80.66ms
+[2025-07-07 04:46:26] [Rank 0] step:3061/10000 train_time:246778ms step_avg:80.62ms
+[2025-07-07 04:46:27] [Rank 0] step:3081/10000 train_time:248931ms step_avg:80.80ms
+[2025-07-07 04:46:29] [Rank 0] step:3101/10000 train_time:250426ms step_avg:80.76ms
+[2025-07-07 04:46:30] [Rank 0] step:3121/10000 train_time:251922ms step_avg:80.72ms
+[2025-07-07 04:46:32] [Rank 0] step:3141/10000 train_time:253420ms step_avg:80.68ms
+[2025-07-07 04:46:34] [Rank 0] step:3161/10000 train_time:255572ms step_avg:80.85ms
+[2025-07-07 04:46:35] [Rank 0] step:3181/10000 train_time:257071ms step_avg:80.81ms
+[2025-07-07 04:46:37] [Rank 0] step:3201/10000 train_time:258571ms step_avg:80.78ms
+[2025-07-07 04:46:38] [Rank 0] step:3221/10000 train_time:260070ms step_avg:80.74ms
+[2025-07-07 04:46:41] [Rank 0] step:3241/10000 train_time:261573ms step_avg:80.71ms
+[2025-07-07 04:46:42] [Rank 0] step:3261/10000 train_time:263720ms step_avg:80.87ms
+[2025-07-07 04:46:44] [Rank 0] step:3281/10000 train_time:265224ms step_avg:80.84ms
+[2025-07-07 04:46:45] [Rank 0] step:3301/10000 train_time:266728ms step_avg:80.80ms
+[2025-07-07 04:46:47] [Rank 0] step:3321/10000 train_time:268235ms step_avg:80.77ms
+[2025-07-07 04:46:49] [Rank 0] step:3341/10000 train_time:270391ms step_avg:80.93ms
+[2025-07-07 04:46:50] [Rank 0] step:3361/10000 train_time:271895ms step_avg:80.90ms
+[2025-07-07 04:46:52] [Rank 0] step:3381/10000 train_time:273400ms step_avg:80.86ms
+[2025-07-07 04:46:53] [Rank 0] step:3401/10000 train_time:274904ms step_avg:80.83ms
+[2025-07-07 04:46:55] [Rank 0] step:3421/10000 train_time:276666ms step_avg:80.87ms
+[2025-07-07 04:46:57] [Rank 0] step:3441/10000 train_time:278568ms step_avg:80.96ms
+[2025-07-07 04:46:58] [Rank 0] step:3461/10000 train_time:280072ms step_avg:80.92ms
+[2025-07-07 04:47:00] [Rank 0] step:3481/10000 train_time:281814ms step_avg:80.96ms
+[2025-07-07 04:47:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:47:02] [Rank 0] PRINT: step:3500/10000 train_loss:3.5439 val_loss:3.2860 train_time:283387ms step_avg:80.97ms
+[2025-07-07 04:47:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:47:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:47:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:52:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:52:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:52:19] [Rank 0] Total Loss: 4.9807
+[2025-07-07 04:52:19] [Rank 0] Total FTA: 0.0925
+[2025-07-07 04:52:19] [Rank 0] Group 0 Loss: 5.0071
+[2025-07-07 04:52:19] [Rank 0] Group 1 Loss: 5.0582
+[2025-07-07 04:52:19] [Rank 0] Group 2 Loss: 4.9120
+[2025-07-07 04:52:19] [Rank 0] Group 3 Loss: 4.8719
+[2025-07-07 04:52:19] [Rank 0] Group 4 Loss: 4.9401
+[2025-07-07 04:52:19] [Rank 0] Group 5 Loss: 4.9851
+[2025-07-07 04:52:19] [Rank 0] Group 6 Loss: 4.9963
+[2025-07-07 04:52:19] [Rank 0] Group 7 Loss: 4.9716
+[2025-07-07 04:52:19] [Rank 0] Group 8 Loss: 4.9788
+[2025-07-07 04:52:19] [Rank 0] Group 9 Loss: 4.9939
+[2025-07-07 04:52:19] [Rank 0] Group 10 Loss: 4.9990
+[2025-07-07 04:52:19] [Rank 0] Group 11 Loss: 4.9977
+[2025-07-07 04:52:19] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 04:52:19] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:52:19] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-07 04:52:19] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 04:52:19] [Rank 0] Group 4 FTA: 0.0625
+[2025-07-07 04:52:19] [Rank 0] Group 5 FTA: 0.0521
+[2025-07-07 04:52:19] [Rank 0] Group 6 FTA: 0.1016
+[2025-07-07 04:52:19] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-07 04:52:19] [Rank 0] Group 8 FTA: 0.1120
+[2025-07-07 04:52:19] [Rank 0] Group 9 FTA: 0.1133
+[2025-07-07 04:52:19] [Rank 0] Group 10 FTA: 0.1035
+[2025-07-07 04:52:19] [Rank 0] Group 11 FTA: 0.0957
+[2025-07-07 04:52:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 04:52:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 04:52:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 04:52:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 04:52:21] [Rank 0] step:3501/10000 train_time:283406ms step_avg:80.95ms
+[2025-07-07 04:52:23] [Rank 0] step:3521/10000 train_time:285574ms step_avg:81.11ms
+[2025-07-07 04:52:24] [Rank 0] step:3541/10000 train_time:287066ms step_avg:81.07ms
+[2025-07-07 04:52:26] [Rank 0] step:3561/10000 train_time:288562ms step_avg:81.03ms
+[2025-07-07 04:52:27] [Rank 0] step:3581/10000 train_time:290059ms step_avg:81.00ms
+[2025-07-07 04:52:29] [Rank 0] step:3601/10000 train_time:291812ms step_avg:81.04ms
+[2025-07-07 04:52:31] [Rank 0] step:3621/10000 train_time:293291ms step_avg:81.00ms
+[2025-07-07 04:52:32] [Rank 0] step:3641/10000 train_time:294787ms step_avg:80.96ms
+[2025-07-07 04:52:34] [Rank 0] step:3661/10000 train_time:296285ms step_avg:80.93ms
+[2025-07-07 04:52:35] [Rank 0] step:3681/10000 train_time:297784ms step_avg:80.90ms
+[2025-07-07 04:52:37] [Rank 0] step:3701/10000 train_time:299944ms step_avg:81.04ms
+[2025-07-07 04:52:39] [Rank 0] step:3721/10000 train_time:301445ms step_avg:81.01ms
+[2025-07-07 04:52:40] [Rank 0] step:3741/10000 train_time:302945ms step_avg:80.98ms
+[2025-07-07 04:52:42] [Rank 0] step:3761/10000 train_time:304449ms step_avg:80.95ms
+[2025-07-07 04:52:44] [Rank 0] step:3781/10000 train_time:305952ms step_avg:80.92ms
+[2025-07-07 04:52:45] [Rank 0] step:3801/10000 train_time:308125ms step_avg:81.06ms
+[2025-07-07 04:52:47] [Rank 0] step:3821/10000 train_time:309625ms step_avg:81.03ms
+[2025-07-07 04:52:48] [Rank 0] step:3841/10000 train_time:311127ms step_avg:81.00ms
+[2025-07-07 04:52:50] [Rank 0] step:3861/10000 train_time:312630ms step_avg:80.97ms
+[2025-07-07 04:52:52] [Rank 0] step:3881/10000 train_time:314365ms step_avg:81.00ms
+[2025-07-07 04:52:53] [Rank 0] step:3901/10000 train_time:315867ms step_avg:80.97ms
+[2025-07-07 04:52:55] [Rank 0] step:3921/10000 train_time:317369ms step_avg:80.94ms
+[2025-07-07 04:52:56] [Rank 0] step:3941/10000 train_time:318871ms step_avg:80.91ms
+[2025-07-07 04:52:58] [Rank 0] step:3961/10000 train_time:320426ms step_avg:80.90ms
+[2025-07-07 04:52:59] [Rank 0] step:3981/10000 train_time:322108ms step_avg:80.91ms
+[2025-07-07 04:53:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:53:02] [Rank 0] PRINT: step:4000/10000 train_loss:3.0612 val_loss:2.8521 train_time:323612ms step_avg:80.90ms
+[2025-07-07 04:53:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:53:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:53:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:58:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:58:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:58:19] [Rank 0] Total Loss: 4.6894
+[2025-07-07 04:58:19] [Rank 0] Total FTA: 0.1078
+[2025-07-07 04:58:19] [Rank 0] Group 0 Loss: 4.8209
+[2025-07-07 04:58:19] [Rank 0] Group 1 Loss: 4.7731
+[2025-07-07 04:58:19] [Rank 0] Group 2 Loss: 4.6103
+[2025-07-07 04:58:19] [Rank 0] Group 3 Loss: 4.6179
+[2025-07-07 04:58:19] [Rank 0] Group 4 Loss: 4.6389
+[2025-07-07 04:58:19] [Rank 0] Group 5 Loss: 4.6709
+[2025-07-07 04:58:19] [Rank 0] Group 6 Loss: 4.6359
+[2025-07-07 04:58:19] [Rank 0] Group 7 Loss: 4.6856
+[2025-07-07 04:58:19] [Rank 0] Group 8 Loss: 4.6630
+[2025-07-07 04:58:19] [Rank 0] Group 9 Loss: 4.6618
+[2025-07-07 04:58:19] [Rank 0] Group 10 Loss: 4.6978
+[2025-07-07 04:58:19] [Rank 0] Group 11 Loss: 4.6755
+[2025-07-07 04:58:19] [Rank 0] Group 0 FTA: 0.1417
+[2025-07-07 04:58:19] [Rank 0] Group 1 FTA: 0.1875
+[2025-07-07 04:58:19] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-07 04:58:19] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-07 04:58:19] [Rank 0] Group 4 FTA: 0.0521
+[2025-07-07 04:58:19] [Rank 0] Group 5 FTA: 0.0521
+[2025-07-07 04:58:19] [Rank 0] Group 6 FTA: 0.0990
+[2025-07-07 04:58:19] [Rank 0] Group 7 FTA: 0.1302
+[2025-07-07 04:58:19] [Rank 0] Group 8 FTA: 0.1380
+[2025-07-07 04:58:19] [Rank 0] Group 9 FTA: 0.1406
+[2025-07-07 04:58:19] [Rank 0] Group 10 FTA: 0.1152
+[2025-07-07 04:58:19] [Rank 0] Group 11 FTA: 0.1084
+[2025-07-07 04:58:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 04:58:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 04:58:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 04:58:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 04:58:21] [Rank 0] step:4001/10000 train_time:323632ms step_avg:80.89ms
+[2025-07-07 04:58:22] [Rank 0] step:4021/10000 train_time:325147ms step_avg:80.86ms
+[2025-07-07 04:58:24] [Rank 0] step:4041/10000 train_time:326638ms step_avg:80.83ms
+[2025-07-07 04:58:26] [Rank 0] step:4061/10000 train_time:328787ms step_avg:80.96ms
+[2025-07-07 04:58:27] [Rank 0] step:4081/10000 train_time:330279ms step_avg:80.93ms
+[2025-07-07 04:58:29] [Rank 0] step:4101/10000 train_time:331775ms step_avg:80.90ms
+[2025-07-07 04:58:30] [Rank 0] step:4121/10000 train_time:333273ms step_avg:80.87ms
+[2025-07-07 04:58:32] [Rank 0] step:4141/10000 train_time:334817ms step_avg:80.85ms
+[2025-07-07 04:58:34] [Rank 0] step:4161/10000 train_time:336934ms step_avg:80.97ms
+[2025-07-07 04:58:36] [Rank 0] step:4181/10000 train_time:338558ms step_avg:80.98ms
+[2025-07-07 04:58:37] [Rank 0] step:4201/10000 train_time:340212ms step_avg:80.98ms
+[2025-07-07 04:58:39] [Rank 0] step:4221/10000 train_time:341717ms step_avg:80.96ms
+[2025-07-07 04:58:41] [Rank 0] step:4241/10000 train_time:343861ms step_avg:81.08ms
+[2025-07-07 04:58:42] [Rank 0] step:4261/10000 train_time:345360ms step_avg:81.05ms
+[2025-07-07 04:58:44] [Rank 0] step:4281/10000 train_time:346862ms step_avg:81.02ms
+[2025-07-07 04:58:45] [Rank 0] step:4301/10000 train_time:348365ms step_avg:81.00ms
+[2025-07-07 04:58:47] [Rank 0] step:4321/10000 train_time:349918ms step_avg:80.98ms
+[2025-07-07 04:58:49] [Rank 0] step:4341/10000 train_time:351609ms step_avg:81.00ms
+[2025-07-07 04:58:50] [Rank 0] step:4361/10000 train_time:353113ms step_avg:80.97ms
+[2025-07-07 04:58:52] [Rank 0] step:4381/10000 train_time:354616ms step_avg:80.94ms
+[2025-07-07 04:58:53] [Rank 0] step:4401/10000 train_time:356122ms step_avg:80.92ms
+[2025-07-07 04:58:55] [Rank 0] step:4421/10000 train_time:357861ms step_avg:80.95ms
+[2025-07-07 04:58:56] [Rank 0] step:4441/10000 train_time:359365ms step_avg:80.92ms
+[2025-07-07 04:58:58] [Rank 0] step:4461/10000 train_time:360870ms step_avg:80.89ms
+[2025-07-07 04:58:59] [Rank 0] step:4481/10000 train_time:362376ms step_avg:80.87ms
+[2025-07-07 04:59:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:59:02] [Rank 0] PRINT: step:4500/10000 train_loss:2.6762 val_loss:2.5155 train_time:363880ms step_avg:80.86ms
+[2025-07-07 04:59:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:59:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:59:02] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 04:59:02] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 05:04:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:04:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 05:04:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:04:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 05:04:19] [Rank 0] Total Loss: 4.5007 +[2025-07-07 05:04:19] [Rank 0] Total Loss: 4.5007 +[2025-07-07 05:04:19] [Rank 0] Total FTA: 0.1209 +[2025-07-07 05:04:19] [Rank 0] Total FTA: 0.1209 +[2025-07-07 05:04:20] [Rank 0] Group 0 Loss: 4.6382 +[2025-07-07 05:04:20] [Rank 0] Group 0 Loss: 4.6382 +[2025-07-07 05:04:20] [Rank 0] Group 1 Loss: 4.4817 +[2025-07-07 05:04:20] [Rank 0] Group 1 Loss: 4.4817 +[2025-07-07 05:04:20] [Rank 0] Group 2 Loss: 4.3908 +[2025-07-07 05:04:20] [Rank 0] Group 2 Loss: 4.3908 +[2025-07-07 05:04:20] [Rank 0] Group 3 Loss: 4.4352 +[2025-07-07 05:04:20] [Rank 0] Group 3 Loss: 4.4352 +[2025-07-07 05:04:20] [Rank 0] Group 4 Loss: 4.4754 +[2025-07-07 05:04:20] [Rank 0] Group 4 Loss: 4.4754 +[2025-07-07 05:04:20] [Rank 0] Group 5 Loss: 4.4998 +[2025-07-07 05:04:20] [Rank 0] Group 5 Loss: 4.4998 +[2025-07-07 05:04:20] [Rank 0] Group 6 Loss: 4.4971 +[2025-07-07 05:04:20] [Rank 0] Group 6 Loss: 4.4971 +[2025-07-07 05:04:20] [Rank 0] Group 7 Loss: 4.4974 +[2025-07-07 05:04:20] [Rank 0] Group 7 Loss: 4.4974 +[2025-07-07 05:04:20] [Rank 0] Group 8 Loss: 4.4804 +[2025-07-07 05:04:20] [Rank 0] Group 8 Loss: 4.4804 +[2025-07-07 05:04:20] [Rank 0] Group 9 Loss: 4.5022 +[2025-07-07 05:04:20] [Rank 0] Group 9 Loss: 4.5022 +[2025-07-07 05:04:20] [Rank 0] Group 10 Loss: 4.4898 +[2025-07-07 05:04:20] [Rank 0] Group 10 Loss: 4.4898 +[2025-07-07 05:04:20] [Rank 0] Group 11 Loss: 4.4952 +[2025-07-07 05:04:20] [Rank 0] Group 11 Loss: 4.4952 +[2025-07-07 05:04:20] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 05:04:20] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 05:04:20] [Rank 0] Group 1 FTA: 0.1562 +[2025-07-07 05:04:20] [Rank 0] Group 1 FTA: 0.1562 +[2025-07-07 05:04:20] [Rank 0] Group 2 FTA: 0.1484 +[2025-07-07 05:04:20] [Rank 0] Group 2 FTA: 0.1484 +[2025-07-07 05:04:20] [Rank 0] Group 3 FTA: 0.0807 +[2025-07-07 05:04:20] [Rank 0] Group 3 FTA: 0.0807 +[2025-07-07 05:04:20] [Rank 0] Group 4 FTA: 0.0911 +[2025-07-07 05:04:20] [Rank 0] Group 4 FTA: 0.0911 +[2025-07-07 05:04:20] [Rank 0] Group 5 FTA: 0.0521 +[2025-07-07 05:04:20] [Rank 0] Group 5 FTA: 0.0521 +[2025-07-07 05:04:20] [Rank 0] Group 6 FTA: 0.1510 +[2025-07-07 05:04:20] [Rank 0] Group 6 FTA: 0.1510 +[2025-07-07 05:04:20] [Rank 0] Group 7 FTA: 0.0990 +[2025-07-07 05:04:20] [Rank 0] Group 7 FTA: 0.0990 +[2025-07-07 05:04:20] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-07 05:04:20] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-07 05:04:20] [Rank 0] Group 9 FTA: 0.1367 +[2025-07-07 05:04:20] [Rank 0] Group 9 FTA: 0.1367 +[2025-07-07 05:04:20] [Rank 0] Group 10 FTA: 0.0918 +[2025-07-07 05:04:20] [Rank 0] Group 10 FTA: 0.0918 +[2025-07-07 05:04:20] [Rank 0] Group 11 FTA: 0.1260 +[2025-07-07 05:04:20] [Rank 0] Group 11 FTA: 0.1260 +[2025-07-07 05:04:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png +[2025-07-07 05:04:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png +[2025-07-07 05:04:20] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:04:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:04:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:04:22] [Rank 0] step:4501/10000 train_time:363907ms step_avg:80.85ms
+[2025-07-07 05:04:23] [Rank 0] step:4521/10000 train_time:366093ms step_avg:80.98ms
+[2025-07-07 05:04:25] [Rank 0] step:4541/10000 train_time:367586ms step_avg:80.95ms
+[2025-07-07 05:04:26] [Rank 0] step:4561/10000 train_time:369080ms step_avg:80.92ms
+[2025-07-07 05:04:28] [Rank 0] step:4581/10000 train_time:370573ms step_avg:80.89ms
+[2025-07-07 05:04:29] [Rank 0] step:4601/10000 train_time:372309ms step_avg:80.92ms
+[2025-07-07 05:04:31] [Rank 0] step:4621/10000 train_time:373804ms step_avg:80.89ms
+[2025-07-07 05:04:32] [Rank 0] step:4641/10000 train_time:375302ms step_avg:80.87ms
+[2025-07-07 05:04:34] [Rank 0] step:4661/10000 train_time:376803ms step_avg:80.84ms
+[2025-07-07 05:04:36] [Rank 0] step:4681/10000 train_time:378459ms step_avg:80.85ms
+[2025-07-07 05:04:37] [Rank 0] step:4701/10000 train_time:379943ms step_avg:80.82ms
+[2025-07-07 05:04:39] [Rank 0] step:4721/10000 train_time:381452ms step_avg:80.80ms
+[2025-07-07 05:04:40] [Rank 0] step:4741/10000 train_time:382955ms step_avg:80.78ms
+[2025-07-07 05:04:42] [Rank 0] step:4761/10000 train_time:384460ms step_avg:80.75ms
+[2025-07-07 05:04:44] [Rank 0] step:4781/10000 train_time:386624ms step_avg:80.87ms
+[2025-07-07 05:04:45] [Rank 0] step:4801/10000 train_time:388125ms step_avg:80.84ms
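
The step_avg column is, to rounding, cumulative train_time divided by the step index; a quick check against the step 4801 entry above (the formula is an assumption that reproduces the logged value):

# step_avg appears to be cumulative train_time divided by the step index.
# Values are copied from the log line for step 4801; the formula itself is
# an assumption that matches the logged number to two decimals.
train_time_ms = 388125
step = 4801
step_avg = train_time_ms / step
print(f"{step_avg:.2f}ms")  # 80.84ms, matching the log
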
+[2025-07-07 05:04:47] [Rank 0] step:4821/10000 train_time:389627ms step_avg:80.82ms
+[2025-07-07 05:04:48] [Rank 0] step:4841/10000 train_time:391127ms step_avg:80.79ms
+[2025-07-07 05:04:50] [Rank 0] step:4861/10000 train_time:392628ms step_avg:80.77ms
+[2025-07-07 05:04:52] [Rank 0] step:4881/10000 train_time:394769ms step_avg:80.88ms
+[2025-07-07 05:04:53] [Rank 0] step:4901/10000 train_time:396268ms step_avg:80.85ms
+[2025-07-07 05:04:55] [Rank 0] step:4921/10000 train_time:397956ms step_avg:80.87ms
+[2025-07-07 05:04:57] [Rank 0] step:4941/10000 train_time:399524ms step_avg:80.86ms
+[2025-07-07 05:04:58] [Rank 0] step:4961/10000 train_time:401264ms step_avg:80.88ms
+[2025-07-07 05:05:00] [Rank 0] step:4981/10000 train_time:402765ms step_avg:80.86ms
+[2025-07-07 05:05:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:05:02] [Rank 0] PRINT: step:5000/10000 train_loss:2.3844 val_loss:2.2653 train_time:404267ms step_avg:80.85ms
+[2025-07-07 05:05:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:05:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
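
The arithmetic behind the recurring val_tokens warning above, using the two numbers from the log (the loop structure is an assumption; only the division is taken from the message):

# Why the warning fires: 1,966,080 is not a multiple of 262,144, so a
# fixed-size validation loop can only consume whole batches and drops
# the remainder.
val_tokens = 1966080
val_batch_size = 262144

full_batches = val_tokens // val_batch_size   # 7
consumed = full_batches * val_batch_size      # 1835008
missed = val_tokens - consumed                # 131072 tokens skipped

print(val_tokens / val_batch_size)            # 7.5 -> not an integer
print(full_batches, consumed, missed)
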
+[2025-07-07 05:05:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:10:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:10:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:10:21] [Rank 0] Total Loss: 4.3410
+[2025-07-07 05:10:21] [Rank 0] Total FTA: 0.1365
+[2025-07-07 05:10:21] [Rank 0] Group 0 Loss: 4.4869
+[2025-07-07 05:10:21] [Rank 0] Group 1 Loss: 4.3015
+[2025-07-07 05:10:21] [Rank 0] Group 2 Loss: 4.1636
+[2025-07-07 05:10:21] [Rank 0] Group 3 Loss: 4.3001
+[2025-07-07 05:10:21] [Rank 0] Group 4 Loss: 4.3425
+[2025-07-07 05:10:21] [Rank 0] Group 5 Loss: 4.3457
+[2025-07-07 05:10:21] [Rank 0] Group 6 Loss: 4.3247
+[2025-07-07 05:10:21] [Rank 0] Group 7 Loss: 4.3583
+[2025-07-07 05:10:21] [Rank 0] Group 8 Loss: 4.2791
+[2025-07-07 05:10:21] [Rank 0] Group 9 Loss: 4.3343
+[2025-07-07 05:10:21] [Rank 0] Group 10 Loss: 4.3662
+[2025-07-07 05:10:21] [Rank 0] Group 11 Loss: 4.3379
+[2025-07-07 05:10:21] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 05:10:21] [Rank 0] Group 1 FTA: 0.1719
+[2025-07-07 05:10:21] [Rank 0] Group 2 FTA: 0.1432
+[2025-07-07 05:10:21] [Rank 0] Group 3 FTA: 0.0885
+[2025-07-07 05:10:21] [Rank 0] Group 4 FTA: 0.1328
+[2025-07-07 05:10:21] [Rank 0] Group 5 FTA: 0.1198
+[2025-07-07 05:10:21] [Rank 0] Group 6 FTA: 0.1302
+[2025-07-07 05:10:21] [Rank 0] Group 7 FTA: 0.1328
+[2025-07-07 05:10:21] [Rank 0] Group 8 FTA: 0.1406
+[2025-07-07 05:10:21] [Rank 0] Group 9 FTA: 0.1367
+[2025-07-07 05:10:21] [Rank 0] Group 10 FTA: 0.1387
+[2025-07-07 05:10:21] [Rank 0] Group 11 FTA: 0.1250
+[2025-07-07 05:10:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:10:22] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:10:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:10:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:10:23] [Rank 0] step:5001/10000 train_time:404287ms step_avg:80.84ms
+[2025-07-07 05:10:24] [Rank 0] step:5021/10000 train_time:405799ms step_avg:80.82ms
+[2025-07-07 05:10:26] [Rank 0] step:5041/10000 train_time:407294ms step_avg:80.80ms
+[2025-07-07 05:10:28] [Rank 0] step:5061/10000 train_time:409451ms step_avg:80.90ms
+[2025-07-07 05:10:29] [Rank 0] step:5081/10000 train_time:410942ms step_avg:80.88ms
+[2025-07-07 05:10:31] [Rank 0] step:5101/10000 train_time:412437ms step_avg:80.85ms
+[2025-07-07 05:10:32] [Rank 0] step:5121/10000 train_time:413934ms step_avg:80.83ms
+[2025-07-07 05:10:34] [Rank 0] step:5141/10000 train_time:415671ms step_avg:80.85ms
+[2025-07-07 05:10:36] [Rank 0] step:5161/10000 train_time:417167ms step_avg:80.83ms
+[2025-07-07 05:10:37] [Rank 0] step:5181/10000 train_time:418667ms step_avg:80.81ms
+[2025-07-07 05:10:39] [Rank 0] step:5201/10000 train_time:420164ms step_avg:80.79ms
+[2025-07-07 05:10:41] [Rank 0] step:5221/10000 train_time:422346ms step_avg:80.89ms
+[2025-07-07 05:10:42] [Rank 0] step:5241/10000 train_time:423828ms step_avg:80.87ms
+[2025-07-07 05:10:44] [Rank 0] step:5261/10000 train_time:425327ms step_avg:80.85ms
+[2025-07-07 05:10:45] [Rank 0] step:5281/10000 train_time:426833ms step_avg:80.82ms
+[2025-07-07 05:10:47] [Rank 0] step:5301/10000 train_time:428336ms step_avg:80.80ms
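
FTA is never expanded in the log; assuming it means first-token accuracy (is the first answer token predicted top-1), a per-group tally consistent with the Group N FTA lines above could look like this sketch (the model interface, field names, and group ids are assumptions):

import torch
from collections import defaultdict

@torch.no_grad()
def first_token_accuracy(model, batches):
    """Hypothetical FTA tally: fraction of samples whose first answer token
    is the model's top-1 prediction, reported per group."""
    hits, counts = defaultdict(int), defaultdict(int)
    for tokens, answer_pos, group_id in batches:
        logits = model(tokens)                                    # (B, T, vocab)
        batch = torch.arange(len(tokens))
        pred = logits[batch, answer_pos - 1].argmax(-1)           # predicts token at answer_pos
        gold = tokens[batch, answer_pos]
        for g, ok in zip(group_id.tolist(), (pred == gold).tolist()):
            hits[g] += ok
            counts[g] += 1
    return {g: hits[g] / counts[g] for g in counts}               # e.g. {0: 0.1664, ...}
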
+[2025-07-07 05:10:49] [Rank 0] step:5321/10000 train_time:430071ms step_avg:80.83ms
+[2025-07-07 05:10:50] [Rank 0] step:5341/10000 train_time:431575ms step_avg:80.80ms
+[2025-07-07 05:10:52] [Rank 0] step:5361/10000 train_time:433079ms step_avg:80.78ms
+[2025-07-07 05:10:53] [Rank 0] step:5381/10000 train_time:434583ms step_avg:80.76ms
+[2025-07-07 05:10:55] [Rank 0] step:5401/10000 train_time:436345ms step_avg:80.79ms
+[2025-07-07 05:10:57] [Rank 0] step:5421/10000 train_time:438260ms step_avg:80.84ms
+[2025-07-07 05:10:58] [Rank 0] step:5441/10000 train_time:439762ms step_avg:80.82ms
+[2025-07-07 05:11:00] [Rank 0] step:5461/10000 train_time:441267ms step_avg:80.80ms
+[2025-07-07 05:11:01] [Rank 0] step:5481/10000 train_time:442772ms step_avg:80.78ms
+[2025-07-07 05:11:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:11:04] [Rank 0] PRINT: step:5500/10000 train_loss:2.1704 val_loss:2.0838 train_time:444929ms step_avg:80.90ms
+[2025-07-07 05:11:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:11:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:11:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:16:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:16:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:16:30] [Rank 0] Total Loss: 4.3181
+[2025-07-07 05:16:30] [Rank 0] Total FTA: 0.1482
+[2025-07-07 05:16:30] [Rank 0] Group 0 Loss: 4.6645
+[2025-07-07 05:16:30] [Rank 0] Group 1 Loss: 4.2683
+[2025-07-07 05:16:30] [Rank 0] Group 2 Loss: 4.1422
+[2025-07-07 05:16:30] [Rank 0] Group 3 Loss: 4.2567
+[2025-07-07 05:16:30] [Rank 0] Group 4 Loss: 4.2775
+[2025-07-07 05:16:30] [Rank 0] Group 5 Loss: 4.2572
+[2025-07-07 05:16:30] [Rank 0] Group 6 Loss: 4.2132
+[2025-07-07 05:16:30] [Rank 0] Group 7 Loss: 4.3274
+[2025-07-07 05:16:30] [Rank 0] Group 8 Loss: 4.2777
+[2025-07-07 05:16:30] [Rank 0] Group 9 Loss: 4.2390
+[2025-07-07 05:16:30] [Rank 0] Group 10 Loss: 4.2733
+[2025-07-07 05:16:30] [Rank 0] Group 11 Loss: 4.2970
+[2025-07-07 05:16:30] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 05:16:30] [Rank 0] Group 1 FTA: 0.1771
+[2025-07-07 05:16:30] [Rank 0] Group 2 FTA: 0.1562
+[2025-07-07 05:16:30] [Rank 0] Group 3 FTA: 0.1172
+[2025-07-07 05:16:30] [Rank 0] Group 4 FTA: 0.1224
+[2025-07-07 05:16:30] [Rank 0] Group 5 FTA: 0.1068
+[2025-07-07 05:16:30] [Rank 0] Group 6 FTA: 0.1693
+[2025-07-07 05:16:30] [Rank 0] Group 7 FTA: 0.1406
+[2025-07-07 05:16:30] [Rank 0] Group 8 FTA: 0.1589
+[2025-07-07 05:16:30] [Rank 0] Group 9 FTA: 0.1680
+[2025-07-07 05:16:30] [Rank 0] Group 10 FTA: 0.1445
+[2025-07-07 05:16:30] [Rank 0] Group 11 FTA: 0.1455
+[2025-07-07 05:16:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:16:31] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:16:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:16:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:16:32] [Rank 0] step:5501/10000 train_time:444951ms step_avg:80.89ms
+[2025-07-07 05:16:33] [Rank 0] step:5521/10000 train_time:446547ms step_avg:80.88ms
+[2025-07-07 05:16:35] [Rank 0] step:5541/10000 train_time:448039ms step_avg:80.86ms
+[2025-07-07 05:16:36] [Rank 0] step:5561/10000 train_time:449534ms step_avg:80.84ms
+[2025-07-07 05:16:39] [Rank 0] step:5581/10000 train_time:451083ms step_avg:80.82ms
+[2025-07-07 05:16:40] [Rank 0] step:5601/10000 train_time:453187ms step_avg:80.91ms
+[2025-07-07 05:16:42] [Rank 0] step:5621/10000 train_time:454684ms step_avg:80.89ms
+[2025-07-07 05:16:43] [Rank 0] step:5641/10000 train_time:456179ms step_avg:80.87ms
+[2025-07-07 05:16:45] [Rank 0] step:5661/10000 train_time:457678ms step_avg:80.85ms
+[2025-07-07 05:16:47] [Rank 0] step:5681/10000 train_time:459832ms step_avg:80.94ms
+[2025-07-07 05:16:48] [Rank 0] step:5701/10000 train_time:461330ms step_avg:80.92ms
+[2025-07-07 05:16:50] [Rank 0] step:5721/10000 train_time:462828ms step_avg:80.90ms
+[2025-07-07 05:16:51] [Rank 0] step:5741/10000 train_time:464328ms step_avg:80.88ms
+[2025-07-07 05:16:53] [Rank 0] step:5761/10000 train_time:466499ms step_avg:80.98ms
+[2025-07-07 05:16:55] [Rank 0] step:5781/10000 train_time:467977ms step_avg:80.95ms
+[2025-07-07 05:16:56] [Rank 0] step:5801/10000 train_time:469477ms step_avg:80.93ms
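
The four curve PNGs listed after every detailed evaluation above appear to be redrawn from the full history and overwritten in place; a minimal matplotlib sketch of that pattern (only the file name comes from the log, everything else is assumed):

import matplotlib
matplotlib.use("Agg")  # headless: the run writes PNGs, not windows
import matplotlib.pyplot as plt

def update_curve(history, out_path="total_loss_curve.png"):
    """Redraw the whole curve from accumulated history and overwrite the PNG.
    `history` is a list of (step, value) pairs kept across evals (assumed)."""
    steps, values = zip(*history)
    fig, ax = plt.subplots()
    ax.plot(steps, values)
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval loss")
    fig.savefig(out_path)
    plt.close(fig)  # avoid leaking figures over a 10000-step run
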
+[2025-07-07 05:16:58] [Rank 0] step:5821/10000 train_time:470981ms step_avg:80.91ms
+[2025-07-07 05:16:59] [Rank 0] step:5841/10000 train_time:472483ms step_avg:80.89ms
+[2025-07-07 05:17:01] [Rank 0] step:5861/10000 train_time:474221ms step_avg:80.91ms
+[2025-07-07 05:17:03] [Rank 0] step:5881/10000 train_time:475723ms step_avg:80.89ms
+[2025-07-07 05:17:04] [Rank 0] step:5901/10000 train_time:477226ms step_avg:80.87ms
+[2025-07-07 05:17:06] [Rank 0] step:5921/10000 train_time:478728ms step_avg:80.85ms
+[2025-07-07 05:17:08] [Rank 0] step:5941/10000 train_time:480899ms step_avg:80.95ms
+[2025-07-07 05:17:09] [Rank 0] step:5961/10000 train_time:482379ms step_avg:80.92ms
+[2025-07-07 05:17:11] [Rank 0] step:5981/10000 train_time:483883ms step_avg:80.90ms
+[2025-07-07 05:17:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:17:13] [Rank 0] PRINT: step:6000/10000 train_loss:2.0136 val_loss:1.9498 train_time:485389ms step_avg:80.90ms
+[2025-07-07 05:17:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:17:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:17:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:22:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:22:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:22:37] [Rank 0] Total Loss: 4.2202
+[2025-07-07 05:22:37] [Rank 0] Total FTA: 0.1662
+[2025-07-07 05:22:37] [Rank 0] Group 0 Loss: 4.4253
+[2025-07-07 05:22:37] [Rank 0] Group 1 Loss: 4.1255
+[2025-07-07 05:22:37] [Rank 0] Group 2 Loss: 4.0477
+[2025-07-07 05:22:37] [Rank 0] Group 3 Loss: 4.1198
+[2025-07-07 05:22:37] [Rank 0] Group 4 Loss: 4.1524
+[2025-07-07 05:22:37] [Rank 0] Group 5 Loss: 4.2417
+[2025-07-07 05:22:37] [Rank 0] Group 6 Loss: 4.1210
+[2025-07-07 05:22:37] [Rank 0] Group 7 Loss: 4.2521
+[2025-07-07 05:22:37] [Rank 0] Group 8 Loss: 4.2045
+[2025-07-07 05:22:37] [Rank 0] Group 9 Loss: 4.1983
+[2025-07-07 05:22:37] [Rank 0] Group 10 Loss: 4.2614
+[2025-07-07 05:22:37] [Rank 0] Group 11 Loss: 4.2374
+[2025-07-07 05:22:37] [Rank 0] Group 0 FTA: 0.3420
+[2025-07-07 05:22:37] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 05:22:37] [Rank 0] Group 2 FTA: 0.1901
+[2025-07-07 05:22:37] [Rank 0] Group 3 FTA: 0.1068
+[2025-07-07 05:22:37] [Rank 0] Group 4 FTA: 0.0938
+[2025-07-07 05:22:37] [Rank 0] Group 5 FTA: 0.0990
+[2025-07-07 05:22:37] [Rank 0] Group 6 FTA: 0.1615
+[2025-07-07 05:22:37] [Rank 0] Group 7 FTA: 0.1615
+[2025-07-07 05:22:37] [Rank 0] Group 8 FTA: 0.1510
+[2025-07-07 05:22:37] [Rank 0] Group 9 FTA: 0.1641
+[2025-07-07 05:22:37] [Rank 0] Group 10 FTA: 0.2012
+[2025-07-07 05:22:37] [Rank 0] Group 11 FTA: 0.1543
+[2025-07-07 05:22:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:22:38] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:22:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:22:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:22:38] [Rank 0] step:6001/10000 train_time:485411ms step_avg:80.89ms
+[2025-07-07 05:22:40] [Rank 0] step:6021/10000 train_time:486917ms step_avg:80.87ms
+[2025-07-07 05:22:42] [Rank 0] step:6041/10000 train_time:489066ms step_avg:80.96ms
+[2025-07-07 05:22:44] [Rank 0] step:6061/10000 train_time:490558ms step_avg:80.94ms
+[2025-07-07 05:22:45] [Rank 0] step:6081/10000 train_time:492053ms step_avg:80.92ms
+[2025-07-07 05:22:47] [Rank 0] step:6101/10000 train_time:493546ms step_avg:80.90ms
+[2025-07-07 05:22:49] [Rank 0] step:6121/10000 train_time:495039ms step_avg:80.88ms
+[2025-07-07 05:22:50] [Rank 0] step:6141/10000 train_time:497353ms step_avg:80.99ms
+[2025-07-07 05:22:52] [Rank 0] step:6161/10000 train_time:498959ms step_avg:80.99ms
+[2025-07-07 05:22:53] [Rank 0] step:6181/10000 train_time:500456ms step_avg:80.97ms
+[2025-07-07 05:22:55] [Rank 0] step:6201/10000 train_time:501955ms step_avg:80.95ms
+[2025-07-07 05:22:57] [Rank 0] step:6221/10000 train_time:504104ms step_avg:81.03ms
+[2025-07-07 05:22:59] [Rank 0] step:6241/10000 train_time:505600ms step_avg:81.01ms
+[2025-07-07 05:23:00] [Rank 0] step:6261/10000 train_time:507097ms step_avg:80.99ms
+[2025-07-07 05:23:02] [Rank 0] step:6281/10000 train_time:508595ms step_avg:80.97ms
+[2025-07-07 05:23:04] [Rank 0] step:6301/10000 train_time:510093ms step_avg:80.95ms
+[2025-07-07 05:23:05] [Rank 0] step:6321/10000 train_time:512254ms step_avg:81.04ms
+[2025-07-07 05:23:07] [Rank 0] step:6341/10000 train_time:513752ms step_avg:81.02ms
+[2025-07-07 05:23:08] [Rank 0] step:6361/10000 train_time:515253ms step_avg:81.00ms
+[2025-07-07 05:23:10] [Rank 0] step:6381/10000 train_time:516753ms step_avg:80.98ms
+[2025-07-07 05:23:12] [Rank 0] step:6401/10000 train_time:518894ms step_avg:81.06ms
+[2025-07-07 05:23:13] [Rank 0] step:6421/10000 train_time:520393ms step_avg:81.05ms
+[2025-07-07 05:23:15] [Rank 0] step:6441/10000 train_time:521894ms step_avg:81.03ms
+[2025-07-07 05:23:16] [Rank 0] step:6461/10000 train_time:523396ms step_avg:81.01ms
+[2025-07-07 05:23:19] [Rank 0] step:6481/10000 train_time:525153ms step_avg:81.03ms
+[2025-07-07 05:23:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:23:21] [Rank 0] PRINT: step:6500/10000 train_loss:1.8965 val_loss:1.8486 train_time:527034ms step_avg:81.08ms
+[2025-07-07 05:23:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:23:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:23:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:28:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:28:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:28:47] [Rank 0] Total Loss: 4.2204
+[2025-07-07 05:28:47] [Rank 0] Total FTA: 0.1880
+[2025-07-07 05:28:47] [Rank 0] Group 0 Loss: 4.4443
+[2025-07-07 05:28:47] [Rank 0] Group 1 Loss: 4.1471
+[2025-07-07 05:28:47] [Rank 0] Group 2 Loss: 4.0025
+[2025-07-07 05:28:47] [Rank 0] Group 3 Loss: 4.1599
+[2025-07-07 05:28:47] [Rank 0] Group 4 Loss: 4.2032
+[2025-07-07 05:28:47] [Rank 0] Group 5 Loss: 4.2094
+[2025-07-07 05:28:47] [Rank 0] Group 6 Loss: 4.1612
+[2025-07-07 05:28:47] [Rank 0] Group 7 Loss: 4.2427
+[2025-07-07 05:28:47] [Rank 0] Group 8 Loss: 4.2157
+[2025-07-07 05:28:47] [Rank 0] Group 9 Loss: 4.2231
+[2025-07-07 05:28:47] [Rank 0] Group 10 Loss: 4.2381
+[2025-07-07 05:28:47] [Rank 0] Group 11 Loss: 4.2007
+[2025-07-07 05:28:47] [Rank 0] Group 0 FTA: 0.1834
+[2025-07-07 05:28:47] [Rank 0] Group 1 FTA: 0.1771
+[2025-07-07 05:28:47] [Rank 0] Group 2 FTA: 0.2500
+[2025-07-07 05:28:47] [Rank 0] Group 3 FTA: 0.1693
+[2025-07-07 05:28:47] [Rank 0] Group 4 FTA: 0.1458
+[2025-07-07 05:28:47] [Rank 0] Group 5 FTA: 0.1042
+[2025-07-07 05:28:47] [Rank 0] Group 6 FTA: 0.1745
+[2025-07-07 05:28:47] [Rank 0] Group 7 FTA: 0.2344
+[2025-07-07 05:28:47] [Rank 0] Group 8 FTA: 0.2344
+[2025-07-07 05:28:47] [Rank 0] Group 9 FTA: 0.1875
+[2025-07-07 05:28:47] [Rank 0] Group 10 FTA: 0.2129
+[2025-07-07 05:28:47] [Rank 0] Group 11 FTA: 0.1846
+[2025-07-07 05:28:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:28:48] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:28:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:28:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:28:49] [Rank 0] step:6501/10000 train_time:527056ms step_avg:81.07ms
+[2025-07-07 05:28:50] [Rank 0] step:6521/10000 train_time:528554ms step_avg:81.05ms
+[2025-07-07 05:28:52] [Rank 0] step:6541/10000 train_time:530045ms step_avg:81.03ms
+[2025-07-07 05:28:53] [Rank 0] step:6561/10000 train_time:531538ms step_avg:81.01ms
+[2025-07-07 05:28:55] [Rank 0] step:6581/10000 train_time:533372ms step_avg:81.05ms
+[2025-07-07 05:28:56] [Rank 0] step:6601/10000 train_time:534862ms step_avg:81.03ms
+[2025-07-07 05:28:58] [Rank 0] step:6621/10000 train_time:536359ms step_avg:81.01ms
+[2025-07-07 05:28:59] [Rank 0] step:6641/10000 train_time:537854ms step_avg:80.99ms
+[2025-07-07 05:29:02] [Rank 0] step:6661/10000 train_time:539348ms step_avg:80.97ms
+[2025-07-07 05:29:03] [Rank 0] step:6681/10000 train_time:541514ms step_avg:81.05ms
+[2025-07-07 05:29:05] [Rank 0] step:6701/10000 train_time:543008ms step_avg:81.03ms
+[2025-07-07 05:29:06] [Rank 0] step:6721/10000 train_time:544504ms step_avg:81.02ms
+[2025-07-07 05:29:08] [Rank 0] step:6741/10000 train_time:546004ms step_avg:81.00ms
+[2025-07-07 05:29:10] [Rank 0] step:6761/10000 train_time:548295ms step_avg:81.10ms
+[2025-07-07 05:29:11] [Rank 0] step:6781/10000 train_time:549855ms step_avg:81.09ms
+[2025-07-07 05:29:13] [Rank 0] step:6801/10000 train_time:551356ms step_avg:81.07ms
+[2025-07-07 05:29:14] [Rank 0] step:6821/10000 train_time:552859ms step_avg:81.05ms
+[2025-07-07 05:29:16] [Rank 0] step:6841/10000 train_time:554409ms step_avg:81.04ms
+[2025-07-07 05:29:18] [Rank 0] step:6861/10000 train_time:556094ms step_avg:81.05ms
+[2025-07-07 05:29:19] [Rank 0] step:6881/10000 train_time:557595ms step_avg:81.03ms
+[2025-07-07 05:29:21] [Rank 0] step:6901/10000 train_time:559097ms step_avg:81.02ms
+[2025-07-07 05:29:22] [Rank 0] step:6921/10000 train_time:560598ms step_avg:81.00ms
+[2025-07-07 05:29:24] [Rank 0] step:6941/10000 train_time:562746ms step_avg:81.08ms
+[2025-07-07 05:29:26] [Rank 0] step:6961/10000 train_time:564245ms step_avg:81.06ms
+[2025-07-07 05:29:27] [Rank 0] step:6981/10000 train_time:565744ms step_avg:81.04ms
+[2025-07-07 05:29:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:29:30] [Rank 0] PRINT: step:7000/10000 train_loss:1.8078 val_loss:1.7718 train_time:567245ms step_avg:81.03ms
+[2025-07-07 05:29:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:29:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:29:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:35:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:35:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:35:02] [Rank 0] Total Loss: 4.2366
+[2025-07-07 05:35:02] [Rank 0] Total FTA: 0.1797
+[2025-07-07 05:35:02] [Rank 0] Group 0 Loss: 4.4690
+[2025-07-07 05:35:02] [Rank 0] Group 1 Loss: 4.2282
+[2025-07-07 05:35:02] [Rank 0] Group 2 Loss: 3.9880
+[2025-07-07 05:35:02] [Rank 0] Group 3 Loss: 4.1634
+[2025-07-07 05:35:02] [Rank 0] Group 4 Loss: 4.1593
+[2025-07-07 05:35:02] [Rank 0] Group 5 Loss: 4.2046
+[2025-07-07 05:35:02] [Rank 0] Group 6 Loss: 4.2300
+[2025-07-07 05:35:02] [Rank 0] Group 7 Loss: 4.2236
+[2025-07-07 05:35:02] [Rank 0] Group 8 Loss: 4.2016
+[2025-07-07 05:35:02] [Rank 0] Group 9 Loss: 4.2447
+[2025-07-07 05:35:02] [Rank 0] Group 10 Loss: 4.2428
+[2025-07-07 05:35:02] [Rank 0] Group 11 Loss: 4.2422
+[2025-07-07 05:35:02] [Rank 0] Group 0 FTA: 0.1834
+[2025-07-07 05:35:02] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 05:35:02] [Rank 0] Group 2 FTA: 0.3438
+[2025-07-07 05:35:02] [Rank 0] Group 3 FTA: 0.1641
+[2025-07-07 05:35:02] [Rank 0] Group 4 FTA: 0.1042
+[2025-07-07 05:35:02] [Rank 0] Group 5 FTA: 0.1250
+[2025-07-07 05:35:02] [Rank 0] Group 6 FTA: 0.2318
+[2025-07-07 05:35:02] [Rank 0] Group 7 FTA: 0.2057
+[2025-07-07 05:35:02] [Rank 0] Group 8 FTA: 0.1927
+[2025-07-07 05:35:02] [Rank 0] Group 9 FTA: 0.1641
+[2025-07-07 05:35:02] [Rank 0] Group 10 FTA: 0.1738
+[2025-07-07 05:35:02] [Rank 0] Group 11 FTA: 0.2100
+[2025-07-07 05:35:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:35:02] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:35:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:35:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:35:03] [Rank 0] step:7001/10000 train_time:567267ms step_avg:81.03ms
+[2025-07-07 05:35:05] [Rank 0] step:7021/10000 train_time:569024ms step_avg:81.05ms
+[2025-07-07 05:35:06] [Rank 0] step:7041/10000 train_time:570496ms step_avg:81.02ms
+[2025-07-07 05:35:08] [Rank 0] step:7061/10000 train_time:571987ms step_avg:81.01ms
+[2025-07-07 05:35:09] [Rank 0] step:7081/10000 train_time:573483ms step_avg:80.99ms
+[2025-07-07 05:35:11] [Rank 0] step:7101/10000 train_time:574977ms step_avg:80.97ms
+[2025-07-07 05:35:13] [Rank 0] step:7121/10000 train_time:577136ms step_avg:81.05ms
+[2025-07-07 05:35:14] [Rank 0] step:7141/10000 train_time:578631ms step_avg:81.03ms
+[2025-07-07 05:35:16] [Rank 0] step:7161/10000 train_time:580128ms step_avg:81.01ms
+[2025-07-07 05:35:17] [Rank 0] step:7181/10000 train_time:581625ms step_avg:80.99ms
+[2025-07-07 05:35:19] [Rank 0] step:7201/10000 train_time:583176ms step_avg:80.99ms
+[2025-07-07 05:35:21] [Rank 0] step:7221/10000 train_time:584853ms step_avg:80.99ms
+[2025-07-07 05:35:22] [Rank 0] step:7241/10000 train_time:586351ms step_avg:80.98ms
+[2025-07-07 05:35:24] [Rank 0] step:7261/10000 train_time:587850ms step_avg:80.96ms
+[2025-07-07 05:35:25] [Rank 0] step:7281/10000 train_time:589350ms step_avg:80.94ms
+[2025-07-07 05:35:27] [Rank 0] step:7301/10000 train_time:591506ms step_avg:81.02ms
+[2025-07-07 05:35:29] [Rank 0] step:7321/10000 train_time:593134ms step_avg:81.02ms
+[2025-07-07 05:35:31] [Rank 0] step:7341/10000 train_time:594694ms step_avg:81.01ms
+[2025-07-07 05:35:32] [Rank 0] step:7361/10000 train_time:596248ms step_avg:81.00ms
+[2025-07-07 05:35:34] [Rank 0] step:7381/10000 train_time:597800ms step_avg:80.99ms
+[2025-07-07 05:35:36] [Rank 0] step:7401/10000 train_time:599907ms step_avg:81.06ms
+[2025-07-07 05:35:37] [Rank 0] step:7421/10000 train_time:601409ms step_avg:81.04ms
+[2025-07-07 05:35:39] [Rank 0] step:7441/10000 train_time:602909ms step_avg:81.03ms
+[2025-07-07 05:35:40] [Rank 0] step:7461/10000 train_time:604412ms step_avg:81.01ms
+[2025-07-07 05:35:42] [Rank 0] step:7481/10000 train_time:606581ms step_avg:81.08ms
+[2025-07-07 05:35:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:35:45] [Rank 0] PRINT: step:7500/10000 train_loss:1.7404 val_loss:1.7128 train_time:608080ms step_avg:81.08ms
+[2025-07-07 05:35:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:35:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:35:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:41:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:41:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:41:10] [Rank 0] Total Loss: 4.2070
+[2025-07-07 05:41:10] [Rank 0] Total FTA: 0.2052
+[2025-07-07 05:41:10] [Rank 0] Group 0 Loss: 4.4985
+[2025-07-07 05:41:10] [Rank 0] Group 1 Loss: 4.1128
+[2025-07-07 05:41:10] [Rank 0] Group 2 Loss: 3.9619
+[2025-07-07 05:41:10] [Rank 0] Group 3 Loss: 4.1119
+[2025-07-07 05:41:10] [Rank 0] Group 4 Loss: 4.1654
+[2025-07-07 05:41:10] [Rank 0] Group 5 Loss: 4.1027
+[2025-07-07 05:41:10] [Rank 0] Group 6 Loss: 4.1517
+[2025-07-07 05:41:10] [Rank 0] Group 7 Loss: 4.2257
+[2025-07-07 05:41:10] [Rank 0] Group 8 Loss: 4.2154
+[2025-07-07 05:41:10] [Rank 0] Group 9 Loss: 4.1888
+[2025-07-07 05:41:10] [Rank 0] Group 10 Loss: 4.2250
+[2025-07-07 05:41:11] [Rank 0] Group 11 Loss: 4.2120
+[2025-07-07 05:41:11] [Rank 0] Group 0 FTA: 0.1964
+[2025-07-07 05:41:11] [Rank 0] Group 1 FTA: 0.1771
+[2025-07-07 05:41:11] [Rank 0] Group 2 FTA: 0.3464
+[2025-07-07 05:41:11] [Rank 0] Group 3 FTA: 0.1823
+[2025-07-07 05:41:11] [Rank 0] Group 4 FTA: 0.1510
+[2025-07-07 05:41:11] [Rank 0] Group 5 FTA: 0.1432
+[2025-07-07 05:41:11] [Rank 0] Group 6 FTA: 0.1953
+[2025-07-07 05:41:11] [Rank 0] Group 7 FTA: 0.2005
+[2025-07-07 05:41:11] [Rank 0] Group 8 FTA: 0.1823
+[2025-07-07 05:41:11] [Rank 0] Group 9 FTA: 0.1953
+[2025-07-07 05:41:11] [Rank 0] Group 10 FTA: 0.2363
+[2025-07-07 05:41:11] [Rank 0] Group 11 FTA: 0.2227
+[2025-07-07 05:41:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:41:11] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png +[2025-07-07 05:41:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png +[2025-07-07 05:41:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png +[2025-07-07 05:41:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png +[2025-07-07 05:41:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png +[2025-07-07 05:41:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png +[2025-07-07 05:41:12] [Rank 0] step:7501/10000 train_time:608102ms step_avg:81.07ms +[2025-07-07 05:41:12] [Rank 0] step:7501/10000 train_time:608102ms step_avg:81.07ms +[2025-07-07 05:41:13] [Rank 0] step:7521/10000 train_time:609606ms step_avg:81.05ms +[2025-07-07 05:41:13] [Rank 0] step:7521/10000 train_time:609606ms step_avg:81.05ms +[2025-07-07 05:41:15] [Rank 0] step:7541/10000 train_time:611100ms step_avg:81.04ms +[2025-07-07 05:41:15] [Rank 0] step:7541/10000 train_time:611100ms step_avg:81.04ms +[2025-07-07 05:41:17] [Rank 0] step:7561/10000 train_time:612854ms step_avg:81.05ms +[2025-07-07 05:41:17] [Rank 0] step:7561/10000 train_time:612854ms step_avg:81.05ms +[2025-07-07 05:41:19] [Rank 0] step:7581/10000 train_time:614753ms step_avg:81.09ms +[2025-07-07 05:41:19] [Rank 0] step:7581/10000 train_time:614753ms step_avg:81.09ms +[2025-07-07 05:41:20] [Rank 0] step:7601/10000 train_time:616247ms step_avg:81.07ms +[2025-07-07 05:41:20] [Rank 0] step:7601/10000 train_time:616247ms step_avg:81.07ms +[2025-07-07 05:41:22] [Rank 0] step:7621/10000 train_time:617742ms step_avg:81.06ms +[2025-07-07 05:41:22] [Rank 0] step:7621/10000 train_time:617742ms step_avg:81.06ms +[2025-07-07 05:41:23] [Rank 0] step:7641/10000 train_time:619237ms step_avg:81.04ms +[2025-07-07 05:41:23] [Rank 0] step:7641/10000 train_time:619237ms step_avg:81.04ms +[2025-07-07 05:41:25] [Rank 0] step:7661/10000 train_time:621397ms step_avg:81.11ms +[2025-07-07 05:41:25] [Rank 0] step:7661/10000 train_time:621397ms step_avg:81.11ms +[2025-07-07 05:41:27] [Rank 0] step:7681/10000 train_time:622893ms step_avg:81.10ms +[2025-07-07 05:41:27] [Rank 0] step:7681/10000 train_time:622893ms step_avg:81.10ms +[2025-07-07 05:41:28] [Rank 0] step:7701/10000 train_time:624392ms step_avg:81.08ms +[2025-07-07 05:41:28] [Rank 0] step:7701/10000 train_time:624392ms step_avg:81.08ms +[2025-07-07 05:41:30] [Rank 0] step:7721/10000 train_time:625892ms step_avg:81.06ms +[2025-07-07 05:41:30] [Rank 0] step:7721/10000 train_time:625892ms step_avg:81.06ms +[2025-07-07 05:41:32] [Rank 0] step:7741/10000 train_time:627393ms step_avg:81.05ms +[2025-07-07 05:41:32] [Rank 0] step:7741/10000 train_time:627393ms step_avg:81.05ms +[2025-07-07 05:41:33] [Rank 0] step:7761/10000 train_time:629131ms step_avg:81.06ms +[2025-07-07 05:41:33] [Rank 0] step:7761/10000 train_time:629131ms step_avg:81.06ms +[2025-07-07 05:41:35] [Rank 0] step:7781/10000 train_time:630631ms step_avg:81.05ms +[2025-07-07 05:41:35] [Rank 0] step:7781/10000 train_time:630631ms step_avg:81.05ms +[2025-07-07 05:41:36] [Rank 0] step:7801/10000 train_time:632134ms step_avg:81.03ms +[2025-07-07 05:41:36] [Rank 
0] step:7801/10000 train_time:632134ms step_avg:81.03ms +[2025-07-07 05:41:38] [Rank 0] step:7821/10000 train_time:633638ms step_avg:81.02ms +[2025-07-07 05:41:38] [Rank 0] step:7821/10000 train_time:633638ms step_avg:81.02ms +[2025-07-07 05:41:39] [Rank 0] step:7841/10000 train_time:635374ms step_avg:81.03ms +[2025-07-07 05:41:39] [Rank 0] step:7841/10000 train_time:635374ms step_avg:81.03ms +[2025-07-07 05:41:41] [Rank 0] step:7861/10000 train_time:636878ms step_avg:81.02ms +[2025-07-07 05:41:41] [Rank 0] step:7861/10000 train_time:636878ms step_avg:81.02ms +[2025-07-07 05:41:42] [Rank 0] step:7881/10000 train_time:638382ms step_avg:81.00ms +[2025-07-07 05:41:42] [Rank 0] step:7881/10000 train_time:638382ms step_avg:81.00ms +[2025-07-07 05:41:44] [Rank 0] step:7901/10000 train_time:639891ms step_avg:80.99ms +[2025-07-07 05:41:44] [Rank 0] step:7901/10000 train_time:639891ms step_avg:80.99ms +[2025-07-07 05:41:46] [Rank 0] step:7921/10000 train_time:641450ms step_avg:80.98ms +[2025-07-07 05:41:46] [Rank 0] step:7921/10000 train_time:641450ms step_avg:80.98ms +[2025-07-07 05:41:48] [Rank 0] step:7941/10000 train_time:643686ms step_avg:81.06ms +[2025-07-07 05:41:48] [Rank 0] step:7941/10000 train_time:643686ms step_avg:81.06ms +[2025-07-07 05:41:49] [Rank 0] step:7961/10000 train_time:645239ms step_avg:81.05ms +[2025-07-07 05:41:49] [Rank 0] step:7961/10000 train_time:645239ms step_avg:81.05ms +[2025-07-07 05:41:51] [Rank 0] step:7981/10000 train_time:646806ms step_avg:81.04ms +[2025-07-07 05:41:51] [Rank 0] step:7981/10000 train_time:646806ms step_avg:81.04ms +[2025-07-07 05:41:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:41:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 05:41:53] [Rank 0] PRINT: step:8000/10000 train_loss:1.6887 val_loss:1.6679 train_time:648311ms step_avg:81.04ms +[2025-07-07 05:41:53] [Rank 0] PRINT: step:8000/10000 train_loss:1.6887 val_loss:1.6679 train_time:648311ms step_avg:81.04ms +[2025-07-07 05:41:53] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:41:53] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 05:41:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 05:41:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 05:41:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:47:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:47:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:47:21] [Rank 0] Total Loss: 4.2100
+[2025-07-07 05:47:21] [Rank 0] Total FTA: 0.1983
+[2025-07-07 05:47:21] [Rank 0] Group 0 Loss: 4.4377
+[2025-07-07 05:47:21] [Rank 0] Group 1 Loss: 4.1268
+[2025-07-07 05:47:21] [Rank 0] Group 2 Loss: 3.9957
+[2025-07-07 05:47:21] [Rank 0] Group 3 Loss: 4.1452
+[2025-07-07 05:47:21] [Rank 0] Group 4 Loss: 4.1194
+[2025-07-07 05:47:21] [Rank 0] Group 5 Loss: 4.1658
+[2025-07-07 05:47:21] [Rank 0] Group 6 Loss: 4.1549
+[2025-07-07 05:47:21] [Rank 0] Group 7 Loss: 4.2016
+[2025-07-07 05:47:21] [Rank 0] Group 8 Loss: 4.2147
+[2025-07-07 05:47:21] [Rank 0] Group 9 Loss: 4.2459
+[2025-07-07 05:47:21] [Rank 0] Group 10 Loss: 4.2041
+[2025-07-07 05:47:21] [Rank 0] Group 11 Loss: 4.2413
+[2025-07-07 05:47:21] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 05:47:21] [Rank 0] Group 1 FTA: 0.1719
+[2025-07-07 05:47:21] [Rank 0] Group 2 FTA: 0.2526
+[2025-07-07 05:47:21] [Rank 0] Group 3 FTA: 0.1615
+[2025-07-07 05:47:21] [Rank 0] Group 4 FTA: 0.1224
+[2025-07-07 05:47:21] [Rank 0] Group 5 FTA: 0.1901
+[2025-07-07 05:47:21] [Rank 0] Group 6 FTA: 0.2083
+[2025-07-07 05:47:21] [Rank 0] Group 7 FTA: 0.2604
+[2025-07-07 05:47:21] [Rank 0] Group 8 FTA: 0.2240
+[2025-07-07 05:47:21] [Rank 0] Group 9 FTA: 0.2109
+[2025-07-07 05:47:21] [Rank 0] Group 10 FTA: 0.2402
+[2025-07-07 05:47:21] [Rank 0] Group 11 FTA: 0.1982
+[2025-07-07 05:47:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:47:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:47:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:47:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:47:22] [Rank 0] step:8001/10000 train_time:648333ms step_avg:81.03ms
+[2025-07-07 05:47:24] [Rank 0] step:8021/10000 train_time:650492ms step_avg:81.10ms
+[2025-07-07 05:47:26] [Rank 0] step:8041/10000 train_time:651985ms step_avg:81.08ms
+[2025-07-07 05:47:27] [Rank 0] step:8061/10000 train_time:653477ms step_avg:81.07ms
+[2025-07-07 05:47:29] [Rank 0] step:8081/10000 train_time:654969ms step_avg:81.05ms
+[2025-07-07 05:47:31] [Rank 0] step:8101/10000 train_time:656720ms step_avg:81.07ms
+[2025-07-07 05:47:32] [Rank 0] step:8121/10000 train_time:658601ms step_avg:81.10ms
+[2025-07-07 05:47:34] [Rank 0] step:8141/10000 train_time:660098ms step_avg:81.08ms
+[2025-07-07 05:47:35] [Rank 0] step:8161/10000 train_time:661594ms step_avg:81.07ms
+[2025-07-07 05:47:37] [Rank 0] step:8181/10000 train_time:663095ms step_avg:81.05ms
+[2025-07-07 05:47:39] [Rank 0] step:8201/10000 train_time:665247ms step_avg:81.12ms
+[2025-07-07 05:47:41] [Rank 0] step:8221/10000 train_time:666744ms step_avg:81.10ms
+[2025-07-07 05:47:42] [Rank 0] step:8241/10000 train_time:668244ms step_avg:81.09ms
+[2025-07-07 05:47:44] [Rank 0] step:8261/10000 train_time:669744ms step_avg:81.07ms
+[2025-07-07 05:47:45] [Rank 0] step:8281/10000 train_time:671501ms step_avg:81.09ms
+[2025-07-07 05:47:47] [Rank 0] step:8301/10000 train_time:672982ms step_avg:81.07ms
+[2025-07-07 05:47:48] [Rank 0] step:8321/10000 train_time:674483ms step_avg:81.06ms
+[2025-07-07 05:47:50] [Rank 0] step:8341/10000 train_time:675984ms step_avg:81.04ms
+[2025-07-07 05:47:51] [Rank 0] step:8361/10000 train_time:677487ms step_avg:81.03ms
+[2025-07-07 05:47:53] [Rank 0] step:8381/10000 train_time:679235ms step_avg:81.04ms
+[2025-07-07 05:47:55] [Rank 0] step:8401/10000 train_time:680738ms step_avg:81.03ms
+[2025-07-07 05:47:56] [Rank 0] step:8421/10000 train_time:682242ms step_avg:81.02ms
+[2025-07-07 05:47:58] [Rank 0] step:8441/10000 train_time:683746ms step_avg:81.00ms
+[2025-07-07 05:47:59] [Rank 0] step:8461/10000 train_time:685249ms step_avg:80.99ms
+[2025-07-07 05:48:01] [Rank 0] step:8481/10000 train_time:686992ms step_avg:81.00ms
+[2025-07-07 05:48:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:48:03] [Rank 0] PRINT: step:8500/10000 train_loss:1.6489 val_loss:1.6333 train_time:688499ms step_avg:81.00ms
+[2025-07-07 05:48:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:48:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:48:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:53:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:53:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:53:29] [Rank 0] Total Loss: 4.2275
+[2025-07-07 05:53:29] [Rank 0] Total FTA: 0.2100
+[2025-07-07 05:53:29] [Rank 0] Group 0 Loss: 4.4586
+[2025-07-07 05:53:29] [Rank 0] Group 1 Loss: 4.1334
+[2025-07-07 05:53:29] [Rank 0] Group 2 Loss: 3.9907
+[2025-07-07 05:53:29] [Rank 0] Group 3 Loss: 4.1324
+[2025-07-07 05:53:29] [Rank 0] Group 4 Loss: 4.2141
+[2025-07-07 05:53:29] [Rank 0] Group 5 Loss: 4.1717
+[2025-07-07 05:53:29] [Rank 0] Group 6 Loss: 4.2104
+[2025-07-07 05:53:29] [Rank 0] Group 7 Loss: 4.2494
+[2025-07-07 05:53:29] [Rank 0] Group 8 Loss: 4.2388
+[2025-07-07 05:53:29] [Rank 0] Group 9 Loss: 4.2870
+[2025-07-07 05:53:29] [Rank 0] Group 10 Loss: 4.2376
+[2025-07-07 05:53:29] [Rank 0] Group 11 Loss: 4.2136
+[2025-07-07 05:53:29] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 05:53:29] [Rank 0] Group 1 FTA: 0.1328
+[2025-07-07 05:53:29] [Rank 0] Group 2 FTA: 0.3620
+[2025-07-07 05:53:29] [Rank 0] Group 3 FTA: 0.1719
+[2025-07-07 05:53:29] [Rank 0] Group 4 FTA: 0.1589
+[2025-07-07 05:53:29] [Rank 0] Group 5 FTA: 0.1562
+[2025-07-07 05:53:29] [Rank 0] Group 6 FTA: 0.2370
+[2025-07-07 05:53:29] [Rank 0] Group 7 FTA: 0.2344
+[2025-07-07 05:53:29] [Rank 0] Group 8 FTA: 0.2188
+[2025-07-07 05:53:29] [Rank 0] Group 9 FTA: 0.2070
+[2025-07-07 05:53:29] [Rank 0] Group 10 FTA: 0.2266
+[2025-07-07 05:53:29] [Rank 0] Group 11 FTA: 0.2393
+[2025-07-07 05:53:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:53:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:53:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:53:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:53:30] [Rank 0] step:8501/10000 train_time:688520ms step_avg:80.99ms
+[2025-07-07 05:53:32] [Rank 0] step:8521/10000 train_time:690036ms step_avg:80.98ms
+[2025-07-07 05:53:33] [Rank 0] step:8541/10000 train_time:691527ms step_avg:80.97ms
+[2025-07-07 05:53:36] [Rank 0] step:8561/10000 train_time:693680ms step_avg:81.03ms
+[2025-07-07 05:53:37] [Rank 0] step:8581/10000 train_time:695171ms step_avg:81.01ms
+[2025-07-07 05:53:39] [Rank 0] step:8601/10000 train_time:696667ms step_avg:81.00ms
+[2025-07-07 05:53:40] [Rank 0] step:8621/10000 train_time:698164ms step_avg:80.98ms
+[2025-07-07 05:53:42] [Rank 0] step:8641/10000 train_time:699713ms step_avg:80.98ms
+[2025-07-07 05:53:43] [Rank 0] step:8661/10000 train_time:701393ms step_avg:80.98ms
+[2025-07-07 05:53:45] [Rank 0] step:8681/10000 train_time:702891ms step_avg:80.97ms
+[2025-07-07 05:53:46] [Rank 0] step:8701/10000 train_time:704390ms step_avg:80.96ms
+[2025-07-07 05:53:48] [Rank 0] step:8721/10000 train_time:705888ms step_avg:80.94ms
+[2025-07-07 05:53:50] [Rank 0] step:8741/10000 train_time:708049ms step_avg:81.00ms
+[2025-07-07 05:53:51] [Rank 0] step:8761/10000 train_time:709549ms step_avg:80.99ms
+[2025-07-07 05:53:53] [Rank 0] step:8781/10000 train_time:711051ms step_avg:80.98ms
+[2025-07-07 05:53:54] [Rank 0] step:8801/10000 train_time:712552ms step_avg:80.96ms
+[2025-07-07 05:53:57] [Rank 0] step:8821/10000 train_time:714110ms step_avg:80.96ms
+[2025-07-07 05:53:58] [Rank 0] step:8841/10000 train_time:716228ms step_avg:81.01ms
+[2025-07-07 05:54:00] [Rank 0] step:8861/10000 train_time:717732ms step_avg:81.00ms
+[2025-07-07 05:54:01] [Rank 0] step:8881/10000 train_time:719233ms step_avg:80.99ms
+[2025-07-07 05:54:03] [Rank 0] step:8901/10000 train_time:720736ms step_avg:80.97ms
+[2025-07-07 05:54:05] [Rank 0] step:8921/10000 train_time:722897ms step_avg:81.03ms
+[2025-07-07 05:54:06] [Rank 0] step:8941/10000 train_time:724401ms step_avg:81.02ms
+[2025-07-07 05:54:08] [Rank 0] step:8961/10000 train_time:725902ms step_avg:81.01ms
+[2025-07-07 05:54:09] [Rank 0] step:8981/10000 train_time:727407ms step_avg:80.99ms
+[2025-07-07 05:54:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:54:12] [Rank 0] PRINT: step:9000/10000 train_loss:1.6185 val_loss:1.6074 train_time:728911ms step_avg:80.99ms
+[2025-07-07 05:54:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:54:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:54:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:59:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:59:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:59:35] [Rank 0] Total Loss: 4.2509
+[2025-07-07 05:59:35] [Rank 0] Total FTA: 0.2340
+[2025-07-07 05:59:35] [Rank 0] Group 0 Loss: 4.4821
+[2025-07-07 05:59:35] [Rank 0] Group 1 Loss: 4.1528
+[2025-07-07 05:59:35] [Rank 0] Group 2 Loss: 4.0005
+[2025-07-07 05:59:35] [Rank 0] Group 3 Loss: 4.2385
+[2025-07-07 05:59:35] [Rank 0] Group 4 Loss: 4.2901
+[2025-07-07 05:59:35] [Rank 0] Group 5 Loss: 4.2384
+[2025-07-07 05:59:35] [Rank 0] Group 6 Loss: 4.1995
+[2025-07-07 05:59:35] [Rank 0] Group 7 Loss: 4.2660
+[2025-07-07 05:59:35] [Rank 0] Group 8 Loss: 4.2273
+[2025-07-07 05:59:35] [Rank 0] Group 9 Loss: 4.2353
+[2025-07-07 05:59:35] [Rank 0] Group 10 Loss: 4.2476
+[2025-07-07 05:59:35] [Rank 0] Group 11 Loss: 4.2309
+[2025-07-07 05:59:35] [Rank 0] Group 0 FTA: 0.1782
+[2025-07-07 05:59:35] [Rank 0] Group 1 FTA: 0.1927
+[2025-07-07 05:59:35] [Rank 0] Group 2 FTA: 0.5417
+[2025-07-07 05:59:35] [Rank 0] Group 3 FTA: 0.1979
+[2025-07-07 05:59:35] [Rank 0] Group 4 FTA: 0.1510
+[2025-07-07 05:59:35] [Rank 0] Group 5 FTA: 0.2188
+[2025-07-07 05:59:35] [Rank 0] Group 6 FTA: 0.2083
+[2025-07-07 05:59:35] [Rank 0] Group 7 FTA: 0.2161
+[2025-07-07 05:59:35] [Rank 0] Group 8 FTA: 0.2500
+[2025-07-07 05:59:35] [Rank 0] Group 9 FTA: 0.1992
+[2025-07-07 05:59:35] [Rank 0] Group 10 FTA: 0.2734
+[2025-07-07 05:59:35] [Rank 0] Group 11 FTA: 0.2256
+[2025-07-07 05:59:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 05:59:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 05:59:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 05:59:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 05:59:37] [Rank 0] step:9001/10000 train_time:729044ms step_avg:81.00ms
+[2025-07-07 05:59:39] [Rank 0] step:9021/10000 train_time:731128ms step_avg:81.05ms
+[2025-07-07 05:59:40] [Rank 0] step:9041/10000 train_time:732619ms step_avg:81.03ms
+[2025-07-07 05:59:41] [Rank 0] step:9061/10000 train_time:734112ms step_avg:81.02ms
+[2025-07-07 05:59:43] [Rank 0] step:9081/10000 train_time:735848ms step_avg:81.03ms
+[2025-07-07 05:59:45] [Rank 0] step:9101/10000 train_time:738058ms step_avg:81.10ms
+[2025-07-07 05:59:47] [Rank 0] step:9121/10000 train_time:739552ms step_avg:81.08ms
+[2025-07-07 05:59:48] [Rank 0] step:9141/10000 train_time:741047ms step_avg:81.07ms
+[2025-07-07 05:59:50] [Rank 0] step:9161/10000 train_time:742543ms step_avg:81.05ms
+[2025-07-07 05:59:52] [Rank 0] step:9181/10000 train_time:744039ms step_avg:81.04ms
+[2025-07-07 05:59:54] [Rank 0] step:9201/10000 train_time:746205ms step_avg:81.10ms
+[2025-07-07 05:59:55] [Rank 0] step:9221/10000 train_time:747698ms step_avg:81.09ms
+[2025-07-07 05:59:57] [Rank 0] step:9241/10000 train_time:749194ms step_avg:81.07ms
+[2025-07-07 05:59:58] [Rank 0] step:9261/10000 train_time:750688ms step_avg:81.06ms
+[2025-07-07 06:00:00] [Rank 0] step:9281/10000 train_time:752420ms step_avg:81.07ms
+[2025-07-07 06:00:01] [Rank 0] step:9301/10000 train_time:753913ms step_avg:81.06ms
+[2025-07-07 06:00:03] [Rank 0] step:9321/10000 train_time:755410ms step_avg:81.04ms
+[2025-07-07 06:00:04] [Rank 0] step:9341/10000 train_time:756904ms step_avg:81.03ms
+[2025-07-07 06:00:06] [Rank 0] step:9361/10000 train_time:758401ms step_avg:81.02ms
+[2025-07-07 06:00:08] [Rank 0] step:9381/10000 train_time:760541ms step_avg:81.07ms
+[2025-07-07 06:00:09] [Rank 0] step:9401/10000 train_time:762039ms step_avg:81.06ms
+[2025-07-07 06:00:11] [Rank 0] step:9421/10000 train_time:763537ms step_avg:81.05ms
+[2025-07-07 06:00:12] [Rank 0] step:9441/10000 train_time:765034ms step_avg:81.03ms
+[2025-07-07 06:00:15] [Rank 0] step:9461/10000 train_time:767183ms step_avg:81.09ms
+[2025-07-07 06:00:16] [Rank 0] step:9481/10000 train_time:768680ms step_avg:81.08ms
+[2025-07-07 06:00:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:00:18] [Rank 0] PRINT: step:9500/10000 train_loss:1.5958 val_loss:1.5878 train_time:770178ms step_avg:81.07ms
+[2025-07-07 06:00:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:00:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:00:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:05:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:05:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:05:46] [Rank 0] Total Loss: 4.2476
+[2025-07-07 06:05:46] [Rank 0] Total FTA: 0.2436
+[2025-07-07 06:05:46] [Rank 0] Group 0 Loss: 4.4902
+[2025-07-07 06:05:46] [Rank 0] Group 1 Loss: 4.1655
+[2025-07-07 06:05:46] [Rank 0] Group 2 Loss: 3.9992
+[2025-07-07 06:05:46] [Rank 0] Group 3 Loss: 4.1484
+[2025-07-07 06:05:46] [Rank 0] Group 4 Loss: 4.2585
+[2025-07-07 06:05:46] [Rank 0] Group 5 Loss: 4.1498
+[2025-07-07 06:05:46] [Rank 0] Group 6 Loss: 4.1655
+[2025-07-07 06:05:46] [Rank 0] Group 7 Loss: 4.2730
+[2025-07-07 06:05:46] [Rank 0] Group 8 Loss: 4.2243
+[2025-07-07 06:05:46] [Rank 0] Group 9 Loss: 4.3221
+[2025-07-07 06:05:46] [Rank 0] Group 10 Loss: 4.2593
+[2025-07-07 06:05:46] [Rank 0] Group 11 Loss: 4.2647
+[2025-07-07 06:05:46] [Rank 0] Group 0 FTA: 0.1521
+[2025-07-07 06:05:46] [Rank 0] Group 1 FTA: 0.3151
+[2025-07-07 06:05:46] [Rank 0] Group 2 FTA: 0.4089
+[2025-07-07 06:05:46] [Rank 0] Group 3 FTA: 0.2552
+[2025-07-07 06:05:46] [Rank 0] Group 4 FTA: 0.1979
+[2025-07-07 06:05:46] [Rank 0] Group 5 FTA: 0.2240
+[2025-07-07 06:05:46] [Rank 0] Group 6 FTA: 0.2708
+[2025-07-07 06:05:46] [Rank 0] Group 7 FTA: 0.2266
+[2025-07-07 06:05:46] [Rank 0] Group 8 FTA: 0.2552
+[2025-07-07 06:05:46] [Rank 0] Group 9 FTA: 0.2539
+[2025-07-07 06:05:46] [Rank 0] Group 10 FTA: 0.2578
+[2025-07-07 06:05:46] [Rank 0] Group 11 FTA: 0.2256
+[2025-07-07 06:05:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 06:05:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 06:05:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 06:05:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 06:05:48] [Rank 0] step:9501/10000 train_time:770199ms step_avg:81.07ms
+[2025-07-07 06:05:49] [Rank 0] step:9521/10000 train_time:771704ms step_avg:81.05ms
+[2025-07-07 06:05:51] [Rank 0] step:9541/10000 train_time:773247ms step_avg:81.04ms
+[2025-07-07 06:05:53] [Rank 0] step:9561/10000 train_time:775337ms step_avg:81.09ms
+[2025-07-07 06:05:54] [Rank 0] step:9581/10000 train_time:776830ms step_avg:81.08ms
+[2025-07-07 06:05:56] [Rank 0] step:9601/10000 train_time:778325ms step_avg:81.07ms
+[2025-07-07 06:05:57] [Rank 0] step:9621/10000 train_time:779818ms step_avg:81.05ms
+[2025-07-07 06:05:59] [Rank 0] step:9641/10000 train_time:781980ms step_avg:81.11ms
+[2025-07-07 06:06:01] [Rank 0] step:9661/10000 train_time:783473ms step_avg:81.10ms
+[2025-07-07 06:06:03] [Rank 0] step:9681/10000 train_time:785108ms step_avg:81.10ms
+[2025-07-07 06:06:04] [Rank 0] step:9701/10000 train_time:786684ms step_avg:81.09ms
+[2025-07-07 06:06:06] [Rank 0] step:9721/10000 train_time:789001ms step_avg:81.16ms
+[2025-07-07 06:06:08] [Rank 0] step:9741/10000 train_time:790476ms step_avg:81.15ms
+[2025-07-07 06:06:09] [Rank 0] step:9761/10000 train_time:791972ms step_avg:81.14ms
+[2025-07-07 06:06:11] [Rank 0] step:9781/10000 train_time:793468ms step_avg:81.12ms
+[2025-07-07 06:06:12] [Rank 0] step:9801/10000 train_time:794964ms step_avg:81.11ms
+[2025-07-07 06:06:15] [Rank 0] step:9821/10000 train_time:797120ms step_avg:81.16ms
+[2025-07-07 06:06:16] [Rank 0] step:9841/10000 train_time:798617ms step_avg:81.15ms
+[2025-07-07 06:06:18] [Rank 0] step:9861/10000 train_time:800115ms step_avg:81.14ms
+[2025-07-07 06:06:19] [Rank 0] step:9881/10000 train_time:801613ms step_avg:81.13ms
+[2025-07-07 06:06:21] [Rank 0] step:9901/10000 train_time:803784ms step_avg:81.18ms
+[2025-07-07 06:06:23] [Rank 0] step:9921/10000 train_time:805262ms step_avg:81.17ms
+[2025-07-07 06:06:24] [Rank 0] step:9941/10000 train_time:806762ms step_avg:81.15ms
+[2025-07-07 06:06:26] [Rank 0] step:9961/10000 train_time:808259ms step_avg:81.14ms
+[2025-07-07 06:06:27] [Rank 0] step:9981/10000 train_time:809758ms step_avg:81.13ms
+[2025-07-07 06:06:29] [Rank 0] step:10000/10000 train_time:811829ms step_avg:81.18ms
+[2025-07-07 06:06:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:06:30] [Rank 0] PRINT: step:10000/10000 train_loss:1.5798 val_loss:1.5753 train_time:811911ms step_avg:81.19ms
+[2025-07-07 06:06:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:06:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:06:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:11:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:11:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:11:59] [Rank 0] Total Loss: 4.2600
+[2025-07-07 06:11:59] [Rank 0] Total FTA: 0.2072
+[2025-07-07 06:11:59] [Rank 0] Group 0 Loss: 4.5353
+[2025-07-07 06:11:59] [Rank 0] Group 1 Loss: 4.1398
+[2025-07-07 06:11:59] [Rank 0] Group 2 Loss: 4.0678
+[2025-07-07 06:11:59] [Rank 0] Group 3 Loss: 4.1509
+[2025-07-07 06:11:59] [Rank 0] Group 4 Loss: 4.2969
+[2025-07-07 06:11:59] [Rank 0] Group 5 Loss: 4.1454
+[2025-07-07 06:11:59] [Rank 0] Group 6 Loss: 4.1875
+[2025-07-07 06:11:59] [Rank 0] Group 7 Loss: 4.2251
+[2025-07-07 06:11:59] [Rank 0] Group 8 Loss: 4.2307
+[2025-07-07 06:11:59] [Rank 0] Group 9 Loss: 4.2769
+[2025-07-07 06:11:59] [Rank 0] Group 10 Loss: 4.2497
+[2025-07-07 06:11:59] [Rank 0] Group 11 Loss: 4.2924
+[2025-07-07 06:11:59] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 06:11:59] [Rank 0] Group 1 FTA: 0.2005
+[2025-07-07 06:11:59] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-07 06:11:59] [Rank 0] Group 3 FTA: 0.1094
+[2025-07-07 06:11:59] [Rank 0] Group 4 FTA: 0.1224
+[2025-07-07 06:11:59] [Rank 0] Group 5 FTA: 0.2526
+[2025-07-07 06:11:59] [Rank 0] Group 6 FTA: 0.2812
+[2025-07-07 06:11:59] [Rank 0] Group 7 FTA: 0.2188
+[2025-07-07 06:11:59] [Rank 0] Group 8 FTA: 0.2396
+[2025-07-07 06:11:59] [Rank 0] Group 9 FTA: 0.1406
+[2025-07-07 06:11:59] [Rank 0] Group 10 FTA: 0.2559
+[2025-07-07 06:11:59] [Rank 0] Group 11 FTA: 0.2549
+[2025-07-07 06:12:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-07 06:12:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-07 06:12:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-07 06:12:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-07 06:12:01] [Rank 0] step:10001/10000 train_time:811934ms step_avg:81.19ms
+[2025-07-07 06:12:01] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 06:12:01 2025 ---
+[2025-07-07 06:12:01] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8a4c56c8233872b40aaa680f6adaf5894a4a5563
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "ef26a322-e19c-4b7a-a625-6fe484e0cc01",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..39c2b0fabbd29f715449dffd475968cf32d91b3d
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c6816023d9b84c8d14f735e311522ee7a8a68fb47447000dc8b799ffb1f23b19
+size 415550
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..c2e05cea58cf95aa9017d55ed231eba7d41992cb
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:40e1bc7eb93f7d202e797c00b3a5f1eaf8af3610332d7760204207d04f1cac45
+size 291768
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..709afc57c2fd916025ca1dd8c3319e22844d4b93
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ddcf4095ca80da6a50357b0b66ea4ae0ceac836d10dd1dd2c097287f05e4904
+size 103570
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..1dcc0864154f4e25da8e65e174dead6b2ed969e4
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:650693f995eea648b156559e997334d1b78603386e68246ecaa5b907cd418df2
+size 113431
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/training_log_ef26a322-e19c-4b7a-a625-6fe484e0cc01.txt b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/training_log_ef26a322-e19c-4b7a-a625-6fe484e0cc01.txt
new file mode 100644
index 0000000000000000000000000000000000000000..caacc05a5141088d0e95e40b9738e933ba69b845
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/training_log_ef26a322-e19c-4b7a-a625-6fe484e0cc01.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 12:51:33] [Rank 0] PRINT: --- Script Start: Sun Jul 6 12:51:33 2025 ---
+[2025-07-06 12:51:33] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002)
+[2025-07-06 12:51:33] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 12:51:33] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-06 12:51:33] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42
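+# ---------------------------------------------------------------------------
+# The script source below is captured verbatim at startup via print0(code).
+# A typical launch for this run would be (a sketch only: the script filename
+# train_gpt_qa.py and the 8-GPU node are assumptions; the flags match the
+# argparse definitions below, and torchrun supplies the RANK/LOCAL_RANK/
+# WORLD_SIZE environment variables the script reads):
+#   torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo \
+#       --adam_lr 0.0002 --seed 42
+# ---------------------------------------------------------------------------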
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
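+# --- Editor's note (illustrative, not from the original log): the script reads
+# RANK/LOCAL_RANK/WORLD_SIZE from the environment, so it is meant to be launched
+# with torchrun, one process per GPU; the script name `train_qa.py` below is a
+# placeholder, the flags match the argparse definitions above and this run's config.json.
+#
+#   torchrun --nproc_per_node=8 train_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0002 --seed 42
+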
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message exactly once; an unconditional second write here
+        # would duplicate every entry in the log file.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    # Kept from an earlier data-building path; the cached-sample file loaded below is what is actually used.
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # cap at num_samples; pass a larger value to use the full cached set
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f"  Original: {sample['original']}")
+            # print(f"  Cleaned : {sample['cleaned']}")
+            print(f"  Prompt : '{sample['prompt']}'")
+            print(f"  Answer : '{sample['answer']}'")
+            print(
+                f"  Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f"  Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
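+# --- Editor's worked example for get_lr above (not from the original log), using
+# this run's num_iterations=10000 and cooldown_frac=0.8: the multiplier stays at
+# 1.0 while x = step/10000 < 0.2, then decays linearly toward 0.1.
+#   step  2000 -> x=0.2: w=(1-0.2)/0.8=1.0 -> multiplier 1.0 (end of stable phase)
+#   step  6000 -> x=0.6: w=(1-0.6)/0.8=0.5 -> 0.5*1.0 + 0.5*0.1 = 0.55
+#   step 10000 -> x=1.0: w=0.0 -> multiplier 0.1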
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
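+# ---------------------------------------------------------------------------
+# Editor's sketch (not part of the original run): a miniature of the power-law
+# class-imbalance scheme that generate_powerlaw_selection_counts above encodes.
+# Group 0 holds a single class with 2**m samples; each group g >= 1 holds
+# 2**(g-1) classes with 2**(m-g) samples apiece, so every group g >= 1
+# contributes exactly 2**(m-1) samples. `powerlaw_counts_demo` is a
+# hypothetical helper name, not from the script.
+def powerlaw_counts_demo(m: int):
+    counts = {}   # class_id -> number of samples drawn for that class
+    groups = []   # groups[class_id] -> group_id, mirroring class_groups above
+    class_id = 0
+    for group_id in range(m + 1):
+        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        for _ in range(num_classes):
+            counts[class_id] = samples_per_class
+            groups.append(group_id)
+            class_id += 1
+    return counts, groups
+
+counts, groups = powerlaw_counts_demo(3)
+assert counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+assert len(counts) == 2 ** 3              # 2**m classes in total
+assert sum(counts.values()) == 8 + 3 * 4  # 2**m + m * 2**(m-1) samples
+# With M_FOR_POWERLAW = 11 as in this run, that gives 2048 classes and 13312 samples.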
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
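+# Together with train_step_count just below, train_loss_sum accumulates the raw
+# training loss between validation points; both live on the GPU so the averaged
+# train loss can be all-reduced across ranks without a host sync, and both are
+# reset to zero at the end of each validation pass.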
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 12:51:33] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 12:51:33] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-06 12:51:34] [Rank 0] PRINT: Constructing model... +[2025-07-06 12:51:34] [Rank 0] PRINT: Constructing model... +[2025-07-06 12:51:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 12:51:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 12:51:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 12:51:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 12:51:36] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 12:51:36] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 12:51:36] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 12:51:36] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 12:51:36] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 12:51:36] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 12:51:36] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 12:51:36] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 12:51:37] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 12:51:37] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 12:51:37] [Rank 0] PRINT: Model returns: +[2025-07-06 12:51:37] [Rank 0] PRINT: Model returns: +[2025-07-06 12:51:37] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 12:51:37] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 12:51:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 12:51:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 12:51:37] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 12:51:37] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 12:51:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 12:51:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 12:51:37] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 12:51:37] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 12:51:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 12:51:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 12:51:37] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 12:51:37] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 12:51:37] [Rank 0] PRINT: Starting warmup... +[2025-07-06 12:51:37] [Rank 0] PRINT: Starting warmup... +[2025-07-06 12:52:43] [Rank 0] PRINT: Warmup complete. +[2025-07-06 12:52:43] [Rank 0] PRINT: Warmup complete. +[2025-07-06 12:52:43] [Rank 0] PRINT: Starting training... +[2025-07-06 12:52:43] [Rank 0] PRINT: Starting training... +[2025-07-06 12:52:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 12:52:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 12:52:52] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 12:52:52] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 12:52:54] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.45ms +[2025-07-06 12:52:54] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.45ms +[2025-07-06 12:52:55] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.21ms +[2025-07-06 12:52:55] [Rank 0] step:41/10000 train_time:3206ms step_avg:78.21ms +[2025-07-06 12:52:57] [Rank 0] step:61/10000 train_time:4662ms step_avg:76.43ms +[2025-07-06 12:52:57] [Rank 0] step:61/10000 train_time:4662ms step_avg:76.43ms +[2025-07-06 12:52:58] [Rank 0] step:81/10000 train_time:6118ms step_avg:75.53ms +[2025-07-06 12:52:58] [Rank 0] step:81/10000 train_time:6118ms step_avg:75.53ms +[2025-07-06 12:53:00] [Rank 0] step:101/10000 train_time:7834ms step_avg:77.57ms +[2025-07-06 12:53:00] [Rank 0] step:101/10000 train_time:7834ms step_avg:77.57ms +[2025-07-06 12:53:01] [Rank 0] step:121/10000 train_time:9289ms step_avg:76.77ms +[2025-07-06 12:53:01] [Rank 0] step:121/10000 train_time:9289ms step_avg:76.77ms +[2025-07-06 12:53:03] [Rank 0] step:141/10000 train_time:10747ms step_avg:76.22ms +[2025-07-06 12:53:03] [Rank 0] step:141/10000 train_time:10747ms step_avg:76.22ms +[2025-07-06 12:53:04] [Rank 0] step:161/10000 train_time:12205ms step_avg:75.81ms +[2025-07-06 12:53:04] [Rank 0] step:161/10000 train_time:12205ms step_avg:75.81ms +[2025-07-06 12:53:06] [Rank 0] step:181/10000 train_time:13714ms step_avg:75.77ms +[2025-07-06 12:53:06] [Rank 0] step:181/10000 train_time:13714ms step_avg:75.77ms +[2025-07-06 12:53:08] [Rank 0] step:201/10000 train_time:15785ms step_avg:78.53ms +[2025-07-06 12:53:08] [Rank 0] step:201/10000 train_time:15785ms step_avg:78.53ms +[2025-07-06 12:53:09] [Rank 0] step:221/10000 train_time:17245ms step_avg:78.03ms +[2025-07-06 12:53:09] [Rank 0] step:221/10000 train_time:17245ms step_avg:78.03ms +[2025-07-06 12:53:11] [Rank 0] step:241/10000 train_time:18703ms step_avg:77.61ms +[2025-07-06 12:53:11] [Rank 0] step:241/10000 train_time:18703ms step_avg:77.61ms +[2025-07-06 12:53:12] [Rank 0] step:261/10000 train_time:20161ms step_avg:77.25ms +[2025-07-06 12:53:12] [Rank 0] step:261/10000 train_time:20161ms step_avg:77.25ms +[2025-07-06 12:53:14] [Rank 0] step:281/10000 train_time:21961ms step_avg:78.15ms +[2025-07-06 12:53:14] [Rank 0] step:281/10000 train_time:21961ms step_avg:78.15ms +[2025-07-06 12:53:15] [Rank 0] step:301/10000 train_time:23423ms step_avg:77.82ms +[2025-07-06 12:53:15] [Rank 0] step:301/10000 train_time:23423ms step_avg:77.82ms +[2025-07-06 12:53:17] [Rank 0] step:321/10000 train_time:24880ms step_avg:77.51ms +[2025-07-06 12:53:17] [Rank 0] step:321/10000 train_time:24880ms step_avg:77.51ms +[2025-07-06 12:53:18] [Rank 0] step:341/10000 train_time:26338ms step_avg:77.24ms +[2025-07-06 12:53:18] [Rank 0] step:341/10000 train_time:26338ms step_avg:77.24ms +[2025-07-06 12:53:20] [Rank 0] step:361/10000 train_time:28054ms step_avg:77.71ms +[2025-07-06 12:53:20] [Rank 0] step:361/10000 train_time:28054ms step_avg:77.71ms +[2025-07-06 12:53:22] [Rank 0] step:381/10000 train_time:29898ms step_avg:78.47ms +[2025-07-06 12:53:22] [Rank 0] step:381/10000 train_time:29898ms step_avg:78.47ms +[2025-07-06 12:53:23] [Rank 0] step:401/10000 train_time:31358ms step_avg:78.20ms +[2025-07-06 12:53:23] [Rank 0] step:401/10000 train_time:31358ms step_avg:78.20ms +[2025-07-06 12:53:25] [Rank 0] step:421/10000 train_time:32819ms step_avg:77.95ms 
+[2025-07-06 12:53:25] [Rank 0] step:421/10000 train_time:32819ms step_avg:77.95ms +[2025-07-06 12:53:26] [Rank 0] step:441/10000 train_time:34281ms step_avg:77.74ms +[2025-07-06 12:53:26] [Rank 0] step:441/10000 train_time:34281ms step_avg:77.74ms +[2025-07-06 12:53:28] [Rank 0] step:461/10000 train_time:35979ms step_avg:78.05ms +[2025-07-06 12:53:28] [Rank 0] step:461/10000 train_time:35979ms step_avg:78.05ms +[2025-07-06 12:53:29] [Rank 0] step:481/10000 train_time:37541ms step_avg:78.05ms +[2025-07-06 12:53:29] [Rank 0] step:481/10000 train_time:37541ms step_avg:78.05ms +[2025-07-06 12:53:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 12:53:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 12:53:32] [Rank 0] PRINT: step:500/10000 train_loss:8.7306 val_loss:7.1014 train_time:39004ms step_avg:78.01ms +[2025-07-06 12:53:32] [Rank 0] PRINT: step:500/10000 train_loss:8.7306 val_loss:7.1014 train_time:39004ms step_avg:78.01ms +[2025-07-06 12:53:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 12:53:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 12:53:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 12:53:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 12:53:32] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 12:53:32] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 12:58:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 12:58:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 12:58:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 12:58:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 12:58:55] [Rank 0] Total Loss: 7.6654 +[2025-07-06 12:58:55] [Rank 0] Total Loss: 7.6654 +[2025-07-06 12:58:55] [Rank 0] Total FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Total FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 0 Loss: 7.6774 +[2025-07-06 12:58:55] [Rank 0] Group 0 Loss: 7.6774 +[2025-07-06 12:58:55] [Rank 0] Group 1 Loss: 7.6193 +[2025-07-06 12:58:55] [Rank 0] Group 1 Loss: 7.6193 +[2025-07-06 12:58:55] [Rank 0] Group 2 Loss: 7.7694 +[2025-07-06 12:58:55] [Rank 0] Group 2 Loss: 7.7694 +[2025-07-06 12:58:55] [Rank 0] Group 3 Loss: 7.6307 +[2025-07-06 12:58:55] [Rank 0] Group 3 Loss: 7.6307 +[2025-07-06 12:58:55] [Rank 0] Group 4 Loss: 7.6873 +[2025-07-06 12:58:55] [Rank 0] Group 4 Loss: 7.6873 +[2025-07-06 12:58:55] [Rank 0] Group 5 Loss: 7.6344 +[2025-07-06 12:58:55] [Rank 0] Group 5 Loss: 7.6344 +[2025-07-06 12:58:55] [Rank 0] Group 6 Loss: 7.6650 +[2025-07-06 12:58:55] [Rank 0] Group 6 Loss: 7.6650 +[2025-07-06 12:58:55] [Rank 0] Group 7 Loss: 7.6667 +[2025-07-06 12:58:55] [Rank 0] Group 7 Loss: 7.6667 +[2025-07-06 12:58:55] [Rank 0] Group 8 Loss: 7.6372 +[2025-07-06 12:58:55] [Rank 0] Group 8 Loss: 7.6372 +[2025-07-06 12:58:55] [Rank 0] Group 9 Loss: 7.6419 +[2025-07-06 12:58:55] [Rank 0] Group 9 Loss: 7.6419 +[2025-07-06 12:58:55] [Rank 0] Group 10 Loss: 7.6573 +[2025-07-06 12:58:55] [Rank 0] Group 10 Loss: 7.6573 +[2025-07-06 12:58:55] [Rank 0] Group 11 Loss: 7.6714 +[2025-07-06 12:58:55] [Rank 0] Group 11 Loss: 7.6714 +[2025-07-06 12:58:55] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-06 
12:58:55] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-06 12:58:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 12:58:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 12:58:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 12:58:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 12:58:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 12:58:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 12:58:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 12:58:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 12:58:56] [Rank 0] step:501/10000 train_time:39024ms step_avg:77.89ms +[2025-07-06 12:58:56] [Rank 0] step:501/10000 train_time:39024ms step_avg:77.89ms +[2025-07-06 12:58:58] [Rank 0] step:521/10000 train_time:40484ms step_avg:77.71ms +[2025-07-06 12:58:58] [Rank 0] step:521/10000 train_time:40484ms step_avg:77.71ms +[2025-07-06 12:59:00] [Rank 0] step:541/10000 train_time:42203ms step_avg:78.01ms +[2025-07-06 12:59:00] [Rank 0] step:541/10000 train_time:42203ms step_avg:78.01ms +[2025-07-06 12:59:01] [Rank 0] step:561/10000 train_time:44065ms step_avg:78.55ms +[2025-07-06 12:59:01] [Rank 0] step:561/10000 train_time:44065ms step_avg:78.55ms +[2025-07-06 12:59:03] [Rank 0] step:581/10000 train_time:45524ms step_avg:78.36ms +[2025-07-06 12:59:03] [Rank 0] step:581/10000 train_time:45524ms step_avg:78.36ms +[2025-07-06 12:59:04] [Rank 0] step:601/10000 train_time:46985ms step_avg:78.18ms +[2025-07-06 12:59:04] [Rank 0] step:601/10000 train_time:46985ms step_avg:78.18ms +[2025-07-06 12:59:06] [Rank 0] step:621/10000 train_time:48446ms step_avg:78.01ms +[2025-07-06 12:59:06] [Rank 0] step:621/10000 train_time:48446ms 
step_avg:78.01ms +[2025-07-06 12:59:08] [Rank 0] step:641/10000 train_time:50568ms step_avg:78.89ms +[2025-07-06 12:59:08] [Rank 0] step:641/10000 train_time:50568ms step_avg:78.89ms +[2025-07-06 12:59:09] [Rank 0] step:661/10000 train_time:52028ms step_avg:78.71ms +[2025-07-06 12:59:09] [Rank 0] step:661/10000 train_time:52028ms step_avg:78.71ms +[2025-07-06 12:59:11] [Rank 0] step:681/10000 train_time:53489ms step_avg:78.54ms +[2025-07-06 12:59:11] [Rank 0] step:681/10000 train_time:53489ms step_avg:78.54ms +[2025-07-06 12:59:12] [Rank 0] step:701/10000 train_time:54952ms step_avg:78.39ms +[2025-07-06 12:59:12] [Rank 0] step:701/10000 train_time:54952ms step_avg:78.39ms +[2025-07-06 12:59:14] [Rank 0] step:721/10000 train_time:57093ms step_avg:79.19ms +[2025-07-06 12:59:14] [Rank 0] step:721/10000 train_time:57093ms step_avg:79.19ms +[2025-07-06 12:59:16] [Rank 0] step:741/10000 train_time:58537ms step_avg:79.00ms +[2025-07-06 12:59:16] [Rank 0] step:741/10000 train_time:58537ms step_avg:79.00ms +[2025-07-06 12:59:17] [Rank 0] step:761/10000 train_time:60009ms step_avg:78.85ms +[2025-07-06 12:59:17] [Rank 0] step:761/10000 train_time:60009ms step_avg:78.85ms +[2025-07-06 12:59:19] [Rank 0] step:781/10000 train_time:61483ms step_avg:78.72ms +[2025-07-06 12:59:19] [Rank 0] step:781/10000 train_time:61483ms step_avg:78.72ms +[2025-07-06 12:59:20] [Rank 0] step:801/10000 train_time:62958ms step_avg:78.60ms +[2025-07-06 12:59:20] [Rank 0] step:801/10000 train_time:62958ms step_avg:78.60ms +[2025-07-06 12:59:22] [Rank 0] step:821/10000 train_time:65069ms step_avg:79.26ms +[2025-07-06 12:59:22] [Rank 0] step:821/10000 train_time:65069ms step_avg:79.26ms +[2025-07-06 12:59:24] [Rank 0] step:841/10000 train_time:66546ms step_avg:79.13ms +[2025-07-06 12:59:24] [Rank 0] step:841/10000 train_time:66546ms step_avg:79.13ms +[2025-07-06 12:59:25] [Rank 0] step:861/10000 train_time:68021ms step_avg:79.00ms +[2025-07-06 12:59:25] [Rank 0] step:861/10000 train_time:68021ms step_avg:79.00ms +[2025-07-06 12:59:27] [Rank 0] step:881/10000 train_time:69497ms step_avg:78.88ms +[2025-07-06 12:59:27] [Rank 0] step:881/10000 train_time:69497ms step_avg:78.88ms +[2025-07-06 12:59:29] [Rank 0] step:901/10000 train_time:71228ms step_avg:79.05ms +[2025-07-06 12:59:29] [Rank 0] step:901/10000 train_time:71228ms step_avg:79.05ms +[2025-07-06 12:59:30] [Rank 0] step:921/10000 train_time:73108ms step_avg:79.38ms +[2025-07-06 12:59:30] [Rank 0] step:921/10000 train_time:73108ms step_avg:79.38ms +[2025-07-06 12:59:32] [Rank 0] step:941/10000 train_time:74584ms step_avg:79.26ms +[2025-07-06 12:59:32] [Rank 0] step:941/10000 train_time:74584ms step_avg:79.26ms +[2025-07-06 12:59:33] [Rank 0] step:961/10000 train_time:76060ms step_avg:79.15ms +[2025-07-06 12:59:33] [Rank 0] step:961/10000 train_time:76060ms step_avg:79.15ms +[2025-07-06 12:59:35] [Rank 0] step:981/10000 train_time:77535ms step_avg:79.04ms +[2025-07-06 12:59:35] [Rank 0] step:981/10000 train_time:77535ms step_avg:79.04ms +[2025-07-06 12:59:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 12:59:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 12:59:38] [Rank 0] PRINT: step:1000/10000 train_loss:6.0299 val_loss:5.1114 train_time:79663ms step_avg:79.66ms
+[2025-07-06 12:59:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:59:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 12:59:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:05:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:05:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:05:03] [Rank 0] Total Loss: 6.0729
+[2025-07-06 13:05:03] [Rank 0] Total FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 0 Loss: 6.0117
+[2025-07-06 13:05:03] [Rank 0] Group 1 Loss: 6.1258
+[2025-07-06 13:05:03] [Rank 0] Group 2 Loss: 6.1431
+[2025-07-06 13:05:03] [Rank 0] Group 3 Loss: 6.0259
+[2025-07-06 13:05:03] [Rank 0] Group 4 Loss: 6.1148
+[2025-07-06 13:05:03] [Rank 0] Group 5 Loss: 6.0612
+[2025-07-06 13:05:03] [Rank 0] Group 6 Loss: 6.0939
+[2025-07-06 13:05:03] [Rank 0] Group 7 Loss: 6.0655
+[2025-07-06 13:05:03] [Rank 0] Group 8 Loss: 6.0533
+[2025-07-06 13:05:03] [Rank 0] Group 9 Loss: 6.0559
+[2025-07-06 13:05:03] [Rank 0] Group 10 Loss: 6.0735
+[2025-07-06 13:05:03] [Rank 0] Group 11 Loss: 6.0852
+[2025-07-06 13:05:03] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-06 13:05:03] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-06 13:05:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:05:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:05:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:05:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:05:05] [Rank 0] step:1001/10000 train_time:79684ms step_avg:79.60ms
+[2025-07-06 13:05:06] [Rank 0] step:1021/10000 train_time:81155ms step_avg:79.49ms
+[2025-07-06 13:05:08] [Rank 0] step:1041/10000 train_time:82624ms step_avg:79.37ms
+[2025-07-06 13:05:09] [Rank 0] step:1061/10000 train_time:84380ms step_avg:79.53ms
+[2025-07-06 13:05:11] [Rank 0] step:1081/10000 train_time:86113ms step_avg:79.66ms
+[2025-07-06 13:05:13] [Rank 0] step:1101/10000 train_time:87989ms step_avg:79.92ms
+[2025-07-06 13:05:14] [Rank 0] step:1121/10000 train_time:89460ms step_avg:79.80ms
+[2025-07-06 13:05:16] [Rank 0] step:1141/10000 train_time:90933ms step_avg:79.70ms
+[2025-07-06 13:05:17] [Rank 0] step:1161/10000 train_time:92407ms step_avg:79.59ms
+[2025-07-06 13:05:19] [Rank 0] step:1181/10000 train_time:94547ms step_avg:80.06ms
+[2025-07-06 13:05:21] [Rank 0] step:1201/10000 train_time:96021ms step_avg:79.95ms
+[2025-07-06 13:05:22] [Rank 0] step:1221/10000 train_time:97496ms step_avg:79.85ms
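In the progress lines above, step_avg is simply the cumulative train_time divided by the current step index; for the last line, 97496 ms / 1221 steps ≈ 79.85 ms. A one-line reconstruction (field names mirror the log; this is an illustration, not the script's actual code):

    step, train_time_ms = 1221, 97496
    step_avg = train_time_ms / step  # 79.85 ms, matching the logged value
    print(f"step:{step}/10000 train_time:{train_time_ms}ms step_avg:{step_avg:.2f}ms")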
+[2025-07-06 13:05:24] [Rank 0] step:1241/10000 train_time:98972ms step_avg:79.75ms
+[2025-07-06 13:05:26] [Rank 0] step:1261/10000 train_time:100702ms step_avg:79.86ms
+[2025-07-06 13:05:27] [Rank 0] step:1281/10000 train_time:102583ms step_avg:80.08ms
+[2025-07-06 13:05:29] [Rank 0] step:1301/10000 train_time:104059ms step_avg:79.98ms
+[2025-07-06 13:05:30] [Rank 0] step:1321/10000 train_time:105533ms step_avg:79.89ms
+[2025-07-06 13:05:32] [Rank 0] step:1341/10000 train_time:107010ms step_avg:79.80ms
+[2025-07-06 13:05:34] [Rank 0] step:1361/10000 train_time:109142ms step_avg:80.19ms
+[2025-07-06 13:05:36] [Rank 0] step:1381/10000 train_time:110619ms step_avg:80.10ms
+[2025-07-06 13:05:37] [Rank 0] step:1401/10000 train_time:112098ms step_avg:80.01ms
+[2025-07-06 13:05:38] [Rank 0] step:1421/10000 train_time:113578ms step_avg:79.93ms
+[2025-07-06 13:05:41] [Rank 0] step:1441/10000 train_time:115314ms step_avg:80.02ms
+[2025-07-06 13:05:42] [Rank 0] step:1461/10000 train_time:117180ms step_avg:80.21ms
+[2025-07-06 13:05:44] [Rank 0] step:1481/10000 train_time:118658ms step_avg:80.12ms
+[2025-07-06 13:05:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:05:46] [Rank 0] PRINT: step:1500/10000 train_loss:4.3974 val_loss:3.7099 train_time:120137ms step_avg:80.09ms
+[2025-07-06 13:05:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:05:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 13:05:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:11:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:11:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:11:09] [Rank 0] Total Loss: 5.1332
+[2025-07-06 13:11:09] [Rank 0] Total FTA: 0.0865
+[2025-07-06 13:11:09] [Rank 0] Group 0 Loss: 5.1879
+[2025-07-06 13:11:09] [Rank 0] Group 1 Loss: 5.1678
+[2025-07-06 13:11:09] [Rank 0] Group 2 Loss: 5.0981
+[2025-07-06 13:11:09] [Rank 0] Group 3 Loss: 5.0847
+[2025-07-06 13:11:09] [Rank 0] Group 4 Loss: 5.1448
+[2025-07-06 13:11:09] [Rank 0] Group 5 Loss: 5.1124
+[2025-07-06 13:11:09] [Rank 0] Group 6 Loss: 5.1130
+[2025-07-06 13:11:09] [Rank 0] Group 7 Loss: 5.1439
+[2025-07-06 13:11:09] [Rank 0] Group 8 Loss: 5.1072
+[2025-07-06 13:11:09] [Rank 0] Group 9 Loss: 5.1284
+[2025-07-06 13:11:09] [Rank 0] Group 10 Loss: 5.1387
+[2025-07-06 13:11:09] [Rank 0] Group 11 Loss: 5.1257
+[2025-07-06 13:11:09] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-06 13:11:09] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 13:11:09] [Rank 0] Group 2 FTA: 0.0885
+[2025-07-06 13:11:09] [Rank 0] Group 3 FTA: 0.0521
+[2025-07-06 13:11:09] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-06 13:11:09] [Rank 0] Group 5 FTA: 0.0286
+[2025-07-06 13:11:09] [Rank 0] Group 6 FTA: 0.1094
+[2025-07-06 13:11:09] [Rank 0] Group 7 FTA: 0.0833
+[2025-07-06 13:11:09] [Rank 0] Group 8 FTA: 0.1198
+[2025-07-06 13:11:09] [Rank 0] Group 9 FTA: 0.0664
+[2025-07-06 13:11:09] [Rank 0] Group 10 FTA: 0.0742
+[2025-07-06 13:11:09] [Rank 0] Group 11 FTA: 0.1025
+[2025-07-06 13:11:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:11:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:11:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:11:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:11:11] [Rank 0] step:1501/10000 train_time:120159ms step_avg:80.05ms
+[2025-07-06 13:11:12] [Rank 0] step:1521/10000 train_time:121647ms step_avg:79.98ms
+[2025-07-06 13:11:14] [Rank 0] step:1541/10000 train_time:123767ms step_avg:80.32ms
+[2025-07-06 13:11:16] [Rank 0] step:1561/10000 train_time:125237ms step_avg:80.23ms
+[2025-07-06 13:11:17] [Rank 0] step:1581/10000 train_time:126708ms step_avg:80.14ms
+[2025-07-06 13:11:19] [Rank 0] step:1601/10000 train_time:128178ms step_avg:80.06ms
+[2025-07-06 13:11:21] [Rank 0] step:1621/10000 train_time:130317ms step_avg:80.39ms
+[2025-07-06 13:11:22] [Rank 0] step:1641/10000 train_time:131772ms step_avg:80.30ms
+[2025-07-06 13:11:24] [Rank 0] step:1661/10000 train_time:133247ms step_avg:80.22ms
+[2025-07-06 13:11:25] [Rank 0] step:1681/10000 train_time:134720ms step_avg:80.14ms
+[2025-07-06 13:11:27] [Rank 0] step:1701/10000 train_time:136195ms step_avg:80.07ms
+[2025-07-06 13:11:29] [Rank 0] step:1721/10000 train_time:138487ms step_avg:80.47ms
+[2025-07-06 13:11:30] [Rank 0] step:1741/10000 train_time:139962ms step_avg:80.39ms
+[2025-07-06 13:11:32] [Rank 0] step:1761/10000 train_time:141436ms step_avg:80.32ms
+[2025-07-06 13:11:33] [Rank 0] step:1781/10000 train_time:142910ms step_avg:80.24ms
+[2025-07-06 13:11:35] [Rank 0] step:1801/10000 train_time:144385ms step_avg:80.17ms
+[2025-07-06 13:11:37] [Rank 0] step:1821/10000 train_time:146101ms step_avg:80.23ms
+[2025-07-06 13:11:38] [Rank 0] step:1841/10000 train_time:147578ms step_avg:80.16ms
+[2025-07-06 13:11:39] [Rank 0] step:1861/10000 train_time:149054ms step_avg:80.09ms
+[2025-07-06 13:11:41] [Rank 0] step:1881/10000 train_time:150530ms step_avg:80.03ms
+[2025-07-06 13:11:43] [Rank 0] step:1901/10000 train_time:152664ms step_avg:80.31ms
+[2025-07-06 13:11:45] [Rank 0] step:1921/10000 train_time:154140ms step_avg:80.24ms
+[2025-07-06 13:11:46] [Rank 0] step:1941/10000 train_time:155618ms step_avg:80.17ms
+[2025-07-06 13:11:48] [Rank 0] step:1961/10000 train_time:157096ms step_avg:80.11ms
+[2025-07-06 13:11:50] [Rank 0] step:1981/10000 train_time:158830ms step_avg:80.18ms
+[2025-07-06 13:11:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:11:52] [Rank 0] PRINT: step:2000/10000 train_loss:3.1223 val_loss:2.6179 train_time:160702ms step_avg:80.35ms
+[2025-07-06 13:11:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:11:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
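The stratified pass above draws a quota from each of the 12 groups instead of sampling the pool uniformly, and per-group rounding (or keeping small groups whole) can push the total past the ~5000 target, which would be consistent with the 5633 reported on the next line. A hedged sketch of one common implementation (function and variable names are illustrative, not taken from the script):

    import math
    import random

    def stratified_sample(groups, target_total, seed=42):
        # groups: dict mapping group id -> list of samples
        rng = random.Random(seed)
        quota = math.ceil(target_total / len(groups))  # per-group share, rounded up
        picked = []
        for gid in sorted(groups):
            samples = groups[gid]
            k = min(quota, len(samples))  # small groups contribute everything they have
            picked.extend(rng.sample(samples, k))
        return picked  # rounding per group can overshoot target_total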
+[2025-07-06 13:11:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:17:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:17:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:17:16] [Rank 0] Total Loss: 4.4330
+[2025-07-06 13:17:16] [Rank 0] Total FTA: 0.0849
+[2025-07-06 13:17:16] [Rank 0] Group 0 Loss: 4.5618
+[2025-07-06 13:17:16] [Rank 0] Group 1 Loss: 4.5301
+[2025-07-06 13:17:16] [Rank 0] Group 2 Loss: 4.2777
+[2025-07-06 13:17:16] [Rank 0] Group 3 Loss: 4.4241
+[2025-07-06 13:17:16] [Rank 0] Group 4 Loss: 4.3723
+[2025-07-06 13:17:16] [Rank 0] Group 5 Loss: 4.3447
+[2025-07-06 13:17:16] [Rank 0] Group 6 Loss: 4.3958
+[2025-07-06 13:17:16] [Rank 0] Group 7 Loss: 4.4696
+[2025-07-06 13:17:16] [Rank 0] Group 8 Loss: 4.4086
+[2025-07-06 13:17:16] [Rank 0] Group 9 Loss: 4.4118
+[2025-07-06 13:17:16] [Rank 0] Group 10 Loss: 4.4307
+[2025-07-06 13:17:16] [Rank 0] Group 11 Loss: 4.4330
+[2025-07-06 13:17:16] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-06 13:17:16] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 13:17:16] [Rank 0] Group 2 FTA: 0.0964
+[2025-07-06 13:17:16] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-06 13:17:16] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-06 13:17:16] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-06 13:17:16] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-06 13:17:16] [Rank 0] Group 7 FTA: 0.0703
+[2025-07-06 13:17:16] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-06 13:17:16] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-06 13:17:16] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-06 13:17:16] [Rank 0] Group 11 FTA: 0.0957
+[2025-07-06 13:17:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:17:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:17:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:17:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:17:17] [Rank 0] step:2001/10000 train_time:160723ms step_avg:80.32ms
+[2025-07-06 13:17:19] [Rank 0] step:2021/10000 train_time:162187ms step_avg:80.25ms
+[2025-07-06 13:17:20] [Rank 0] step:2041/10000 train_time:163656ms step_avg:80.18ms
+[2025-07-06 13:17:22] [Rank 0] step:2061/10000 train_time:165127ms step_avg:80.12ms
+[2025-07-06 13:17:24] [Rank 0] step:2081/10000 train_time:166943ms step_avg:80.22ms
+[2025-07-06 13:17:25] [Rank 0] step:2101/10000 train_time:168414ms step_avg:80.16ms
+[2025-07-06 13:17:27] [Rank 0] step:2121/10000 train_time:169884ms step_avg:80.10ms
+[2025-07-06 13:17:28] [Rank 0] step:2141/10000 train_time:171360ms step_avg:80.04ms
+[2025-07-06 13:17:30] [Rank 0] step:2161/10000 train_time:173089ms step_avg:80.10ms
+[2025-07-06 13:17:32] [Rank 0] step:2181/10000 train_time:174972ms step_avg:80.23ms
+[2025-07-06 13:17:33] [Rank 0] step:2201/10000 train_time:176445ms step_avg:80.17ms
+[2025-07-06 13:17:35] [Rank 0] step:2221/10000 train_time:178018ms step_avg:80.15ms
+[2025-07-06 13:17:36] [Rank 0] step:2241/10000 train_time:179513ms step_avg:80.10ms
+[2025-07-06 13:17:38] [Rank 0] step:2261/10000 train_time:181249ms step_avg:80.16ms
+[2025-07-06 13:17:39] [Rank 0] step:2281/10000 train_time:182747ms step_avg:80.12ms
+[2025-07-06 13:17:41] [Rank 0] step:2301/10000 train_time:184245ms step_avg:80.07ms
+[2025-07-06 13:17:42] [Rank 0] step:2321/10000 train_time:185745ms step_avg:80.03ms
+[2025-07-06 13:17:45] [Rank 0] step:2341/10000 train_time:187917ms step_avg:80.27ms
+[2025-07-06 13:17:46] [Rank 0] step:2361/10000 train_time:189398ms step_avg:80.22ms
+[2025-07-06 13:17:48] [Rank 0] step:2381/10000 train_time:191036ms step_avg:80.23ms
+[2025-07-06 13:17:49] [Rank 0] step:2401/10000 train_time:192629ms step_avg:80.23ms
+[2025-07-06 13:17:51] [Rank 0] step:2421/10000 train_time:194128ms step_avg:80.19ms
+[2025-07-06 13:17:53] [Rank 0] step:2441/10000 train_time:196294ms step_avg:80.42ms
+[2025-07-06 13:17:54] [Rank 0] step:2461/10000 train_time:197793ms step_avg:80.37ms
+[2025-07-06 13:17:56] [Rank 0] step:2481/10000 train_time:199292ms step_avg:80.33ms
+[2025-07-06 13:17:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:17:58] [Rank 0] PRINT: step:2500/10000 train_loss:2.2775 val_loss:2.0120 train_time:200792ms step_avg:80.32ms
+[2025-07-06 13:17:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:17:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 13:17:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:23:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:23:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:23:22] [Rank 0] Total Loss: 4.1027
+[2025-07-06 13:23:22] [Rank 0] Total FTA: 0.1218
+[2025-07-06 13:23:22] [Rank 0] Group 0 Loss: 4.3039
+[2025-07-06 13:23:22] [Rank 0] Group 1 Loss: 4.1102
+[2025-07-06 13:23:22] [Rank 0] Group 2 Loss: 3.9253
+[2025-07-06 13:23:22] [Rank 0] Group 3 Loss: 4.0578
+[2025-07-06 13:23:22] [Rank 0] Group 4 Loss: 4.0534
+[2025-07-06 13:23:22] [Rank 0] Group 5 Loss: 4.0355
+[2025-07-06 13:23:22] [Rank 0] Group 6 Loss: 4.0361
+[2025-07-06 13:23:22] [Rank 0] Group 7 Loss: 4.0935
+[2025-07-06 13:23:22] [Rank 0] Group 8 Loss: 4.1324
+[2025-07-06 13:23:22] [Rank 0] Group 9 Loss: 4.0711
+[2025-07-06 13:23:22] [Rank 0] Group 10 Loss: 4.1001
+[2025-07-06 13:23:22] [Rank 0] Group 11 Loss: 4.1021
+[2025-07-06 13:23:22] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-06 13:23:22] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-06 13:23:22] [Rank 0] Group 2 FTA: 0.1615
+[2025-07-06 13:23:22] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-06 13:23:22] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-06 13:23:22] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-06 13:23:22] [Rank 0] Group 6 FTA: 0.1250
+[2025-07-06 13:23:22] [Rank 0] Group 7 FTA: 0.1250
+[2025-07-06 13:23:22] [Rank 0] Group 8 FTA: 0.1146
+[2025-07-06 13:23:22] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-06 13:23:22] [Rank 0] Group 10 FTA: 0.1484
+[2025-07-06 13:23:22] [Rank 0] Group 11 FTA: 0.1260
+[2025-07-06 13:23:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:23:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:23:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:23:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:23:23] [Rank 0] step:2501/10000 train_time:200813ms step_avg:80.29ms
+[2025-07-06 13:23:25] [Rank 0] step:2521/10000 train_time:202317ms step_avg:80.25ms
+[2025-07-06 13:23:27] [Rank 0] step:2541/10000 train_time:204476ms step_avg:80.47ms
+[2025-07-06 13:23:28] [Rank 0] step:2561/10000 train_time:205968ms step_avg:80.42ms
+[2025-07-06 13:23:30] [Rank 0] step:2581/10000 train_time:207464ms step_avg:80.38ms
+[2025-07-06 13:23:31] [Rank 0] step:2601/10000 train_time:208960ms step_avg:80.34ms
+[2025-07-06 13:23:34] [Rank 0] step:2621/10000 train_time:211120ms step_avg:80.55ms
+[2025-07-06 13:23:35] [Rank 0] step:2641/10000 train_time:212614ms step_avg:80.51ms
+[2025-07-06 13:23:37] [Rank 0] step:2661/10000 train_time:214110ms step_avg:80.46ms
+[2025-07-06 13:23:38] [Rank 0] step:2681/10000 train_time:215608ms step_avg:80.42ms
+[2025-07-06 13:23:40] [Rank 0] step:2701/10000 train_time:217155ms step_avg:80.40ms
+[2025-07-06 13:23:42] [Rank 0] step:2721/10000 train_time:219262ms step_avg:80.58ms
+[2025-07-06 13:23:43] [Rank 0] step:2741/10000 train_time:220760ms step_avg:80.54ms
+[2025-07-06 13:23:45] [Rank 0] step:2761/10000 train_time:222258ms step_avg:80.50ms
+[2025-07-06 13:23:46] [Rank 0] step:2781/10000 train_time:223857ms step_avg:80.50ms
+[2025-07-06 13:23:48] [Rank 0] step:2801/10000 train_time:225999ms step_avg:80.69ms
+[2025-07-06 13:23:50] [Rank 0] step:2821/10000 train_time:227598ms step_avg:80.68ms
+[2025-07-06 13:23:52] [Rank 0] step:2841/10000 train_time:229099ms step_avg:80.64ms
+[2025-07-06 13:23:53] [Rank 0] step:2861/10000 train_time:230602ms step_avg:80.60ms
+[2025-07-06 13:23:55] [Rank 0] step:2881/10000 train_time:232153ms step_avg:80.58ms
+[2025-07-06 13:23:56] [Rank 0] step:2901/10000 train_time:233841ms step_avg:80.61ms
+[2025-07-06 13:23:58] [Rank 0] step:2921/10000 train_time:235345ms step_avg:80.57ms
+[2025-07-06 13:23:59] [Rank 0] step:2941/10000 train_time:236845ms step_avg:80.53ms
+[2025-07-06 13:24:01] [Rank 0] step:2961/10000 train_time:238347ms step_avg:80.50ms
+[2025-07-06 13:24:03] [Rank 0] step:2981/10000 train_time:240497ms step_avg:80.68ms
+[2025-07-06 13:24:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:24:05] [Rank 0] PRINT: step:3000/10000 train_loss:1.8512 val_loss:1.7200 train_time:241998ms step_avg:80.67ms
+[2025-07-06 13:24:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:24:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 13:24:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:29:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:29:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:29:30] [Rank 0] Total Loss: 4.0910
+[2025-07-06 13:29:30] [Rank 0] Total FTA: 0.1438
+[2025-07-06 13:29:30] [Rank 0] Group 0 Loss: 4.4091
+[2025-07-06 13:29:30] [Rank 0] Group 1 Loss: 3.9712
+[2025-07-06 13:29:30] [Rank 0] Group 2 Loss: 3.8239
+[2025-07-06 13:29:30] [Rank 0] Group 3 Loss: 4.1230
+[2025-07-06 13:29:30] [Rank 0] Group 4 Loss: 4.0315
+[2025-07-06 13:29:30] [Rank 0] Group 5 Loss: 4.0651
+[2025-07-06 13:29:30] [Rank 0] Group 6 Loss: 4.0011
+[2025-07-06 13:29:30] [Rank 0] Group 7 Loss: 4.0715
+[2025-07-06 13:29:30] [Rank 0] Group 8 Loss: 4.0643
+[2025-07-06 13:29:30] [Rank 0] Group 9 Loss: 4.0592
+[2025-07-06 13:29:30] [Rank 0] Group 10 Loss: 4.0776
+[2025-07-06 13:29:30] [Rank 0] Group 11 Loss: 4.0827
+[2025-07-06 13:29:30] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-06 13:29:30] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 13:29:30] [Rank 0] Group 2 FTA: 0.1693
+[2025-07-06 13:29:30] [Rank 0] Group 3 FTA: 0.0911
+[2025-07-06 13:29:30] [Rank 0] Group 4 FTA: 0.1224
+[2025-07-06 13:29:30] [Rank 0] Group 5 FTA: 0.1250
+[2025-07-06 13:29:30] [Rank 0] Group 6 FTA: 0.1589
+[2025-07-06 13:29:30] [Rank 0] Group 7 FTA: 0.1719
+[2025-07-06 13:29:30] [Rank 0] Group 8 FTA: 0.1484
+[2025-07-06 13:29:30] [Rank 0] Group 9 FTA: 0.1602
+[2025-07-06 13:29:30] [Rank 0] Group 10 FTA: 0.1543
+[2025-07-06 13:29:30] [Rank 0] Group 11 FTA: 0.1729
+[2025-07-06 13:29:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:29:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:29:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:29:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:29:31] [Rank 0] step:3001/10000 train_time:242018ms step_avg:80.65ms
+[2025-07-06 13:29:33] [Rank 0] step:3021/10000 train_time:243510ms step_avg:80.61ms
+[2025-07-06 13:29:34] [Rank 0] step:3041/10000 train_time:245004ms step_avg:80.57ms
+[2025-07-06 13:29:37] [Rank 0] step:3061/10000 train_time:246500ms step_avg:80.53ms
+[2025-07-06 13:29:38] [Rank 0] step:3081/10000 train_time:248652ms step_avg:80.71ms
+[2025-07-06 13:29:40] [Rank 0] step:3101/10000 train_time:250148ms step_avg:80.67ms
+[2025-07-06 13:29:41] [Rank 0] step:3121/10000 train_time:251645ms step_avg:80.63ms
+[2025-07-06 13:29:43] [Rank 0] step:3141/10000 train_time:253142ms step_avg:80.59ms
+[2025-07-06 13:29:44] [Rank 0] step:3161/10000 train_time:254874ms step_avg:80.63ms
+[2025-07-06 13:29:46] [Rank 0] step:3181/10000 train_time:256372ms step_avg:80.59ms
+[2025-07-06 13:29:47] [Rank 0] step:3201/10000 train_time:257869ms step_avg:80.56ms
+[2025-07-06 13:29:49] [Rank 0] step:3221/10000 train_time:259368ms step_avg:80.52ms
+[2025-07-06 13:29:51] [Rank 0] step:3241/10000 train_time:261544ms step_avg:80.70ms
+[2025-07-06 13:29:52] [Rank 0] step:3261/10000 train_time:263024ms step_avg:80.66ms
+[2025-07-06 13:29:54] [Rank 0] step:3281/10000 train_time:264521ms step_avg:80.62ms
+[2025-07-06 13:29:55] [Rank 0] step:3301/10000 train_time:266019ms step_avg:80.59ms
+[2025-07-06 13:29:57] [Rank 0] step:3321/10000 train_time:267519ms step_avg:80.55ms
+[2025-07-06 13:29:59] [Rank 0] step:3341/10000 train_time:269665ms step_avg:80.71ms
+[2025-07-06 13:30:01] [Rank 0] step:3361/10000 train_time:271164ms step_avg:80.68ms
+[2025-07-06 13:30:02] [Rank 0] step:3381/10000 train_time:272665ms step_avg:80.65ms
+[2025-07-06 13:30:04] [Rank 0] step:3401/10000 train_time:274165ms step_avg:80.61ms
+[2025-07-06 13:30:05] [Rank 0] step:3421/10000 train_time:275919ms step_avg:80.65ms
+[2025-07-06 13:30:07] [Rank 0] step:3441/10000 train_time:277399ms step_avg:80.62ms
+[2025-07-06 13:30:08] [Rank 0] step:3461/10000 train_time:278899ms step_avg:80.58ms
+[2025-07-06 13:30:10] [Rank 0] step:3481/10000 train_time:280400ms step_avg:80.55ms
+[2025-07-06 13:30:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:30:12] [Rank 0] PRINT: step:3500/10000 train_loss:1.6385 val_loss:1.5664 train_time:281900ms step_avg:80.54ms
+[2025-07-06 13:30:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:30:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 13:30:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:35:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:35:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:35:36] [Rank 0] Total Loss: 4.0902
+[2025-07-06 13:35:36] [Rank 0] Total FTA: 0.2402
+[2025-07-06 13:35:36] [Rank 0] Group 0 Loss: 4.3960
+[2025-07-06 13:35:36] [Rank 0] Group 1 Loss: 3.8767
+[2025-07-06 13:35:36] [Rank 0] Group 2 Loss: 3.7982
+[2025-07-06 13:35:36] [Rank 0] Group 3 Loss: 4.1119
+[2025-07-06 13:35:36] [Rank 0] Group 4 Loss: 4.0825
+[2025-07-06 13:35:36] [Rank 0] Group 5 Loss: 4.0288
+[2025-07-06 13:35:36] [Rank 0] Group 6 Loss: 3.9874
+[2025-07-06 13:35:36] [Rank 0] Group 7 Loss: 4.1169
+[2025-07-06 13:35:36] [Rank 0] Group 8 Loss: 4.0872
+[2025-07-06 13:35:36] [Rank 0] Group 9 Loss: 4.0735
+[2025-07-06 13:35:36] [Rank 0] Group 10 Loss: 4.0877
+[2025-07-06 13:35:36] [Rank 0] Group 11 Loss: 4.1027
+[2025-07-06 13:35:36] [Rank 0] Group 0 FTA: 0.3576
+[2025-07-06 13:35:36] [Rank 0] Group 1 FTA: 0.2031
+[2025-07-06 13:35:36] [Rank 0] Group 2 FTA: 0.3672
+[2025-07-06 13:35:36] [Rank 0] Group 3 FTA: 0.2448
+[2025-07-06 13:35:36] [Rank 0] Group 4 FTA: 0.1719
+[2025-07-06 13:35:36] [Rank 0] Group 5 FTA: 0.1953
+[2025-07-06 13:35:36] [Rank 0] Group 6 FTA: 0.2057
+[2025-07-06 13:35:36] [Rank 0] Group 7 FTA: 0.2240
+[2025-07-06 13:35:36] [Rank 0] Group 8 FTA: 0.2266
+[2025-07-06 13:35:36] [Rank 0] Group 9 FTA: 0.2031
+[2025-07-06 13:35:36] [Rank 0] Group 10 FTA: 0.2109
+[2025-07-06 13:35:36] [Rank 0] Group 11 FTA: 0.2070
+[2025-07-06 13:35:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:35:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:35:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:35:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:35:37] [Rank 0] step:3501/10000 train_time:281920ms step_avg:80.53ms
+[2025-07-06 13:35:40] [Rank 0] step:3521/10000 train_time:284087ms step_avg:80.68ms
+[2025-07-06 13:35:41] [Rank 0] step:3541/10000 train_time:285581ms step_avg:80.65ms
+[2025-07-06 13:35:43] [Rank 0] step:3561/10000 train_time:287213ms step_avg:80.66ms
+[2025-07-06 13:35:44] [Rank 0] step:3581/10000 train_time:288805ms step_avg:80.65ms
+[2025-07-06 13:35:46] [Rank 0] step:3601/10000 train_time:290301ms step_avg:80.62ms
+[2025-07-06 13:35:48] [Rank 0] step:3621/10000 train_time:292457ms step_avg:80.77ms
+[2025-07-06 13:35:49] [Rank 0] step:3641/10000 train_time:293954ms step_avg:80.73ms
+[2025-07-06 13:35:51] [Rank 0] step:3661/10000 train_time:295451ms step_avg:80.70ms
+[2025-07-06 13:35:52] [Rank 0] step:3681/10000 train_time:296950ms step_avg:80.67ms
+[2025-07-06 13:35:54] [Rank 0] step:3701/10000 train_time:298681ms step_avg:80.70ms
+[2025-07-06 13:35:56] [Rank 0] step:3721/10000 train_time:300178ms step_avg:80.67ms
+[2025-07-06 13:35:57] [Rank 0] step:3741/10000 train_time:301676ms step_avg:80.64ms
+[2025-07-06 13:35:59] [Rank 0] step:3761/10000 train_time:303175ms step_avg:80.61ms
+[2025-07-06 13:36:01] [Rank 0] step:3781/10000 train_time:304931ms step_avg:80.65ms
+[2025-07-06 13:36:02] [Rank 0] step:3801/10000 train_time:306829ms step_avg:80.72ms
+[2025-07-06 13:36:04] [Rank 0] step:3821/10000 train_time:308328ms step_avg:80.69ms
+[2025-07-06 13:36:05] [Rank 0] step:3841/10000 train_time:309828ms step_avg:80.66ms
+[2025-07-06 13:36:07] [Rank 0] step:3861/10000 train_time:311327ms step_avg:80.63ms
+[2025-07-06 13:36:09] [Rank 0] step:3881/10000 train_time:313467ms step_avg:80.77ms
+[2025-07-06 13:36:10] [Rank 0] step:3901/10000 train_time:314966ms step_avg:80.74ms
+[2025-07-06 13:36:12] [Rank 0] step:3921/10000 train_time:316466ms step_avg:80.71ms
+[2025-07-06 13:36:13] [Rank 0] step:3941/10000 train_time:317966ms step_avg:80.68ms
+[2025-07-06 13:36:16] [Rank 0] step:3961/10000 train_time:319519ms step_avg:80.67ms
+[2025-07-06 13:36:17] [Rank 0] step:3981/10000 train_time:321611ms step_avg:80.79ms
+[2025-07-06 13:36:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:36:20] [Rank 0] PRINT: step:4000/10000 train_loss:1.5186 val_loss:1.4724 train_time:323114ms step_avg:80.78ms
+[2025-07-06 13:36:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:36:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 13:36:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:41:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:41:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:41:46] [Rank 0] Total Loss: 4.2917
+[2025-07-06 13:41:46] [Rank 0] Total FTA: 0.2853
+[2025-07-06 13:41:46] [Rank 0] Group 0 Loss: 5.1513
+[2025-07-06 13:41:46] [Rank 0] Group 1 Loss: 4.2278
+[2025-07-06 13:41:46] [Rank 0] Group 2 Loss: 3.9407
+[2025-07-06 13:41:46] [Rank 0] Group 3 Loss: 4.1985
+[2025-07-06 13:41:46] [Rank 0] Group 4 Loss: 4.2059
+[2025-07-06 13:41:46] [Rank 0] Group 5 Loss: 4.1059
+[2025-07-06 13:41:46] [Rank 0] Group 6 Loss: 4.0763
+[2025-07-06 13:41:46] [Rank 0] Group 7 Loss: 4.1959
+[2025-07-06 13:41:46] [Rank 0] Group 8 Loss: 4.1783
+[2025-07-06 13:41:46] [Rank 0] Group 9 Loss: 4.1515
+[2025-07-06 13:41:46] [Rank 0] Group 10 Loss: 4.2070
+[2025-07-06 13:41:46] [Rank 0] Group 11 Loss: 4.1754
+[2025-07-06 13:41:46] [Rank 0] Group 0 FTA: 0.4681
+[2025-07-06 13:41:46] [Rank 0] Group 1 FTA: 0.3411
+[2025-07-06 13:41:46] [Rank 0] Group 2 FTA: 0.3281
+[2025-07-06 13:41:46] [Rank 0] Group 3 FTA: 0.2318
+[2025-07-06 13:41:46] [Rank 0] Group 4 FTA: 0.1562
+[2025-07-06 13:41:46] [Rank 0] Group 5 FTA: 0.2031
+[2025-07-06 13:41:46] [Rank 0] Group 6 FTA: 0.2760
+[2025-07-06 13:41:46] [Rank 0] Group 7 FTA: 0.2526
+[2025-07-06 13:41:46] [Rank 0] Group 8 FTA: 0.2604
+[2025-07-06 13:41:46] [Rank 0] Group 9 FTA: 0.2734
+[2025-07-06 13:41:46] [Rank 0] Group 10 FTA: 0.2500
+[2025-07-06 13:41:46] [Rank 0] Group 11 FTA: 0.2559
+[2025-07-06 13:41:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:41:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:41:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:41:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:41:48] [Rank 0] step:4001/10000 train_time:323135ms step_avg:80.76ms
+[2025-07-06 13:41:49] [Rank 0] step:4021/10000 train_time:324638ms step_avg:80.74ms
+[2025-07-06 13:41:51] [Rank 0] step:4041/10000 train_time:326133ms step_avg:80.71ms
+[2025-07-06 13:41:53] [Rank 0] step:4061/10000 train_time:328288ms step_avg:80.84ms
+[2025-07-06 13:41:54] [Rank 0] step:4081/10000 train_time:329784ms step_avg:80.81ms
+[2025-07-06 13:41:56] [Rank 0] step:4101/10000 train_time:331278ms step_avg:80.78ms
+[2025-07-06 13:41:57] [Rank 0] step:4121/10000 train_time:332775ms step_avg:80.75ms
+[2025-07-06 13:42:00] [Rank 0] step:4141/10000 train_time:334529ms step_avg:80.78ms
+[2025-07-06 13:42:01] [Rank 0] step:4161/10000 train_time:336435ms step_avg:80.85ms
+[2025-07-06 13:42:03] [Rank 0] step:4181/10000 train_time:338133ms step_avg:80.87ms
+[2025-07-06 13:42:04] [Rank 0] step:4201/10000 train_time:339632ms step_avg:80.85ms
+[2025-07-06 13:42:06] [Rank 0] step:4221/10000 train_time:341131ms step_avg:80.82ms
+[2025-07-06 13:42:08] [Rank 0] step:4241/10000 train_time:343296ms step_avg:80.95ms
+[2025-07-06 13:42:09] [Rank 0] step:4261/10000 train_time:344794ms step_avg:80.92ms
+[2025-07-06 13:42:11] [Rank 0] step:4281/10000 train_time:346296ms step_avg:80.89ms
+[2025-07-06 13:42:12] [Rank 0] step:4301/10000 train_time:347795ms step_avg:80.86ms
+[2025-07-06 13:42:15] [Rank 0] step:4321/10000 train_time:349550ms step_avg:80.90ms
+[2025-07-06 13:42:16] [Rank 0] step:4341/10000 train_time:351441ms step_avg:80.96ms
+[2025-07-06 13:42:18] [Rank 0] step:4361/10000 train_time:352939ms step_avg:80.93ms
+[2025-07-06 13:42:19] [Rank 0] step:4381/10000 train_time:354442ms step_avg:80.90ms
+[2025-07-06 13:42:21] [Rank 0] step:4401/10000 train_time:355944ms step_avg:80.88ms
+[2025-07-06 13:42:23] [Rank 0] step:4421/10000 train_time:358113ms step_avg:81.00ms
+[2025-07-06 13:42:24] [Rank 0] step:4441/10000 train_time:359613ms step_avg:80.98ms
+[2025-07-06 13:42:26] [Rank 0] step:4461/10000 train_time:361112ms step_avg:80.95ms
+[2025-07-06 13:42:27] [Rank 0] step:4481/10000 train_time:362614ms step_avg:80.92ms
+[2025-07-06 13:42:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:42:30] [Rank 0] PRINT: step:4500/10000 train_loss:1.4419 val_loss:1.4097 train_time:364117ms step_avg:80.91ms
+[2025-07-06 13:42:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:42:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 13:42:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:47:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:47:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:47:54] [Rank 0] Total Loss: 4.2531
+[2025-07-06 13:47:54] [Rank 0] Total FTA: 0.3391
+[2025-07-06 13:47:54] [Rank 0] Group 0 Loss: 4.4654
+[2025-07-06 13:47:54] [Rank 0] Group 1 Loss: 4.1623
+[2025-07-06 13:47:54] [Rank 0] Group 2 Loss: 3.9444
+[2025-07-06 13:47:54] [Rank 0] Group 3 Loss: 4.1580
+[2025-07-06 13:47:54] [Rank 0] Group 4 Loss: 4.2928
+[2025-07-06 13:47:54] [Rank 0] Group 5 Loss: 4.1965
+[2025-07-06 13:47:54] [Rank 0] Group 6 Loss: 4.2033
+[2025-07-06 13:47:54] [Rank 0] Group 7 Loss: 4.2712
+[2025-07-06 13:47:54] [Rank 0] Group 8 Loss: 4.2489
+[2025-07-06 13:47:54] [Rank 0] Group 9 Loss: 4.2508
+[2025-07-06 13:47:54] [Rank 0] Group 10 Loss: 4.2575
+[2025-07-06 13:47:54] [Rank 0] Group 11 Loss: 4.2973
+[2025-07-06 13:47:54] [Rank 0] Group 0 FTA: 0.5072
+[2025-07-06 13:47:54] [Rank 0] Group 1 FTA: 0.5339
+[2025-07-06 13:47:54] [Rank 0] Group 2 FTA: 0.4089
+[2025-07-06 13:47:54] [Rank 0] Group 3 FTA: 0.2344
+[2025-07-06 13:47:54] [Rank 0] Group 4 FTA: 0.1979
+[2025-07-06 13:47:54] [Rank 0] Group 5 FTA: 0.2630
+[2025-07-06 13:47:54] [Rank 0] Group 6 FTA: 0.3203
+[2025-07-06 13:47:54] [Rank 0] Group 7 FTA: 0.3542
+[2025-07-06 13:47:54] [Rank 0] Group 8 FTA: 0.3125
+[2025-07-06 13:47:54] [Rank 0] Group 9 FTA: 0.2852
+[2025-07-06 13:47:54] [Rank 0] Group 10 FTA: 0.2949
+[2025-07-06 13:47:54] [Rank 0] Group 11 FTA: 0.2812
+[2025-07-06 13:47:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:47:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:47:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:47:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:47:56] [Rank 0] step:4501/10000 train_time:364145ms step_avg:80.90ms
+[2025-07-06 13:47:58] [Rank 0] step:4521/10000 train_time:366349ms step_avg:81.03ms
+[2025-07-06 13:47:59] [Rank 0] step:4541/10000 train_time:367841ms step_avg:81.00ms
+[2025-07-06 13:48:01] [Rank 0] step:4561/10000 train_time:369334ms step_avg:80.98ms
+[2025-07-06 13:48:02] [Rank 0] step:4581/10000 train_time:370830ms step_avg:80.95ms
+[2025-07-06 13:48:04] [Rank 0] step:4601/10000 train_time:372989ms step_avg:81.07ms
+[2025-07-06 13:48:06] [Rank 0] step:4621/10000 train_time:374485ms step_avg:81.04ms
+[2025-07-06 13:48:07] [Rank 0] step:4641/10000 train_time:375983ms step_avg:81.01ms
+[2025-07-06 13:48:09] [Rank 0] step:4661/10000 train_time:377481ms step_avg:80.99ms
+[2025-07-06 13:48:11] [Rank 0] step:4681/10000 train_time:379236ms step_avg:81.02ms
+[2025-07-06 13:48:12] [Rank 0] step:4701/10000 train_time:380716ms step_avg:80.99ms
+[2025-07-06 13:48:14] [Rank 0] step:4721/10000 train_time:382218ms step_avg:80.96ms
+[2025-07-06 13:48:15] [Rank 0] step:4741/10000 train_time:383719ms step_avg:80.94ms
+[2025-07-06 13:48:17] [Rank 0] step:4761/10000 train_time:385218ms step_avg:80.91ms
+[2025-07-06 13:48:18] [Rank 0] step:4781/10000 train_time:386956ms step_avg:80.94ms
+[2025-07-06 13:48:20] [Rank 0] step:4801/10000 train_time:388458ms step_avg:80.91ms
+[2025-07-06 13:48:21] [Rank 0] step:4821/10000 train_time:390136ms step_avg:80.92ms
+[2025-07-06 13:48:23] [Rank 0] step:4841/10000 train_time:391704ms step_avg:80.91ms
+[2025-07-06 13:48:25] [Rank 0] step:4861/10000 train_time:393896ms step_avg:81.03ms
+[2025-07-06 13:48:27] [Rank 0] step:4881/10000 train_time:395481ms step_avg:81.02ms
+[2025-07-06 13:48:28] [Rank 0] step:4901/10000 train_time:396979ms step_avg:81.00ms
+[2025-07-06 13:48:30] [Rank 0] step:4921/10000 train_time:398479ms step_avg:80.98ms
+[2025-07-06 13:48:31] [Rank 0] step:4941/10000 train_time:399980ms step_avg:80.95ms
+[2025-07-06 13:48:33] [Rank 0] step:4961/10000 train_time:402145ms step_avg:81.06ms
+[2025-07-06 13:48:35] [Rank 0] step:4981/10000 train_time:403646ms step_avg:81.04ms
+[2025-07-06 13:48:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:48:37] [Rank 0] PRINT: step:5000/10000 train_loss:1.3872 val_loss:1.3623 train_time:405145ms step_avg:81.03ms
+[2025-07-06 13:48:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:48:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 13:48:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 13:54:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 13:54:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 13:54:02] [Rank 0] Total Loss: 4.3325
+[2025-07-06 13:54:02] [Rank 0] Total FTA: 0.3357
+[2025-07-06 13:54:02] [Rank 0] Group 0 Loss: 4.4736
+[2025-07-06 13:54:02] [Rank 0] Group 1 Loss: 4.2447
+[2025-07-06 13:54:02] [Rank 0] Group 2 Loss: 4.0197
+[2025-07-06 13:54:02] [Rank 0] Group 3 Loss: 4.3992
+[2025-07-06 13:54:02] [Rank 0] Group 4 Loss: 4.4206
+[2025-07-06 13:54:02] [Rank 0] Group 5 Loss: 4.3808
+[2025-07-06 13:54:02] [Rank 0] Group 6 Loss: 4.2708
+[2025-07-06 13:54:02] [Rank 0] Group 7 Loss: 4.3053
+[2025-07-06 13:54:02] [Rank 0] Group 8 Loss: 4.3227
+[2025-07-06 13:54:02] [Rank 0] Group 9 Loss: 4.2935
+[2025-07-06 13:54:02] [Rank 0] Group 10 Loss: 4.3624
+[2025-07-06 13:54:02] [Rank 0] Group 11 Loss: 4.3324
+[2025-07-06 13:54:02] [Rank 0] Group 0 FTA: 0.5072
+[2025-07-06 13:54:02] [Rank 0] Group 1 FTA: 0.4505
+[2025-07-06 13:54:02] [Rank 0] Group 2 FTA: 0.3021
+[2025-07-06 13:54:02] [Rank 0] Group 3 FTA: 0.2812
+[2025-07-06 13:54:02] [Rank 0] Group 4 FTA: 0.1641
+[2025-07-06 13:54:02] [Rank 0] Group 5 FTA: 0.3203
+[2025-07-06 13:54:02] [Rank 0] Group 6 FTA: 0.2839
+[2025-07-06 13:54:02] [Rank 0] Group 7 FTA: 0.3620
+[2025-07-06 13:54:02] [Rank 0] Group 8 FTA: 0.2786
+[2025-07-06 13:54:02] [Rank 0] Group 9 FTA: 0.3047
+[2025-07-06 13:54:02] [Rank 0] Group 10 FTA: 0.3105
+[2025-07-06 13:54:02] [Rank 0] Group 11 FTA: 0.3184
+[2025-07-06 13:54:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 13:54:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 13:54:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 13:54:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 13:54:04] [Rank 0] step:5001/10000 train_time:405167ms step_avg:81.02ms
+[2025-07-06 13:54:05] [Rank 0] step:5021/10000 train_time:406658ms step_avg:80.99ms
+[2025-07-06 13:54:07] [Rank 0] step:5041/10000 train_time:408207ms step_avg:80.98ms
+[2025-07-06 13:54:09] [Rank 0] step:5061/10000 train_time:410316ms step_avg:81.07ms
+[2025-07-06 13:54:10] [Rank 0] step:5081/10000 train_time:411810ms step_avg:81.05ms
+[2025-07-06 13:54:12] [Rank 0] step:5101/10000 train_time:413305ms step_avg:81.02ms
+[2025-07-06 13:54:13] [Rank 0] step:5121/10000 train_time:414802ms step_avg:81.00ms
+[2025-07-06 13:54:15] [Rank 0] step:5141/10000 train_time:416958ms step_avg:81.10ms
+[2025-07-06 13:54:17] [Rank 0] step:5161/10000 train_time:418456ms step_avg:81.08ms
+[2025-07-06 13:54:18] [Rank 0] step:5181/10000 train_time:419953ms step_avg:81.06ms
+[2025-07-06 13:54:20] [Rank 0] step:5201/10000 train_time:421450ms step_avg:81.03ms
+[2025-07-06 13:54:22] [Rank 0] step:5221/10000 train_time:423623ms step_avg:81.14ms
+[2025-07-06 13:54:23] [Rank 0] step:5241/10000 train_time:425103ms step_avg:81.11ms
+[2025-07-06 13:54:25] [Rank 0] step:5261/10000 train_time:426601ms step_avg:81.09ms
+[2025-07-06 13:54:26] [Rank 0] step:5281/10000 train_time:428099ms step_avg:81.06ms
+[2025-07-06 13:54:28] [Rank 0] step:5301/10000 train_time:429599ms step_avg:81.04ms
+[2025-07-06 13:54:30] [Rank 0] step:5321/10000 train_time:431742ms step_avg:81.14ms
+[2025-07-06 13:54:32] [Rank 0] step:5341/10000 train_time:433242ms step_avg:81.12ms
+[2025-07-06 13:54:33] [Rank 0] step:5361/10000 train_time:434742ms step_avg:81.09ms
+[2025-07-06 13:54:35] [Rank 0] step:5381/10000 train_time:436243ms step_avg:81.07ms
+[2025-07-06 13:54:36] [Rank 0] step:5401/10000 train_time:437999ms step_avg:81.10ms
+[2025-07-06 13:54:38] [Rank 0] step:5421/10000 train_time:439482ms step_avg:81.07ms
+[2025-07-06 13:54:39] [Rank 0] step:5441/10000 train_time:440983ms step_avg:81.05ms
+[2025-07-06 13:54:41] [Rank 0] step:5461/10000 train_time:442731ms step_avg:81.07ms
+[2025-07-06 13:54:43] [Rank 0] step:5481/10000 train_time:444342ms step_avg:81.07ms
+[2025-07-06 13:54:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 13:54:46] [Rank 0] PRINT: step:5500/10000 train_loss:1.3451 val_loss:1.3246 train_time:446494ms step_avg:81.18ms
+[2025-07-06 13:54:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 13:54:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 13:54:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 14:00:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 14:00:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 14:00:11] [Rank 0] Total Loss: 4.4444
+[2025-07-06 14:00:11] [Rank 0] Total FTA: 0.3243
+[2025-07-06 14:00:11] [Rank 0] Group 0 Loss: 4.6404
+[2025-07-06 14:00:11] [Rank 0] Group 1 Loss: 4.4957
+[2025-07-06 14:00:11] [Rank 0] Group 2 Loss: 4.0247
+[2025-07-06 14:00:11] [Rank 0] Group 3 Loss: 4.4313
+[2025-07-06 14:00:11] [Rank 0] Group 4 Loss: 4.4702
+[2025-07-06 14:00:11] [Rank 0] Group 5 Loss: 4.3904
+[2025-07-06 14:00:11] [Rank 0] Group 6 Loss: 4.3691
+[2025-07-06 14:00:11] [Rank 0] Group 7 Loss: 4.4819
+[2025-07-06 14:00:11] [Rank 0] Group 8 Loss: 4.4449
+[2025-07-06 14:00:11] [Rank 0] Group 9 Loss: 4.4333
+[2025-07-06 14:00:11] [Rank 0] Group 10 Loss: 4.4422
+[2025-07-06 14:00:11] [Rank 0] Group 11 Loss: 4.4687
+[2025-07-06 14:00:11] [Rank 0] Group 0 FTA: 0.3316
+[2025-07-06 14:00:11] [Rank 0] Group 1 FTA: 0.5156
+[2025-07-06 14:00:11] [Rank 0] Group 2 FTA: 0.4062
+[2025-07-06 14:00:11] [Rank 0] Group 3 FTA: 0.2500
+[2025-07-06 14:00:11] [Rank 0] Group 4 FTA: 0.2057
+[2025-07-06 14:00:11] [Rank 0] Group 5 FTA: 0.2630
+[2025-07-06 14:00:11] [Rank 0] Group 6 FTA: 0.2526
+[2025-07-06 14:00:11] [Rank 0] Group 7 FTA: 0.3854
+[2025-07-06 14:00:11] [Rank 0] Group 8 FTA: 0.3099
+[2025-07-06 14:00:11] [Rank 0] Group 9 FTA: 0.2891
+[2025-07-06 14:00:11] [Rank 0] Group 10 FTA: 0.3242
+[2025-07-06 14:00:11] [Rank 0] Group 11 FTA: 0.3301
+[2025-07-06 14:00:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 14:00:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 14:00:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 14:00:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 14:00:13] [Rank 0] step:5501/10000 train_time:446515ms step_avg:81.17ms
+[2025-07-06 14:00:14] [Rank 0] step:5521/10000 train_time:448020ms step_avg:81.15ms
+[2025-07-06 14:00:16] [Rank 0] step:5541/10000 train_time:449514ms step_avg:81.13ms
+[2025-07-06 14:00:17] [Rank 0] step:5561/10000 train_time:451009ms step_avg:81.10ms
+[2025-07-06 14:00:19] [Rank 0] step:5581/10000 train_time:452506ms step_avg:81.08ms
+[2025-07-06 14:00:21] [Rank 0] step:5601/10000 train_time:454667ms step_avg:81.18ms
+[2025-07-06 14:00:22] [Rank 0] step:5621/10000 train_time:456164ms step_avg:81.15ms
+[2025-07-06 14:00:24] [Rank 0] step:5641/10000 train_time:457661ms step_avg:81.13ms
+[2025-07-06 14:00:25] [Rank 0] step:5661/10000 train_time:459158ms step_avg:81.11ms
+[2025-07-06 14:00:27] [Rank 0] step:5681/10000 train_time:460892ms step_avg:81.13ms
+[2025-07-06 14:00:28] [Rank 0] step:5701/10000 train_time:462390ms step_avg:81.11ms
+[2025-07-06 14:00:30] [Rank 0] step:5721/10000 train_time:463888ms step_avg:81.09ms
+[2025-07-06 14:00:31] [Rank 0] step:5741/10000 train_time:465387ms step_avg:81.06ms
+[2025-07-06 14:00:34] [Rank 0] step:5761/10000 train_time:467557ms step_avg:81.16ms
+[2025-07-06 14:00:35] [Rank 0] step:5781/10000 train_time:469036ms step_avg:81.13ms
+[2025-07-06 14:00:37] [Rank 0] step:5801/10000 train_time:470535ms step_avg:81.11ms
+[2025-07-06 14:00:38] [Rank 0] step:5821/10000 train_time:472036ms step_avg:81.09ms
+[2025-07-06 14:00:40] [Rank 0] step:5841/10000 train_time:473537ms step_avg:81.07ms
+[2025-07-06 14:00:42] [Rank 0] step:5861/10000 train_time:475805ms step_avg:81.18ms
+[2025-07-06 14:00:43] [Rank 0] step:5881/10000 train_time:477304ms step_avg:81.16ms
+[2025-07-06 14:00:45] [Rank 0] step:5901/10000 train_time:478804ms step_avg:81.14ms
+[2025-07-06 14:00:46] [Rank 0] step:5921/10000 train_time:480304ms step_avg:81.12ms
+[2025-07-06 14:00:48] [Rank 0] step:5941/10000 train_time:482470ms step_avg:81.21ms
+[2025-07-06 14:00:50] [Rank 0] step:5961/10000 train_time:483950ms step_avg:81.19ms
+[2025-07-06 14:00:51] [Rank 0] step:5981/10000 train_time:485449ms step_avg:81.17ms
+[2025-07-06 14:00:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 14:00:54] [Rank 0] PRINT: step:6000/10000 train_loss:1.3099 val_loss:1.2918 train_time:486949ms step_avg:81.16ms
+[2025-07-06 14:00:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 14:00:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 14:00:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 14:06:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 14:06:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 14:06:19] [Rank 0] Total Loss: 4.4828
+[2025-07-06 14:06:19] [Rank 0] Total FTA: 0.3588
+[2025-07-06 14:06:19] [Rank 0] Group 0 Loss: 4.6128
+[2025-07-06 14:06:19] [Rank 0] Group 1 Loss: 4.4200
+[2025-07-06 14:06:19] [Rank 0] Group 2 Loss: 4.1980
+[2025-07-06 14:06:19] [Rank 0] Group 3 Loss: 4.5309
+[2025-07-06 14:06:19] [Rank 0] Group 4 Loss: 4.4624
+[2025-07-06 14:06:19] [Rank 0] Group 5 Loss: 4.4033
+[2025-07-06 14:06:19] [Rank 0] Group 6 Loss: 4.4061
+[2025-07-06 14:06:19] [Rank 0] Group 7 Loss: 4.5084
+[2025-07-06 14:06:19] [Rank 0] Group 8 Loss: 4.5484
+[2025-07-06 14:06:19] [Rank 0] Group 9 Loss: 4.4453
+[2025-07-06 14:06:19] [Rank 0] Group 10 Loss: 4.5045
+[2025-07-06 14:06:19] [Rank 0] Group 11 Loss: 4.5280
+[2025-07-06 14:06:19] [Rank 0] Group 0 FTA: 0.5020
+[2025-07-06 14:06:19] [Rank 0] Group 1 FTA: 0.5000
+[2025-07-06 14:06:19] [Rank 0] Group 2 FTA: 0.3411
+[2025-07-06 14:06:19] [Rank 0] Group 3 FTA: 0.1875
+[2025-07-06 14:06:19] [Rank 0] Group 4 FTA: 0.1823
+[2025-07-06 14:06:19] [Rank 0] Group 5 FTA: 0.3750
+[2025-07-06 14:06:19] [Rank 0] Group 6 FTA: 0.3464
+[2025-07-06 14:06:19] [Rank 0] Group 7 FTA: 0.3828
+[2025-07-06 14:06:19] [Rank 0] Group 8 FTA: 0.3672
+[2025-07-06 14:06:20] [Rank 0] Group 9 FTA: 0.3477
+[2025-07-06 14:06:20] [Rank 0] Group 10 FTA: 0.3516
+[2025-07-06 14:06:20] [Rank 0] Group 11 FTA: 0.3281
+[2025-07-06 14:06:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 14:06:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 14:06:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 14:06:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 14:06:21] [Rank 0] step:6001/10000 train_time:486970ms step_avg:81.15ms
+[2025-07-06 14:06:22] [Rank 0] step:6021/10000 train_time:488490ms step_avg:81.13ms
+[2025-07-06 14:06:25] [Rank 0] step:6041/10000 train_time:490640ms step_avg:81.22ms
+[2025-07-06 14:06:26] [Rank 0] step:6061/10000 train_time:492133ms step_avg:81.20ms
+[2025-07-06 14:06:28] [Rank 0] step:6081/10000 train_time:493627ms step_avg:81.18ms
+[2025-07-06 14:06:29] [Rank 0] step:6101/10000 train_time:495121ms step_avg:81.15ms
+[2025-07-06 14:06:31] [Rank 0] step:6121/10000 train_time:496668ms step_avg:81.14ms
+[2025-07-06 14:06:33] [Rank 0] step:6141/10000 train_time:498752ms step_avg:81.22ms
+[2025-07-06 14:06:34] [Rank 0] step:6161/10000 train_time:500351ms step_avg:81.21ms
+[2025-07-06 14:06:36] [Rank 0] step:6181/10000 train_time:501846ms step_avg:81.19ms
+[2025-07-06 14:06:37] [Rank 0] step:6201/10000 train_time:503344ms step_avg:81.17ms
+[2025-07-06 14:06:39] [Rank 0] step:6221/10000 train_time:505178ms step_avg:81.21ms
+[2025-07-06 14:06:41] [Rank 0] step:6241/10000 train_time:506676ms step_avg:81.19ms
+[2025-07-06 14:06:42] [Rank 0] step:6261/10000 train_time:508174ms step_avg:81.17ms
+[2025-07-06 14:06:44] [Rank 0] step:6281/10000 train_time:509673ms step_avg:81.15ms
+[2025-07-06 14:06:45] [Rank 0] step:6301/10000 train_time:511323ms step_avg:81.15ms
+[2025-07-06 14:06:47] [Rank 0] step:6321/10000 train_time:513007ms step_avg:81.16ms
+[2025-07-06 14:06:48] [Rank 0] step:6341/10000 train_time:514504ms step_avg:81.14ms
+[2025-07-06 14:06:50] [Rank 0] step:6361/10000 train_time:516004ms step_avg:81.12ms
+[2025-07-06 14:06:51] [Rank 0] step:6381/10000 train_time:517502ms step_avg:81.10ms
+[2025-07-06 14:06:54] [Rank 0] step:6401/10000 train_time:519669ms step_avg:81.19ms
+[2025-07-06 14:06:55] [Rank 0] step:6421/10000 train_time:521164ms step_avg:81.17ms
+[2025-07-06 14:06:57] [Rank 0] step:6441/10000 train_time:522661ms step_avg:81.15ms
+[2025-07-06 14:06:58] [Rank 0] step:6461/10000 train_time:524159ms step_avg:81.13ms
+[2025-07-06 14:07:00] [Rank 0] step:6481/10000 train_time:525712ms step_avg:81.12ms
+[2025-07-06 14:07:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 14:07:03] [Rank 0] PRINT: step:6500/10000 train_loss:1.2780 val_loss:1.2619 train_time:527813ms step_avg:81.20ms
+[2025-07-06 14:07:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 14:07:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 14:07:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 14:12:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 14:12:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 14:12:29] [Rank 0] Total Loss: 4.5678
+[2025-07-06 14:12:29] [Rank 0] Total FTA: 0.3827
+[2025-07-06 14:12:29] [Rank 0] Group 0 Loss: 5.1378
+[2025-07-06 14:12:29] [Rank 0] Group 1 Loss: 4.2887
+[2025-07-06 14:12:29] [Rank 0] Group 2 Loss: 4.3066
+[2025-07-06 14:12:29] [Rank 0] Group 3 Loss: 4.4451
+[2025-07-06 14:12:29] [Rank 0] Group 4 Loss: 4.6304
+[2025-07-06 14:12:29] [Rank 0] Group 5 Loss: 4.4223
+[2025-07-06 14:12:29] [Rank 0] Group 6 Loss: 4.4568
+[2025-07-06 14:12:29] [Rank 0] Group 7 Loss: 4.5089
+[2025-07-06 14:12:29] [Rank 0] Group 8 Loss: 4.5165
+[2025-07-06 14:12:29] [Rank 0] Group 9 Loss: 4.5173
+[2025-07-06 14:12:29] [Rank 0] Group 10 Loss: 4.5333
+[2025-07-06 14:12:29] [Rank 0] Group 11 Loss: 4.5323
+[2025-07-06 14:12:29] [Rank 0] Group 0 FTA: 0.5124
+[2025-07-06 14:12:30] [Rank 0] Group 1 FTA: 0.4870
+[2025-07-06 14:12:30] [Rank 0] Group 2 FTA: 0.4115
+[2025-07-06 14:12:30] [Rank 0] Group 3 FTA: 0.3073
+[2025-07-06 14:12:30] [Rank 0] Group 4 FTA: 0.2005
+[2025-07-06 14:12:30] [Rank 0] Group 5 FTA: 0.4141
+[2025-07-06 14:12:30] [Rank 0] Group 6 FTA: 0.3568
+[2025-07-06 14:12:30] [Rank 0] Group 7 FTA: 0.3542
+[2025-07-06 14:12:30] [Rank 0] Group 8 FTA: 0.3464
+[2025-07-06 14:12:30] [Rank 0] Group 9 FTA: 0.3945
+[2025-07-06 14:12:30] [Rank 0] Group 10 FTA: 0.3438
+[2025-07-06 14:12:30] [Rank 0] Group 11 FTA: 0.3711
+[2025-07-06 14:12:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 14:12:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 14:12:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 14:12:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 14:12:31] [Rank 0] step:6501/10000 train_time:527836ms step_avg:81.19ms
+[2025-07-06 14:12:33] [Rank 0] step:6521/10000 train_time:529346ms step_avg:81.18ms
+[2025-07-06 14:12:34] [Rank 0] step:6541/10000 train_time:530840ms step_avg:81.16ms
+[2025-07-06 14:12:36] [Rank 0] step:6561/10000 train_time:532446ms step_avg:81.15ms
+[2025-07-06 14:12:38] [Rank 0] step:6581/10000 train_time:534284ms step_avg:81.19ms
+[2025-07-06 14:12:39] [Rank 0] step:6601/10000 train_time:535779ms step_avg:81.17ms
+[2025-07-06 14:12:40] [Rank 0] step:6621/10000 train_time:537275ms step_avg:81.15ms
+[2025-07-06 14:12:42] [Rank 0] step:6641/10000 train_time:538770ms step_avg:81.13ms
+[2025-07-06 14:12:44] [Rank 0] step:6661/10000 train_time:540936ms step_avg:81.21ms
+[2025-07-06 14:12:46] [Rank 0] step:6681/10000 train_time:542413ms step_avg:81.19ms
+[2025-07-06 14:12:47] [Rank 0] step:6701/10000 train_time:543909ms step_avg:81.17ms
+[2025-07-06 14:12:49] [Rank 0] step:6721/10000 train_time:545508ms step_avg:81.16ms
+[2025-07-06 14:12:50] [Rank 0] step:6741/10000 train_time:547109ms step_avg:81.16ms
+[2025-07-06 14:12:52] [Rank 0] step:6761/10000 train_time:549246ms step_avg:81.24ms
+[2025-07-06 14:12:54] [Rank 0] step:6781/10000 train_time:550742ms step_avg:81.22ms
+[2025-07-06 14:12:55] [Rank 0] step:6801/10000 train_time:552239ms step_avg:81.20ms
+[2025-07-06 14:12:57] [Rank 0] step:6821/10000 train_time:553736ms step_avg:81.18ms
+[2025-07-06 14:12:59] [Rank 0] step:6841/10000 train_time:555287ms step_avg:81.17ms
+[2025-07-06 14:13:01] [Rank 0] step:6861/10000 train_time:557392ms step_avg:81.24ms
+[2025-07-06 14:13:02] [Rank 0] step:6881/10000 train_time:558891ms step_avg:81.22ms
+[2025-07-06 14:13:04] [Rank 0] step:6901/10000 train_time:560391ms step_avg:81.20ms
+[2025-07-06 14:13:05] [Rank 0] step:6921/10000 train_time:561892ms step_avg:81.19ms
+[2025-07-06 14:13:07] [Rank 0] step:6941/10000 train_time:564033ms step_avg:81.26ms
+[2025-07-06 14:13:09] [Rank 0] step:6961/10000 train_time:565531ms step_avg:81.24ms
+[2025-07-06 14:13:10] [Rank 0] step:6981/10000 train_time:567130ms step_avg:81.24ms
+[2025-07-06 14:13:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 14:13:13] [Rank 0] PRINT: step:7000/10000 train_loss:1.2497 val_loss:1.2364 train_time:568629ms step_avg:81.23ms
+[2025-07-06 14:13:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 14:13:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 14:13:13] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:13:13] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:18:39] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:18:39] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:18:39] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:18:39] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:18:39] [Rank 0] Total Loss: 4.6152 +[2025-07-06 14:18:39] [Rank 0] Total Loss: 4.6152 +[2025-07-06 14:18:39] [Rank 0] Total FTA: 0.3632 +[2025-07-06 14:18:39] [Rank 0] Total FTA: 0.3632 +[2025-07-06 14:18:39] [Rank 0] Group 0 Loss: 5.2063 +[2025-07-06 14:18:39] [Rank 0] Group 0 Loss: 5.2063 +[2025-07-06 14:18:39] [Rank 0] Group 1 Loss: 4.2781 +[2025-07-06 14:18:39] [Rank 0] Group 1 Loss: 4.2781 +[2025-07-06 14:18:39] [Rank 0] Group 2 Loss: 4.4536 +[2025-07-06 14:18:39] [Rank 0] Group 2 Loss: 4.4536 +[2025-07-06 14:18:39] [Rank 0] Group 3 Loss: 4.5628 +[2025-07-06 14:18:39] [Rank 0] Group 3 Loss: 4.5628 +[2025-07-06 14:18:39] [Rank 0] Group 4 Loss: 4.5665 +[2025-07-06 14:18:39] [Rank 0] Group 4 Loss: 4.5665 +[2025-07-06 14:18:39] [Rank 0] Group 5 Loss: 4.4403 +[2025-07-06 14:18:39] [Rank 0] Group 5 Loss: 4.4403 +[2025-07-06 14:18:39] [Rank 0] Group 6 Loss: 4.5130 +[2025-07-06 14:18:39] [Rank 0] Group 6 Loss: 4.5130 +[2025-07-06 14:18:39] [Rank 0] Group 7 Loss: 4.5579 +[2025-07-06 14:18:39] [Rank 0] Group 7 Loss: 4.5579 +[2025-07-06 14:18:39] [Rank 0] Group 8 Loss: 4.5308 +[2025-07-06 14:18:39] [Rank 0] Group 8 Loss: 4.5308 +[2025-07-06 14:18:39] [Rank 0] Group 9 Loss: 4.5667 +[2025-07-06 14:18:39] [Rank 0] Group 9 Loss: 4.5667 +[2025-07-06 14:18:39] [Rank 0] Group 10 Loss: 4.5862 +[2025-07-06 14:18:39] [Rank 0] Group 10 Loss: 4.5862 +[2025-07-06 14:18:39] [Rank 0] Group 11 Loss: 4.5800 +[2025-07-06 14:18:39] [Rank 0] Group 11 Loss: 4.5800 +[2025-07-06 14:18:39] [Rank 0] Group 0 FTA: 0.5137 +[2025-07-06 14:18:39] [Rank 0] Group 0 FTA: 0.5137 +[2025-07-06 14:18:39] [Rank 0] Group 1 FTA: 0.5260 +[2025-07-06 14:18:39] [Rank 0] Group 1 FTA: 0.5260 +[2025-07-06 14:18:39] [Rank 0] Group 2 FTA: 0.1484 +[2025-07-06 14:18:39] [Rank 0] Group 2 FTA: 0.1484 +[2025-07-06 14:18:39] [Rank 0] Group 3 FTA: 0.2630 +[2025-07-06 14:18:39] [Rank 0] Group 3 FTA: 0.2630 +[2025-07-06 14:18:39] [Rank 0] Group 4 FTA: 0.2500 +[2025-07-06 14:18:39] [Rank 0] Group 4 FTA: 0.2500 +[2025-07-06 14:18:39] [Rank 0] Group 5 FTA: 0.3385 +[2025-07-06 14:18:39] [Rank 0] Group 5 FTA: 0.3385 +[2025-07-06 14:18:39] [Rank 0] Group 6 FTA: 0.3281 +[2025-07-06 14:18:39] [Rank 0] Group 6 FTA: 0.3281 +[2025-07-06 14:18:39] [Rank 0] Group 7 FTA: 0.3438 +[2025-07-06 14:18:39] [Rank 0] Group 7 FTA: 0.3438 +[2025-07-06 14:18:39] [Rank 0] Group 8 FTA: 0.3464 +[2025-07-06 14:18:39] [Rank 0] Group 8 FTA: 0.3464 +[2025-07-06 14:18:39] [Rank 0] Group 9 FTA: 0.3750 +[2025-07-06 14:18:39] [Rank 0] Group 9 FTA: 0.3750 +[2025-07-06 14:18:39] [Rank 0] Group 10 FTA: 0.3906 +[2025-07-06 14:18:39] [Rank 0] Group 10 FTA: 0.3906 +[2025-07-06 14:18:39] [Rank 0] Group 11 FTA: 0.3691 +[2025-07-06 14:18:39] [Rank 0] Group 11 FTA: 0.3691 +[2025-07-06 14:18:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:18:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:18:40] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:18:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:18:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:18:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:18:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:18:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:18:41] [Rank 0] step:7001/10000 train_time:568650ms step_avg:81.22ms +[2025-07-06 14:18:41] [Rank 0] step:7001/10000 train_time:568650ms step_avg:81.22ms +[2025-07-06 14:18:43] [Rank 0] step:7021/10000 train_time:570838ms step_avg:81.30ms +[2025-07-06 14:18:43] [Rank 0] step:7021/10000 train_time:570838ms step_avg:81.30ms +[2025-07-06 14:18:44] [Rank 0] step:7041/10000 train_time:572311ms step_avg:81.28ms +[2025-07-06 14:18:44] [Rank 0] step:7041/10000 train_time:572311ms step_avg:81.28ms +[2025-07-06 14:18:46] [Rank 0] step:7061/10000 train_time:573803ms step_avg:81.26ms +[2025-07-06 14:18:46] [Rank 0] step:7061/10000 train_time:573803ms step_avg:81.26ms +[2025-07-06 14:18:47] [Rank 0] step:7081/10000 train_time:575298ms step_avg:81.25ms +[2025-07-06 14:18:47] [Rank 0] step:7081/10000 train_time:575298ms step_avg:81.25ms +[2025-07-06 14:18:49] [Rank 0] step:7101/10000 train_time:576795ms step_avg:81.23ms +[2025-07-06 14:18:49] [Rank 0] step:7101/10000 train_time:576795ms step_avg:81.23ms +[2025-07-06 14:18:51] [Rank 0] step:7121/10000 train_time:578957ms step_avg:81.30ms +[2025-07-06 14:18:51] [Rank 0] step:7121/10000 train_time:578957ms step_avg:81.30ms +[2025-07-06 14:18:52] [Rank 0] step:7141/10000 train_time:580452ms step_avg:81.28ms +[2025-07-06 14:18:52] [Rank 0] step:7141/10000 train_time:580452ms step_avg:81.28ms +[2025-07-06 14:18:54] [Rank 0] step:7161/10000 train_time:581954ms step_avg:81.27ms +[2025-07-06 14:18:54] [Rank 0] step:7161/10000 train_time:581954ms step_avg:81.27ms +[2025-07-06 14:18:56] [Rank 0] step:7181/10000 train_time:583620ms step_avg:81.27ms +[2025-07-06 14:18:56] [Rank 0] step:7181/10000 train_time:583620ms step_avg:81.27ms +[2025-07-06 14:18:58] [Rank 0] step:7201/10000 train_time:585195ms step_avg:81.27ms +[2025-07-06 14:18:58] [Rank 0] step:7201/10000 train_time:585195ms step_avg:81.27ms +[2025-07-06 14:18:59] [Rank 0] step:7221/10000 train_time:587288ms step_avg:81.33ms +[2025-07-06 14:18:59] [Rank 0] step:7221/10000 train_time:587288ms step_avg:81.33ms +[2025-07-06 14:19:01] [Rank 0] step:7241/10000 train_time:588786ms step_avg:81.31ms +[2025-07-06 14:19:01] [Rank 0] step:7241/10000 train_time:588786ms step_avg:81.31ms +[2025-07-06 14:19:02] [Rank 0] step:7261/10000 train_time:590286ms step_avg:81.30ms +[2025-07-06 14:19:02] [Rank 0] step:7261/10000 train_time:590286ms step_avg:81.30ms +[2025-07-06 14:19:04] [Rank 0] step:7281/10000 train_time:591784ms step_avg:81.28ms +[2025-07-06 14:19:04] [Rank 0] step:7281/10000 train_time:591784ms step_avg:81.28ms +[2025-07-06 14:19:06] [Rank 0] step:7301/10000 train_time:593932ms step_avg:81.35ms +[2025-07-06 14:19:06] [Rank 
0] step:7301/10000 train_time:593932ms step_avg:81.35ms +[2025-07-06 14:19:07] [Rank 0] step:7321/10000 train_time:595429ms step_avg:81.33ms +[2025-07-06 14:19:07] [Rank 0] step:7321/10000 train_time:595429ms step_avg:81.33ms +[2025-07-06 14:19:09] [Rank 0] step:7341/10000 train_time:596928ms step_avg:81.31ms +[2025-07-06 14:19:09] [Rank 0] step:7341/10000 train_time:596928ms step_avg:81.31ms +[2025-07-06 14:19:10] [Rank 0] step:7361/10000 train_time:598426ms step_avg:81.30ms +[2025-07-06 14:19:10] [Rank 0] step:7361/10000 train_time:598426ms step_avg:81.30ms +[2025-07-06 14:19:12] [Rank 0] step:7381/10000 train_time:600598ms step_avg:81.37ms +[2025-07-06 14:19:12] [Rank 0] step:7381/10000 train_time:600598ms step_avg:81.37ms +[2025-07-06 14:19:14] [Rank 0] step:7401/10000 train_time:602178ms step_avg:81.36ms +[2025-07-06 14:19:14] [Rank 0] step:7401/10000 train_time:602178ms step_avg:81.36ms +[2025-07-06 14:19:16] [Rank 0] step:7421/10000 train_time:603676ms step_avg:81.35ms +[2025-07-06 14:19:16] [Rank 0] step:7421/10000 train_time:603676ms step_avg:81.35ms +[2025-07-06 14:19:17] [Rank 0] step:7441/10000 train_time:605173ms step_avg:81.33ms +[2025-07-06 14:19:17] [Rank 0] step:7441/10000 train_time:605173ms step_avg:81.33ms +[2025-07-06 14:19:19] [Rank 0] step:7461/10000 train_time:606671ms step_avg:81.31ms +[2025-07-06 14:19:19] [Rank 0] step:7461/10000 train_time:606671ms step_avg:81.31ms +[2025-07-06 14:19:21] [Rank 0] step:7481/10000 train_time:608909ms step_avg:81.39ms +[2025-07-06 14:19:21] [Rank 0] step:7481/10000 train_time:608909ms step_avg:81.39ms +[2025-07-06 14:19:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:19:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:19:23] [Rank 0] PRINT: step:7500/10000 train_loss:1.2256 val_loss:1.2142 train_time:610404ms step_avg:81.39ms +[2025-07-06 14:19:23] [Rank 0] PRINT: step:7500/10000 train_loss:1.2256 val_loss:1.2142 train_time:610404ms step_avg:81.39ms +[2025-07-06 14:19:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:19:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:19:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 14:19:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 14:19:23] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:19:23] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:24:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:24:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:24:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:24:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:24:49] [Rank 0] Total Loss: 4.6179 +[2025-07-06 14:24:49] [Rank 0] Total Loss: 4.6179 +[2025-07-06 14:24:49] [Rank 0] Total FTA: 0.3980 +[2025-07-06 14:24:49] [Rank 0] Total FTA: 0.3980 +[2025-07-06 14:24:49] [Rank 0] Group 0 Loss: 5.0902 +[2025-07-06 14:24:49] [Rank 0] Group 0 Loss: 5.0902 +[2025-07-06 14:24:49] [Rank 0] Group 1 Loss: 4.5596 +[2025-07-06 14:24:49] [Rank 0] Group 1 Loss: 4.5596 +[2025-07-06 14:24:49] [Rank 0] Group 2 Loss: 4.2597 +[2025-07-06 14:24:49] [Rank 0] Group 2 Loss: 4.2597 +[2025-07-06 14:24:49] [Rank 0] Group 3 Loss: 4.6891 +[2025-07-06 14:24:49] [Rank 0] Group 3 Loss: 4.6891 +[2025-07-06 14:24:49] [Rank 0] Group 4 Loss: 4.6620 +[2025-07-06 14:24:49] [Rank 0] Group 4 Loss: 4.6620 +[2025-07-06 14:24:49] [Rank 0] Group 5 Loss: 4.5014 +[2025-07-06 14:24:49] [Rank 0] Group 5 Loss: 4.5014 +[2025-07-06 14:24:49] [Rank 0] Group 6 Loss: 4.4917 +[2025-07-06 14:24:49] [Rank 0] Group 6 Loss: 4.4917 +[2025-07-06 14:24:49] [Rank 0] Group 7 Loss: 4.5417 +[2025-07-06 14:24:49] [Rank 0] Group 7 Loss: 4.5417 +[2025-07-06 14:24:49] [Rank 0] Group 8 Loss: 4.5265 +[2025-07-06 14:24:49] [Rank 0] Group 8 Loss: 4.5265 +[2025-07-06 14:24:49] [Rank 0] Group 9 Loss: 4.5496 +[2025-07-06 14:24:49] [Rank 0] Group 9 Loss: 4.5496 +[2025-07-06 14:24:49] [Rank 0] Group 10 Loss: 4.6121 +[2025-07-06 14:24:49] [Rank 0] Group 10 Loss: 4.6121 +[2025-07-06 14:24:49] [Rank 0] Group 11 Loss: 4.5498 +[2025-07-06 14:24:49] [Rank 0] Group 11 Loss: 4.5498 +[2025-07-06 14:24:49] [Rank 0] Group 0 FTA: 0.5137 +[2025-07-06 14:24:49] [Rank 0] Group 0 FTA: 0.5137 +[2025-07-06 14:24:49] [Rank 0] Group 1 FTA: 0.4922 +[2025-07-06 14:24:49] [Rank 0] Group 1 FTA: 0.4922 +[2025-07-06 14:24:49] [Rank 0] Group 2 FTA: 0.4635 +[2025-07-06 14:24:49] [Rank 0] Group 2 FTA: 0.4635 +[2025-07-06 14:24:49] [Rank 0] Group 3 FTA: 0.3021 +[2025-07-06 14:24:49] [Rank 0] Group 3 FTA: 0.3021 +[2025-07-06 14:24:49] [Rank 0] Group 4 FTA: 0.1875 +[2025-07-06 14:24:49] [Rank 0] Group 4 FTA: 0.1875 +[2025-07-06 14:24:49] [Rank 0] Group 5 FTA: 0.3490 +[2025-07-06 14:24:49] [Rank 0] Group 5 FTA: 0.3490 +[2025-07-06 14:24:49] [Rank 0] Group 6 FTA: 0.3906 +[2025-07-06 14:24:49] [Rank 0] Group 6 FTA: 0.3906 +[2025-07-06 14:24:49] [Rank 0] Group 7 FTA: 0.4036 +[2025-07-06 14:24:49] [Rank 0] Group 7 FTA: 0.4036 +[2025-07-06 14:24:49] [Rank 0] Group 8 FTA: 0.3490 +[2025-07-06 14:24:49] [Rank 0] Group 8 FTA: 0.3490 +[2025-07-06 14:24:49] [Rank 0] Group 9 FTA: 0.3672 +[2025-07-06 14:24:49] [Rank 0] Group 9 FTA: 0.3672 +[2025-07-06 14:24:49] [Rank 0] Group 10 FTA: 0.4355 +[2025-07-06 14:24:49] [Rank 0] Group 10 FTA: 0.4355 +[2025-07-06 14:24:49] [Rank 0] Group 11 FTA: 0.3926 +[2025-07-06 14:24:49] [Rank 0] Group 11 FTA: 0.3926 +[2025-07-06 14:24:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:24:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:24:50] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:24:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:24:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:24:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:24:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:24:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:24:50] [Rank 0] step:7501/10000 train_time:610426ms step_avg:81.38ms +[2025-07-06 14:24:50] [Rank 0] step:7501/10000 train_time:610426ms step_avg:81.38ms +[2025-07-06 14:24:52] [Rank 0] step:7521/10000 train_time:611937ms step_avg:81.36ms +[2025-07-06 14:24:52] [Rank 0] step:7521/10000 train_time:611937ms step_avg:81.36ms +[2025-07-06 14:24:53] [Rank 0] step:7541/10000 train_time:613432ms step_avg:81.35ms +[2025-07-06 14:24:53] [Rank 0] step:7541/10000 train_time:613432ms step_avg:81.35ms +[2025-07-06 14:24:55] [Rank 0] step:7561/10000 train_time:615589ms step_avg:81.42ms +[2025-07-06 14:24:55] [Rank 0] step:7561/10000 train_time:615589ms step_avg:81.42ms +[2025-07-06 14:24:57] [Rank 0] step:7581/10000 train_time:617063ms step_avg:81.40ms +[2025-07-06 14:24:57] [Rank 0] step:7581/10000 train_time:617063ms step_avg:81.40ms +[2025-07-06 14:24:58] [Rank 0] step:7601/10000 train_time:618560ms step_avg:81.38ms +[2025-07-06 14:24:58] [Rank 0] step:7601/10000 train_time:618560ms step_avg:81.38ms +[2025-07-06 14:25:00] [Rank 0] step:7621/10000 train_time:620056ms step_avg:81.36ms +[2025-07-06 14:25:00] [Rank 0] step:7621/10000 train_time:620056ms step_avg:81.36ms +[2025-07-06 14:25:01] [Rank 0] step:7641/10000 train_time:621553ms step_avg:81.34ms +[2025-07-06 14:25:01] [Rank 0] step:7641/10000 train_time:621553ms step_avg:81.34ms +[2025-07-06 14:25:04] [Rank 0] step:7661/10000 train_time:623708ms step_avg:81.41ms +[2025-07-06 14:25:04] [Rank 0] step:7661/10000 train_time:623708ms step_avg:81.41ms +[2025-07-06 14:25:05] [Rank 0] step:7681/10000 train_time:625203ms step_avg:81.40ms +[2025-07-06 14:25:05] [Rank 0] step:7681/10000 train_time:625203ms step_avg:81.40ms +[2025-07-06 14:25:07] [Rank 0] step:7701/10000 train_time:626701ms step_avg:81.38ms +[2025-07-06 14:25:07] [Rank 0] step:7701/10000 train_time:626701ms step_avg:81.38ms +[2025-07-06 14:25:08] [Rank 0] step:7721/10000 train_time:628198ms step_avg:81.36ms +[2025-07-06 14:25:08] [Rank 0] step:7721/10000 train_time:628198ms step_avg:81.36ms +[2025-07-06 14:25:10] [Rank 0] step:7741/10000 train_time:630373ms step_avg:81.43ms +[2025-07-06 14:25:10] [Rank 0] step:7741/10000 train_time:630373ms step_avg:81.43ms +[2025-07-06 14:25:12] [Rank 0] step:7761/10000 train_time:631952ms step_avg:81.43ms +[2025-07-06 14:25:12] [Rank 0] step:7761/10000 train_time:631952ms step_avg:81.43ms +[2025-07-06 14:25:13] [Rank 0] step:7781/10000 train_time:633449ms step_avg:81.41ms +[2025-07-06 14:25:13] [Rank 0] step:7781/10000 train_time:633449ms step_avg:81.41ms +[2025-07-06 14:25:15] [Rank 0] step:7801/10000 train_time:635095ms step_avg:81.41ms +[2025-07-06 14:25:15] [Rank 
0] step:7801/10000 train_time:635095ms step_avg:81.41ms +[2025-07-06 14:25:16] [Rank 0] step:7821/10000 train_time:636659ms step_avg:81.40ms +[2025-07-06 14:25:16] [Rank 0] step:7821/10000 train_time:636659ms step_avg:81.40ms +[2025-07-06 14:25:19] [Rank 0] step:7841/10000 train_time:638907ms step_avg:81.48ms +[2025-07-06 14:25:19] [Rank 0] step:7841/10000 train_time:638907ms step_avg:81.48ms +[2025-07-06 14:25:20] [Rank 0] step:7861/10000 train_time:640405ms step_avg:81.47ms +[2025-07-06 14:25:20] [Rank 0] step:7861/10000 train_time:640405ms step_avg:81.47ms +[2025-07-06 14:25:22] [Rank 0] step:7881/10000 train_time:641903ms step_avg:81.45ms +[2025-07-06 14:25:22] [Rank 0] step:7881/10000 train_time:641903ms step_avg:81.45ms +[2025-07-06 14:25:23] [Rank 0] step:7901/10000 train_time:643403ms step_avg:81.43ms +[2025-07-06 14:25:23] [Rank 0] step:7901/10000 train_time:643403ms step_avg:81.43ms +[2025-07-06 14:25:25] [Rank 0] step:7921/10000 train_time:645057ms step_avg:81.44ms +[2025-07-06 14:25:25] [Rank 0] step:7921/10000 train_time:645057ms step_avg:81.44ms +[2025-07-06 14:25:27] [Rank 0] step:7941/10000 train_time:647149ms step_avg:81.49ms +[2025-07-06 14:25:27] [Rank 0] step:7941/10000 train_time:647149ms step_avg:81.49ms +[2025-07-06 14:25:28] [Rank 0] step:7961/10000 train_time:648652ms step_avg:81.48ms +[2025-07-06 14:25:28] [Rank 0] step:7961/10000 train_time:648652ms step_avg:81.48ms +[2025-07-06 14:25:30] [Rank 0] step:7981/10000 train_time:650155ms step_avg:81.46ms +[2025-07-06 14:25:30] [Rank 0] step:7981/10000 train_time:650155ms step_avg:81.46ms +[2025-07-06 14:25:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:25:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:25:32] [Rank 0] PRINT: step:8000/10000 train_loss:1.2048 val_loss:1.1950 train_time:651658ms step_avg:81.46ms +[2025-07-06 14:25:32] [Rank 0] PRINT: step:8000/10000 train_loss:1.2048 val_loss:1.1950 train_time:651658ms step_avg:81.46ms +[2025-07-06 14:25:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:25:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:25:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 14:25:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 14:25:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:25:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:30:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:30:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:30:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:30:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:30:57] [Rank 0] Total Loss: 4.6907 +[2025-07-06 14:30:57] [Rank 0] Total Loss: 4.6907 +[2025-07-06 14:30:57] [Rank 0] Total FTA: 0.3717 +[2025-07-06 14:30:57] [Rank 0] Total FTA: 0.3717 +[2025-07-06 14:30:57] [Rank 0] Group 0 Loss: 4.9767 +[2025-07-06 14:30:57] [Rank 0] Group 0 Loss: 4.9767 +[2025-07-06 14:30:57] [Rank 0] Group 1 Loss: 4.4668 +[2025-07-06 14:30:57] [Rank 0] Group 1 Loss: 4.4668 +[2025-07-06 14:30:57] [Rank 0] Group 2 Loss: 4.2880 +[2025-07-06 14:30:57] [Rank 0] Group 2 Loss: 4.2880 +[2025-07-06 14:30:57] [Rank 0] Group 3 Loss: 4.9464 +[2025-07-06 14:30:57] [Rank 0] Group 3 Loss: 4.9464 +[2025-07-06 14:30:57] [Rank 0] Group 4 Loss: 4.6755 +[2025-07-06 14:30:57] [Rank 0] Group 4 Loss: 4.6755 +[2025-07-06 14:30:57] [Rank 0] Group 5 Loss: 4.6877 +[2025-07-06 14:30:57] [Rank 0] Group 5 Loss: 4.6877 +[2025-07-06 14:30:57] [Rank 0] Group 6 Loss: 4.5542 +[2025-07-06 14:30:57] [Rank 0] Group 6 Loss: 4.5542 +[2025-07-06 14:30:57] [Rank 0] Group 7 Loss: 4.6922 +[2025-07-06 14:30:57] [Rank 0] Group 7 Loss: 4.6922 +[2025-07-06 14:30:57] [Rank 0] Group 8 Loss: 4.6865 +[2025-07-06 14:30:57] [Rank 0] Group 8 Loss: 4.6865 +[2025-07-06 14:30:57] [Rank 0] Group 9 Loss: 4.6498 +[2025-07-06 14:30:57] [Rank 0] Group 9 Loss: 4.6498 +[2025-07-06 14:30:57] [Rank 0] Group 10 Loss: 4.6959 +[2025-07-06 14:30:57] [Rank 0] Group 10 Loss: 4.6959 +[2025-07-06 14:30:57] [Rank 0] Group 11 Loss: 4.6819 +[2025-07-06 14:30:57] [Rank 0] Group 11 Loss: 4.6819 +[2025-07-06 14:30:57] [Rank 0] Group 0 FTA: 0.3212 +[2025-07-06 14:30:57] [Rank 0] Group 0 FTA: 0.3212 +[2025-07-06 14:30:57] [Rank 0] Group 1 FTA: 0.4740 +[2025-07-06 14:30:57] [Rank 0] Group 1 FTA: 0.4740 +[2025-07-06 14:30:57] [Rank 0] Group 2 FTA: 0.4453 +[2025-07-06 14:30:57] [Rank 0] Group 2 FTA: 0.4453 +[2025-07-06 14:30:57] [Rank 0] Group 3 FTA: 0.3203 +[2025-07-06 14:30:57] [Rank 0] Group 3 FTA: 0.3203 +[2025-07-06 14:30:57] [Rank 0] Group 4 FTA: 0.2109 +[2025-07-06 14:30:57] [Rank 0] Group 4 FTA: 0.2109 +[2025-07-06 14:30:57] [Rank 0] Group 5 FTA: 0.2865 +[2025-07-06 14:30:57] [Rank 0] Group 5 FTA: 0.2865 +[2025-07-06 14:30:57] [Rank 0] Group 6 FTA: 0.4245 +[2025-07-06 14:30:57] [Rank 0] Group 6 FTA: 0.4245 +[2025-07-06 14:30:57] [Rank 0] Group 7 FTA: 0.3984 +[2025-07-06 14:30:57] [Rank 0] Group 7 FTA: 0.3984 +[2025-07-06 14:30:57] [Rank 0] Group 8 FTA: 0.3750 +[2025-07-06 14:30:57] [Rank 0] Group 8 FTA: 0.3750 +[2025-07-06 14:30:57] [Rank 0] Group 9 FTA: 0.3984 +[2025-07-06 14:30:57] [Rank 0] Group 9 FTA: 0.3984 +[2025-07-06 14:30:57] [Rank 0] Group 10 FTA: 0.4062 +[2025-07-06 14:30:57] [Rank 0] Group 10 FTA: 0.4062 +[2025-07-06 14:30:57] [Rank 0] Group 11 FTA: 0.4004 +[2025-07-06 14:30:57] [Rank 0] Group 11 FTA: 0.4004 +[2025-07-06 14:30:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:30:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:30:58] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:30:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:30:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:30:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:30:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:30:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:30:59] [Rank 0] step:8001/10000 train_time:651681ms step_avg:81.45ms +[2025-07-06 14:30:59] [Rank 0] step:8001/10000 train_time:651681ms step_avg:81.45ms +[2025-07-06 14:31:01] [Rank 0] step:8021/10000 train_time:653858ms step_avg:81.52ms +[2025-07-06 14:31:01] [Rank 0] step:8021/10000 train_time:653858ms step_avg:81.52ms +[2025-07-06 14:31:03] [Rank 0] step:8041/10000 train_time:655352ms step_avg:81.50ms +[2025-07-06 14:31:03] [Rank 0] step:8041/10000 train_time:655352ms step_avg:81.50ms +[2025-07-06 14:31:04] [Rank 0] step:8061/10000 train_time:656949ms step_avg:81.50ms +[2025-07-06 14:31:04] [Rank 0] step:8061/10000 train_time:656949ms step_avg:81.50ms +[2025-07-06 14:31:06] [Rank 0] step:8081/10000 train_time:658444ms step_avg:81.48ms +[2025-07-06 14:31:06] [Rank 0] step:8081/10000 train_time:658444ms step_avg:81.48ms +[2025-07-06 14:31:08] [Rank 0] step:8101/10000 train_time:659991ms step_avg:81.47ms +[2025-07-06 14:31:08] [Rank 0] step:8101/10000 train_time:659991ms step_avg:81.47ms +[2025-07-06 14:31:09] [Rank 0] step:8121/10000 train_time:662101ms step_avg:81.53ms +[2025-07-06 14:31:09] [Rank 0] step:8121/10000 train_time:662101ms step_avg:81.53ms +[2025-07-06 14:31:11] [Rank 0] step:8141/10000 train_time:663697ms step_avg:81.53ms +[2025-07-06 14:31:11] [Rank 0] step:8141/10000 train_time:663697ms step_avg:81.53ms +[2025-07-06 14:31:12] [Rank 0] step:8161/10000 train_time:665194ms step_avg:81.51ms +[2025-07-06 14:31:12] [Rank 0] step:8161/10000 train_time:665194ms step_avg:81.51ms +[2025-07-06 14:31:14] [Rank 0] step:8181/10000 train_time:666693ms step_avg:81.49ms +[2025-07-06 14:31:14] [Rank 0] step:8181/10000 train_time:666693ms step_avg:81.49ms +[2025-07-06 14:31:16] [Rank 0] step:8201/10000 train_time:668837ms step_avg:81.56ms +[2025-07-06 14:31:16] [Rank 0] step:8201/10000 train_time:668837ms step_avg:81.56ms +[2025-07-06 14:31:18] [Rank 0] step:8221/10000 train_time:670333ms step_avg:81.54ms +[2025-07-06 14:31:18] [Rank 0] step:8221/10000 train_time:670333ms step_avg:81.54ms +[2025-07-06 14:31:19] [Rank 0] step:8241/10000 train_time:671830ms step_avg:81.52ms +[2025-07-06 14:31:19] [Rank 0] step:8241/10000 train_time:671830ms step_avg:81.52ms +[2025-07-06 14:31:21] [Rank 0] step:8261/10000 train_time:673328ms step_avg:81.51ms +[2025-07-06 14:31:21] [Rank 0] step:8261/10000 train_time:673328ms step_avg:81.51ms +[2025-07-06 14:31:22] [Rank 0] step:8281/10000 train_time:674874ms step_avg:81.50ms +[2025-07-06 14:31:22] [Rank 0] step:8281/10000 train_time:674874ms step_avg:81.50ms +[2025-07-06 14:31:24] [Rank 0] step:8301/10000 train_time:676562ms step_avg:81.50ms +[2025-07-06 14:31:24] [Rank 
0] step:8301/10000 train_time:676562ms step_avg:81.50ms +[2025-07-06 14:31:25] [Rank 0] step:8321/10000 train_time:678062ms step_avg:81.49ms +[2025-07-06 14:31:25] [Rank 0] step:8321/10000 train_time:678062ms step_avg:81.49ms +[2025-07-06 14:31:27] [Rank 0] step:8341/10000 train_time:679561ms step_avg:81.47ms +[2025-07-06 14:31:27] [Rank 0] step:8341/10000 train_time:679561ms step_avg:81.47ms +[2025-07-06 14:31:28] [Rank 0] step:8361/10000 train_time:681062ms step_avg:81.46ms +[2025-07-06 14:31:28] [Rank 0] step:8361/10000 train_time:681062ms step_avg:81.46ms +[2025-07-06 14:31:30] [Rank 0] step:8381/10000 train_time:683207ms step_avg:81.52ms +[2025-07-06 14:31:30] [Rank 0] step:8381/10000 train_time:683207ms step_avg:81.52ms +[2025-07-06 14:31:32] [Rank 0] step:8401/10000 train_time:684809ms step_avg:81.52ms +[2025-07-06 14:31:32] [Rank 0] step:8401/10000 train_time:684809ms step_avg:81.52ms +[2025-07-06 14:31:34] [Rank 0] step:8421/10000 train_time:686311ms step_avg:81.50ms +[2025-07-06 14:31:34] [Rank 0] step:8421/10000 train_time:686311ms step_avg:81.50ms +[2025-07-06 14:31:35] [Rank 0] step:8441/10000 train_time:688095ms step_avg:81.52ms +[2025-07-06 14:31:35] [Rank 0] step:8441/10000 train_time:688095ms step_avg:81.52ms +[2025-07-06 14:31:38] [Rank 0] step:8461/10000 train_time:690382ms step_avg:81.60ms +[2025-07-06 14:31:38] [Rank 0] step:8461/10000 train_time:690382ms step_avg:81.60ms +[2025-07-06 14:31:39] [Rank 0] step:8481/10000 train_time:691865ms step_avg:81.58ms +[2025-07-06 14:31:39] [Rank 0] step:8481/10000 train_time:691865ms step_avg:81.58ms +[2025-07-06 14:31:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:31:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:31:42] [Rank 0] PRINT: step:8500/10000 train_loss:1.1875 val_loss:1.1803 train_time:693371ms step_avg:81.57ms +[2025-07-06 14:31:42] [Rank 0] PRINT: step:8500/10000 train_loss:1.1875 val_loss:1.1803 train_time:693371ms step_avg:81.57ms +[2025-07-06 14:31:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:31:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:31:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 14:31:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 14:31:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:31:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:37:07] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:37:07] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:37:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:37:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:37:07] [Rank 0] Total Loss: 4.6311 +[2025-07-06 14:37:07] [Rank 0] Total Loss: 4.6311 +[2025-07-06 14:37:07] [Rank 0] Total FTA: 0.4175 +[2025-07-06 14:37:07] [Rank 0] Total FTA: 0.4175 +[2025-07-06 14:37:07] [Rank 0] Group 0 Loss: 4.7163 +[2025-07-06 14:37:07] [Rank 0] Group 0 Loss: 4.7163 +[2025-07-06 14:37:07] [Rank 0] Group 1 Loss: 4.4906 +[2025-07-06 14:37:07] [Rank 0] Group 1 Loss: 4.4906 +[2025-07-06 14:37:07] [Rank 0] Group 2 Loss: 4.2399 +[2025-07-06 14:37:07] [Rank 0] Group 2 Loss: 4.2399 +[2025-07-06 14:37:07] [Rank 0] Group 3 Loss: 4.7751 +[2025-07-06 14:37:07] [Rank 0] Group 3 Loss: 4.7751 +[2025-07-06 14:37:07] [Rank 0] Group 4 Loss: 4.6050 +[2025-07-06 14:37:07] [Rank 0] Group 4 Loss: 4.6050 +[2025-07-06 14:37:07] [Rank 0] Group 5 Loss: 4.6451 +[2025-07-06 14:37:07] [Rank 0] Group 5 Loss: 4.6451 +[2025-07-06 14:37:08] [Rank 0] Group 6 Loss: 4.5803 +[2025-07-06 14:37:08] [Rank 0] Group 6 Loss: 4.5803 +[2025-07-06 14:37:08] [Rank 0] Group 7 Loss: 4.6590 +[2025-07-06 14:37:08] [Rank 0] Group 7 Loss: 4.6590 +[2025-07-06 14:37:08] [Rank 0] Group 8 Loss: 4.6743 +[2025-07-06 14:37:08] [Rank 0] Group 8 Loss: 4.6743 +[2025-07-06 14:37:08] [Rank 0] Group 9 Loss: 4.6366 +[2025-07-06 14:37:08] [Rank 0] Group 9 Loss: 4.6366 +[2025-07-06 14:37:08] [Rank 0] Group 10 Loss: 4.6802 +[2025-07-06 14:37:08] [Rank 0] Group 10 Loss: 4.6802 +[2025-07-06 14:37:08] [Rank 0] Group 11 Loss: 4.6833 +[2025-07-06 14:37:08] [Rank 0] Group 11 Loss: 4.6833 +[2025-07-06 14:37:08] [Rank 0] Group 0 FTA: 0.5345 +[2025-07-06 14:37:08] [Rank 0] Group 0 FTA: 0.5345 +[2025-07-06 14:37:08] [Rank 0] Group 1 FTA: 0.5078 +[2025-07-06 14:37:08] [Rank 0] Group 1 FTA: 0.5078 +[2025-07-06 14:37:08] [Rank 0] Group 2 FTA: 0.4557 +[2025-07-06 14:37:08] [Rank 0] Group 2 FTA: 0.4557 +[2025-07-06 14:37:08] [Rank 0] Group 3 FTA: 0.2969 +[2025-07-06 14:37:08] [Rank 0] Group 3 FTA: 0.2969 +[2025-07-06 14:37:08] [Rank 0] Group 4 FTA: 0.2083 +[2025-07-06 14:37:08] [Rank 0] Group 4 FTA: 0.2083 +[2025-07-06 14:37:08] [Rank 0] Group 5 FTA: 0.4453 +[2025-07-06 14:37:08] [Rank 0] Group 5 FTA: 0.4453 +[2025-07-06 14:37:08] [Rank 0] Group 6 FTA: 0.4115 +[2025-07-06 14:37:08] [Rank 0] Group 6 FTA: 0.4115 +[2025-07-06 14:37:08] [Rank 0] Group 7 FTA: 0.4531 +[2025-07-06 14:37:08] [Rank 0] Group 7 FTA: 0.4531 +[2025-07-06 14:37:08] [Rank 0] Group 8 FTA: 0.4245 +[2025-07-06 14:37:08] [Rank 0] Group 8 FTA: 0.4245 +[2025-07-06 14:37:08] [Rank 0] Group 9 FTA: 0.3867 +[2025-07-06 14:37:08] [Rank 0] Group 9 FTA: 0.3867 +[2025-07-06 14:37:08] [Rank 0] Group 10 FTA: 0.4023 +[2025-07-06 14:37:08] [Rank 0] Group 10 FTA: 0.4023 +[2025-07-06 14:37:08] [Rank 0] Group 11 FTA: 0.3965 +[2025-07-06 14:37:08] [Rank 0] Group 11 FTA: 0.3965 +[2025-07-06 14:37:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:37:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:37:09] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:37:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:37:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:37:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:37:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:37:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:37:09] [Rank 0] step:8501/10000 train_time:693394ms step_avg:81.57ms +[2025-07-06 14:37:09] [Rank 0] step:8501/10000 train_time:693394ms step_avg:81.57ms +[2025-07-06 14:37:11] [Rank 0] step:8521/10000 train_time:694906ms step_avg:81.55ms +[2025-07-06 14:37:11] [Rank 0] step:8521/10000 train_time:694906ms step_avg:81.55ms +[2025-07-06 14:37:12] [Rank 0] step:8541/10000 train_time:696399ms step_avg:81.54ms +[2025-07-06 14:37:12] [Rank 0] step:8541/10000 train_time:696399ms step_avg:81.54ms +[2025-07-06 14:37:15] [Rank 0] step:8561/10000 train_time:698542ms step_avg:81.60ms +[2025-07-06 14:37:15] [Rank 0] step:8561/10000 train_time:698542ms step_avg:81.60ms +[2025-07-06 14:37:16] [Rank 0] step:8581/10000 train_time:700037ms step_avg:81.58ms +[2025-07-06 14:37:16] [Rank 0] step:8581/10000 train_time:700037ms step_avg:81.58ms +[2025-07-06 14:37:18] [Rank 0] step:8601/10000 train_time:701531ms step_avg:81.56ms +[2025-07-06 14:37:18] [Rank 0] step:8601/10000 train_time:701531ms step_avg:81.56ms +[2025-07-06 14:37:19] [Rank 0] step:8621/10000 train_time:703029ms step_avg:81.55ms +[2025-07-06 14:37:19] [Rank 0] step:8621/10000 train_time:703029ms step_avg:81.55ms +[2025-07-06 14:37:21] [Rank 0] step:8641/10000 train_time:705209ms step_avg:81.61ms +[2025-07-06 14:37:21] [Rank 0] step:8641/10000 train_time:705209ms step_avg:81.61ms +[2025-07-06 14:37:23] [Rank 0] step:8661/10000 train_time:706685ms step_avg:81.59ms +[2025-07-06 14:37:23] [Rank 0] step:8661/10000 train_time:706685ms step_avg:81.59ms +[2025-07-06 14:37:24] [Rank 0] step:8681/10000 train_time:708184ms step_avg:81.58ms +[2025-07-06 14:37:24] [Rank 0] step:8681/10000 train_time:708184ms step_avg:81.58ms +[2025-07-06 14:37:26] [Rank 0] step:8701/10000 train_time:709681ms step_avg:81.56ms +[2025-07-06 14:37:26] [Rank 0] step:8701/10000 train_time:709681ms step_avg:81.56ms +[2025-07-06 14:37:27] [Rank 0] step:8721/10000 train_time:711179ms step_avg:81.55ms +[2025-07-06 14:37:27] [Rank 0] step:8721/10000 train_time:711179ms step_avg:81.55ms +[2025-07-06 14:37:29] [Rank 0] step:8741/10000 train_time:713329ms step_avg:81.61ms +[2025-07-06 14:37:29] [Rank 0] step:8741/10000 train_time:713329ms step_avg:81.61ms +[2025-07-06 14:37:31] [Rank 0] step:8761/10000 train_time:714827ms step_avg:81.59ms +[2025-07-06 14:37:31] [Rank 0] step:8761/10000 train_time:714827ms step_avg:81.59ms +[2025-07-06 14:37:32] [Rank 0] step:8781/10000 train_time:716326ms step_avg:81.58ms +[2025-07-06 14:37:32] [Rank 0] step:8781/10000 train_time:716326ms step_avg:81.58ms +[2025-07-06 14:37:34] [Rank 0] step:8801/10000 train_time:717825ms step_avg:81.56ms +[2025-07-06 14:37:34] [Rank 
0] step:8801/10000 train_time:717825ms step_avg:81.56ms +[2025-07-06 14:37:36] [Rank 0] step:8821/10000 train_time:719586ms step_avg:81.58ms +[2025-07-06 14:37:36] [Rank 0] step:8821/10000 train_time:719586ms step_avg:81.58ms +[2025-07-06 14:37:38] [Rank 0] step:8841/10000 train_time:721575ms step_avg:81.62ms +[2025-07-06 14:37:38] [Rank 0] step:8841/10000 train_time:721575ms step_avg:81.62ms +[2025-07-06 14:37:39] [Rank 0] step:8861/10000 train_time:723176ms step_avg:81.61ms +[2025-07-06 14:37:39] [Rank 0] step:8861/10000 train_time:723176ms step_avg:81.61ms +[2025-07-06 14:37:41] [Rank 0] step:8881/10000 train_time:724677ms step_avg:81.60ms +[2025-07-06 14:37:41] [Rank 0] step:8881/10000 train_time:724677ms step_avg:81.60ms +[2025-07-06 14:37:42] [Rank 0] step:8901/10000 train_time:726180ms step_avg:81.58ms +[2025-07-06 14:37:42] [Rank 0] step:8901/10000 train_time:726180ms step_avg:81.58ms +[2025-07-06 14:37:44] [Rank 0] step:8921/10000 train_time:728338ms step_avg:81.64ms +[2025-07-06 14:37:44] [Rank 0] step:8921/10000 train_time:728338ms step_avg:81.64ms +[2025-07-06 14:37:46] [Rank 0] step:8941/10000 train_time:729942ms step_avg:81.64ms +[2025-07-06 14:37:46] [Rank 0] step:8941/10000 train_time:729942ms step_avg:81.64ms +[2025-07-06 14:37:47] [Rank 0] step:8961/10000 train_time:731444ms step_avg:81.63ms +[2025-07-06 14:37:47] [Rank 0] step:8961/10000 train_time:731444ms step_avg:81.63ms +[2025-07-06 14:37:49] [Rank 0] step:8981/10000 train_time:732948ms step_avg:81.61ms +[2025-07-06 14:37:49] [Rank 0] step:8981/10000 train_time:732948ms step_avg:81.61ms +[2025-07-06 14:37:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:37:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:37:51] [Rank 0] PRINT: step:9000/10000 train_loss:1.1741 val_loss:1.1693 train_time:734452ms step_avg:81.61ms +[2025-07-06 14:37:51] [Rank 0] PRINT: step:9000/10000 train_loss:1.1741 val_loss:1.1693 train_time:734452ms step_avg:81.61ms +[2025-07-06 14:37:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:37:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:37:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 14:37:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 14:37:52] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:37:52] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:43:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:43:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:43:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:43:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:43:19] [Rank 0] Total Loss: 4.6201 +[2025-07-06 14:43:19] [Rank 0] Total Loss: 4.6201 +[2025-07-06 14:43:19] [Rank 0] Total FTA: 0.4026 +[2025-07-06 14:43:19] [Rank 0] Total FTA: 0.4026 +[2025-07-06 14:43:19] [Rank 0] Group 0 Loss: 4.7948 +[2025-07-06 14:43:19] [Rank 0] Group 0 Loss: 4.7948 +[2025-07-06 14:43:19] [Rank 0] Group 1 Loss: 4.5819 +[2025-07-06 14:43:19] [Rank 0] Group 1 Loss: 4.5819 +[2025-07-06 14:43:19] [Rank 0] Group 2 Loss: 4.2130 +[2025-07-06 14:43:19] [Rank 0] Group 2 Loss: 4.2130 +[2025-07-06 14:43:19] [Rank 0] Group 3 Loss: 4.7670 +[2025-07-06 14:43:19] [Rank 0] Group 3 Loss: 4.7670 +[2025-07-06 14:43:19] [Rank 0] Group 4 Loss: 4.5566 +[2025-07-06 14:43:19] [Rank 0] Group 4 Loss: 4.5566 +[2025-07-06 14:43:19] [Rank 0] Group 5 Loss: 4.5782 +[2025-07-06 14:43:19] [Rank 0] Group 5 Loss: 4.5782 +[2025-07-06 14:43:19] [Rank 0] Group 6 Loss: 4.5510 +[2025-07-06 14:43:19] [Rank 0] Group 6 Loss: 4.5510 +[2025-07-06 14:43:19] [Rank 0] Group 7 Loss: 4.5303 +[2025-07-06 14:43:19] [Rank 0] Group 7 Loss: 4.5303 +[2025-07-06 14:43:19] [Rank 0] Group 8 Loss: 4.6495 +[2025-07-06 14:43:19] [Rank 0] Group 8 Loss: 4.6495 +[2025-07-06 14:43:19] [Rank 0] Group 9 Loss: 4.6108 +[2025-07-06 14:43:19] [Rank 0] Group 9 Loss: 4.6108 +[2025-07-06 14:43:19] [Rank 0] Group 10 Loss: 4.6956 +[2025-07-06 14:43:19] [Rank 0] Group 10 Loss: 4.6956 +[2025-07-06 14:43:19] [Rank 0] Group 11 Loss: 4.6536 +[2025-07-06 14:43:19] [Rank 0] Group 11 Loss: 4.6536 +[2025-07-06 14:43:19] [Rank 0] Group 0 FTA: 0.4824 +[2025-07-06 14:43:19] [Rank 0] Group 0 FTA: 0.4824 +[2025-07-06 14:43:19] [Rank 0] Group 1 FTA: 0.4714 +[2025-07-06 14:43:19] [Rank 0] Group 1 FTA: 0.4714 +[2025-07-06 14:43:19] [Rank 0] Group 2 FTA: 0.2422 +[2025-07-06 14:43:19] [Rank 0] Group 2 FTA: 0.2422 +[2025-07-06 14:43:19] [Rank 0] Group 3 FTA: 0.2995 +[2025-07-06 14:43:19] [Rank 0] Group 3 FTA: 0.2995 +[2025-07-06 14:43:19] [Rank 0] Group 4 FTA: 0.3073 +[2025-07-06 14:43:19] [Rank 0] Group 4 FTA: 0.3073 +[2025-07-06 14:43:19] [Rank 0] Group 5 FTA: 0.4141 +[2025-07-06 14:43:19] [Rank 0] Group 5 FTA: 0.4141 +[2025-07-06 14:43:19] [Rank 0] Group 6 FTA: 0.4193 +[2025-07-06 14:43:19] [Rank 0] Group 6 FTA: 0.4193 +[2025-07-06 14:43:19] [Rank 0] Group 7 FTA: 0.4375 +[2025-07-06 14:43:19] [Rank 0] Group 7 FTA: 0.4375 +[2025-07-06 14:43:19] [Rank 0] Group 8 FTA: 0.4349 +[2025-07-06 14:43:19] [Rank 0] Group 8 FTA: 0.4349 +[2025-07-06 14:43:19] [Rank 0] Group 9 FTA: 0.3750 +[2025-07-06 14:43:19] [Rank 0] Group 9 FTA: 0.3750 +[2025-07-06 14:43:19] [Rank 0] Group 10 FTA: 0.4141 +[2025-07-06 14:43:19] [Rank 0] Group 10 FTA: 0.4141 +[2025-07-06 14:43:19] [Rank 0] Group 11 FTA: 0.4170 +[2025-07-06 14:43:19] [Rank 0] Group 11 FTA: 0.4170 +[2025-07-06 14:43:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:43:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:43:19] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:43:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:43:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:43:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:43:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:43:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:43:21] [Rank 0] step:9001/10000 train_time:734482ms step_avg:81.60ms +[2025-07-06 14:43:21] [Rank 0] step:9001/10000 train_time:734482ms step_avg:81.60ms +[2025-07-06 14:43:22] [Rank 0] step:9021/10000 train_time:736680ms step_avg:81.66ms +[2025-07-06 14:43:22] [Rank 0] step:9021/10000 train_time:736680ms step_avg:81.66ms +[2025-07-06 14:43:24] [Rank 0] step:9041/10000 train_time:738174ms step_avg:81.65ms +[2025-07-06 14:43:24] [Rank 0] step:9041/10000 train_time:738174ms step_avg:81.65ms +[2025-07-06 14:43:25] [Rank 0] step:9061/10000 train_time:739669ms step_avg:81.63ms +[2025-07-06 14:43:25] [Rank 0] step:9061/10000 train_time:739669ms step_avg:81.63ms +[2025-07-06 14:43:27] [Rank 0] step:9081/10000 train_time:741165ms step_avg:81.62ms +[2025-07-06 14:43:27] [Rank 0] step:9081/10000 train_time:741165ms step_avg:81.62ms +[2025-07-06 14:43:29] [Rank 0] step:9101/10000 train_time:743319ms step_avg:81.67ms +[2025-07-06 14:43:29] [Rank 0] step:9101/10000 train_time:743319ms step_avg:81.67ms +[2025-07-06 14:43:30] [Rank 0] step:9121/10000 train_time:744815ms step_avg:81.66ms +[2025-07-06 14:43:30] [Rank 0] step:9121/10000 train_time:744815ms step_avg:81.66ms +[2025-07-06 14:43:32] [Rank 0] step:9141/10000 train_time:746312ms step_avg:81.64ms +[2025-07-06 14:43:32] [Rank 0] step:9141/10000 train_time:746312ms step_avg:81.64ms +[2025-07-06 14:43:33] [Rank 0] step:9161/10000 train_time:747812ms step_avg:81.63ms +[2025-07-06 14:43:33] [Rank 0] step:9161/10000 train_time:747812ms step_avg:81.63ms +[2025-07-06 14:43:36] [Rank 0] step:9181/10000 train_time:749989ms step_avg:81.69ms +[2025-07-06 14:43:36] [Rank 0] step:9181/10000 train_time:749989ms step_avg:81.69ms +[2025-07-06 14:43:37] [Rank 0] step:9201/10000 train_time:751465ms step_avg:81.67ms +[2025-07-06 14:43:37] [Rank 0] step:9201/10000 train_time:751465ms step_avg:81.67ms +[2025-07-06 14:43:39] [Rank 0] step:9221/10000 train_time:752964ms step_avg:81.66ms +[2025-07-06 14:43:39] [Rank 0] step:9221/10000 train_time:752964ms step_avg:81.66ms +[2025-07-06 14:43:40] [Rank 0] step:9241/10000 train_time:754464ms step_avg:81.64ms +[2025-07-06 14:43:40] [Rank 0] step:9241/10000 train_time:754464ms step_avg:81.64ms +[2025-07-06 14:43:42] [Rank 0] step:9261/10000 train_time:755962ms step_avg:81.63ms +[2025-07-06 14:43:42] [Rank 0] step:9261/10000 train_time:755962ms step_avg:81.63ms +[2025-07-06 14:43:44] [Rank 0] step:9281/10000 train_time:758109ms step_avg:81.68ms +[2025-07-06 14:43:44] [Rank 0] step:9281/10000 train_time:758109ms step_avg:81.68ms +[2025-07-06 14:43:45] [Rank 0] step:9301/10000 train_time:759605ms step_avg:81.67ms +[2025-07-06 14:43:45] [Rank 
0] step:9301/10000 train_time:759605ms step_avg:81.67ms +[2025-07-06 14:43:47] [Rank 0] step:9321/10000 train_time:761105ms step_avg:81.65ms +[2025-07-06 14:43:47] [Rank 0] step:9321/10000 train_time:761105ms step_avg:81.65ms +[2025-07-06 14:43:48] [Rank 0] step:9341/10000 train_time:762606ms step_avg:81.64ms +[2025-07-06 14:43:48] [Rank 0] step:9341/10000 train_time:762606ms step_avg:81.64ms +[2025-07-06 14:43:50] [Rank 0] step:9361/10000 train_time:764771ms step_avg:81.70ms +[2025-07-06 14:43:50] [Rank 0] step:9361/10000 train_time:764771ms step_avg:81.70ms +[2025-07-06 14:43:52] [Rank 0] step:9381/10000 train_time:766250ms step_avg:81.68ms +[2025-07-06 14:43:52] [Rank 0] step:9381/10000 train_time:766250ms step_avg:81.68ms +[2025-07-06 14:43:53] [Rank 0] step:9401/10000 train_time:767751ms step_avg:81.67ms +[2025-07-06 14:43:53] [Rank 0] step:9401/10000 train_time:767751ms step_avg:81.67ms +[2025-07-06 14:43:55] [Rank 0] step:9421/10000 train_time:769251ms step_avg:81.65ms +[2025-07-06 14:43:55] [Rank 0] step:9421/10000 train_time:769251ms step_avg:81.65ms +[2025-07-06 14:43:56] [Rank 0] step:9441/10000 train_time:770752ms step_avg:81.64ms +[2025-07-06 14:43:56] [Rank 0] step:9441/10000 train_time:770752ms step_avg:81.64ms +[2025-07-06 14:43:58] [Rank 0] step:9461/10000 train_time:772488ms step_avg:81.65ms +[2025-07-06 14:43:58] [Rank 0] step:9461/10000 train_time:772488ms step_avg:81.65ms +[2025-07-06 14:44:00] [Rank 0] step:9481/10000 train_time:774092ms step_avg:81.65ms +[2025-07-06 14:44:00] [Rank 0] step:9481/10000 train_time:774092ms step_avg:81.65ms +[2025-07-06 14:44:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:44:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:44:02] [Rank 0] PRINT: step:9500/10000 train_loss:1.1641 val_loss:1.1612 train_time:775597ms step_avg:81.64ms +[2025-07-06 14:44:02] [Rank 0] PRINT: step:9500/10000 train_loss:1.1641 val_loss:1.1612 train_time:775597ms step_avg:81.64ms +[2025-07-06 14:44:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:44:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:44:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 14:44:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 14:44:02] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:44:02] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 14:49:28] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:49:28] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 14:49:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:49:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 14:49:28] [Rank 0] Total Loss: 4.6677 +[2025-07-06 14:49:28] [Rank 0] Total Loss: 4.6677 +[2025-07-06 14:49:28] [Rank 0] Total FTA: 0.3964 +[2025-07-06 14:49:28] [Rank 0] Total FTA: 0.3964 +[2025-07-06 14:49:28] [Rank 0] Group 0 Loss: 4.8455 +[2025-07-06 14:49:28] [Rank 0] Group 0 Loss: 4.8455 +[2025-07-06 14:49:28] [Rank 0] Group 1 Loss: 4.4191 +[2025-07-06 14:49:28] [Rank 0] Group 1 Loss: 4.4191 +[2025-07-06 14:49:28] [Rank 0] Group 2 Loss: 4.3587 +[2025-07-06 14:49:28] [Rank 0] Group 2 Loss: 4.3587 +[2025-07-06 14:49:28] [Rank 0] Group 3 Loss: 4.7456 +[2025-07-06 14:49:28] [Rank 0] Group 3 Loss: 4.7456 +[2025-07-06 14:49:28] [Rank 0] Group 4 Loss: 4.6767 +[2025-07-06 14:49:28] [Rank 0] Group 4 Loss: 4.6767 +[2025-07-06 14:49:28] [Rank 0] Group 5 Loss: 4.6645 +[2025-07-06 14:49:28] [Rank 0] Group 5 Loss: 4.6645 +[2025-07-06 14:49:28] [Rank 0] Group 6 Loss: 4.6053 +[2025-07-06 14:49:28] [Rank 0] Group 6 Loss: 4.6053 +[2025-07-06 14:49:28] [Rank 0] Group 7 Loss: 4.6369 +[2025-07-06 14:49:28] [Rank 0] Group 7 Loss: 4.6369 +[2025-07-06 14:49:28] [Rank 0] Group 8 Loss: 4.7052 +[2025-07-06 14:49:28] [Rank 0] Group 8 Loss: 4.7052 +[2025-07-06 14:49:28] [Rank 0] Group 9 Loss: 4.7269 +[2025-07-06 14:49:28] [Rank 0] Group 9 Loss: 4.7269 +[2025-07-06 14:49:28] [Rank 0] Group 10 Loss: 4.6869 +[2025-07-06 14:49:28] [Rank 0] Group 10 Loss: 4.6869 +[2025-07-06 14:49:28] [Rank 0] Group 11 Loss: 4.7084 +[2025-07-06 14:49:28] [Rank 0] Group 11 Loss: 4.7084 +[2025-07-06 14:49:28] [Rank 0] Group 0 FTA: 0.5072 +[2025-07-06 14:49:28] [Rank 0] Group 0 FTA: 0.5072 +[2025-07-06 14:49:28] [Rank 0] Group 1 FTA: 0.5182 +[2025-07-06 14:49:28] [Rank 0] Group 1 FTA: 0.5182 +[2025-07-06 14:49:28] [Rank 0] Group 2 FTA: 0.2604 +[2025-07-06 14:49:28] [Rank 0] Group 2 FTA: 0.2604 +[2025-07-06 14:49:28] [Rank 0] Group 3 FTA: 0.2057 +[2025-07-06 14:49:28] [Rank 0] Group 3 FTA: 0.2057 +[2025-07-06 14:49:28] [Rank 0] Group 4 FTA: 0.3203 +[2025-07-06 14:49:28] [Rank 0] Group 4 FTA: 0.3203 +[2025-07-06 14:49:28] [Rank 0] Group 5 FTA: 0.4271 +[2025-07-06 14:49:28] [Rank 0] Group 5 FTA: 0.4271 +[2025-07-06 14:49:28] [Rank 0] Group 6 FTA: 0.3750 +[2025-07-06 14:49:28] [Rank 0] Group 6 FTA: 0.3750 +[2025-07-06 14:49:28] [Rank 0] Group 7 FTA: 0.4010 +[2025-07-06 14:49:28] [Rank 0] Group 7 FTA: 0.4010 +[2025-07-06 14:49:28] [Rank 0] Group 8 FTA: 0.4062 +[2025-07-06 14:49:28] [Rank 0] Group 8 FTA: 0.4062 +[2025-07-06 14:49:28] [Rank 0] Group 9 FTA: 0.4297 +[2025-07-06 14:49:28] [Rank 0] Group 9 FTA: 0.4297 +[2025-07-06 14:49:28] [Rank 0] Group 10 FTA: 0.3984 +[2025-07-06 14:49:28] [Rank 0] Group 10 FTA: 0.3984 +[2025-07-06 14:49:28] [Rank 0] Group 11 FTA: 0.4004 +[2025-07-06 14:49:28] [Rank 0] Group 11 FTA: 0.4004 +[2025-07-06 14:49:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:49:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-06 14:49:29] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:49:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-06 14:49:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:49:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-06 14:49:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:49:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-06 14:49:29] [Rank 0] step:9501/10000 train_time:775619ms step_avg:81.64ms +[2025-07-06 14:49:29] [Rank 0] step:9501/10000 train_time:775619ms step_avg:81.64ms +[2025-07-06 14:49:31] [Rank 0] step:9521/10000 train_time:777213ms step_avg:81.63ms +[2025-07-06 14:49:31] [Rank 0] step:9521/10000 train_time:777213ms step_avg:81.63ms +[2025-07-06 14:49:33] [Rank 0] step:9541/10000 train_time:778761ms step_avg:81.62ms +[2025-07-06 14:49:33] [Rank 0] step:9541/10000 train_time:778761ms step_avg:81.62ms +[2025-07-06 14:49:35] [Rank 0] step:9561/10000 train_time:780956ms step_avg:81.68ms +[2025-07-06 14:49:35] [Rank 0] step:9561/10000 train_time:780956ms step_avg:81.68ms +[2025-07-06 14:49:36] [Rank 0] step:9581/10000 train_time:782550ms step_avg:81.68ms +[2025-07-06 14:49:36] [Rank 0] step:9581/10000 train_time:782550ms step_avg:81.68ms +[2025-07-06 14:49:38] [Rank 0] step:9601/10000 train_time:784046ms step_avg:81.66ms +[2025-07-06 14:49:38] [Rank 0] step:9601/10000 train_time:784046ms step_avg:81.66ms +[2025-07-06 14:49:39] [Rank 0] step:9621/10000 train_time:785543ms step_avg:81.65ms +[2025-07-06 14:49:39] [Rank 0] step:9621/10000 train_time:785543ms step_avg:81.65ms +[2025-07-06 14:49:42] [Rank 0] step:9641/10000 train_time:787689ms step_avg:81.70ms +[2025-07-06 14:49:42] [Rank 0] step:9641/10000 train_time:787689ms step_avg:81.70ms +[2025-07-06 14:49:43] [Rank 0] step:9661/10000 train_time:789287ms step_avg:81.70ms +[2025-07-06 14:49:43] [Rank 0] step:9661/10000 train_time:789287ms step_avg:81.70ms +[2025-07-06 14:49:45] [Rank 0] step:9681/10000 train_time:790784ms step_avg:81.68ms +[2025-07-06 14:49:45] [Rank 0] step:9681/10000 train_time:790784ms step_avg:81.68ms +[2025-07-06 14:49:46] [Rank 0] step:9701/10000 train_time:792281ms step_avg:81.67ms +[2025-07-06 14:49:46] [Rank 0] step:9701/10000 train_time:792281ms step_avg:81.67ms +[2025-07-06 14:49:48] [Rank 0] step:9721/10000 train_time:793829ms step_avg:81.66ms +[2025-07-06 14:49:48] [Rank 0] step:9721/10000 train_time:793829ms step_avg:81.66ms +[2025-07-06 14:49:50] [Rank 0] step:9741/10000 train_time:795931ms step_avg:81.71ms +[2025-07-06 14:49:50] [Rank 0] step:9741/10000 train_time:795931ms step_avg:81.71ms +[2025-07-06 14:49:51] [Rank 0] step:9761/10000 train_time:797429ms step_avg:81.70ms +[2025-07-06 14:49:51] [Rank 0] step:9761/10000 train_time:797429ms step_avg:81.70ms +[2025-07-06 14:49:53] [Rank 0] step:9781/10000 train_time:798927ms step_avg:81.68ms +[2025-07-06 14:49:53] [Rank 0] step:9781/10000 train_time:798927ms step_avg:81.68ms +[2025-07-06 14:49:54] [Rank 0] step:9801/10000 train_time:800524ms step_avg:81.68ms +[2025-07-06 14:49:54] [Rank 
0] step:9801/10000 train_time:800524ms step_avg:81.68ms +[2025-07-06 14:49:56] [Rank 0] step:9821/10000 train_time:802667ms step_avg:81.73ms +[2025-07-06 14:49:56] [Rank 0] step:9821/10000 train_time:802667ms step_avg:81.73ms +[2025-07-06 14:49:58] [Rank 0] step:9841/10000 train_time:804165ms step_avg:81.72ms +[2025-07-06 14:49:58] [Rank 0] step:9841/10000 train_time:804165ms step_avg:81.72ms +[2025-07-06 14:50:00] [Rank 0] step:9861/10000 train_time:805770ms step_avg:81.71ms +[2025-07-06 14:50:00] [Rank 0] step:9861/10000 train_time:805770ms step_avg:81.71ms +[2025-07-06 14:50:01] [Rank 0] step:9881/10000 train_time:807270ms step_avg:81.70ms +[2025-07-06 14:50:01] [Rank 0] step:9881/10000 train_time:807270ms step_avg:81.70ms +[2025-07-06 14:50:03] [Rank 0] step:9901/10000 train_time:808770ms step_avg:81.69ms +[2025-07-06 14:50:03] [Rank 0] step:9901/10000 train_time:808770ms step_avg:81.69ms +[2025-07-06 14:50:05] [Rank 0] step:9921/10000 train_time:810907ms step_avg:81.74ms +[2025-07-06 14:50:05] [Rank 0] step:9921/10000 train_time:810907ms step_avg:81.74ms +[2025-07-06 14:50:06] [Rank 0] step:9941/10000 train_time:812410ms step_avg:81.72ms +[2025-07-06 14:50:06] [Rank 0] step:9941/10000 train_time:812410ms step_avg:81.72ms +[2025-07-06 14:50:08] [Rank 0] step:9961/10000 train_time:813912ms step_avg:81.71ms +[2025-07-06 14:50:08] [Rank 0] step:9961/10000 train_time:813912ms step_avg:81.71ms +[2025-07-06 14:50:09] [Rank 0] step:9981/10000 train_time:815412ms step_avg:81.70ms +[2025-07-06 14:50:09] [Rank 0] step:9981/10000 train_time:815412ms step_avg:81.70ms +[2025-07-06 14:50:11] [Rank 0] step:10000/10000 train_time:817507ms step_avg:81.75ms +[2025-07-06 14:50:11] [Rank 0] step:10000/10000 train_time:817507ms step_avg:81.75ms +[2025-07-06 14:50:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:50:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 14:50:12] [Rank 0] PRINT: step:10000/10000 train_loss:1.1568 val_loss:1.1554 train_time:817587ms step_avg:81.76ms +[2025-07-06 14:50:12] [Rank 0] PRINT: step:10000/10000 train_loss:1.1568 val_loss:1.1554 train_time:817587ms step_avg:81.76ms +[2025-07-06 14:50:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:50:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 14:50:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 14:50:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 14:50:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 14:55:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 14:55:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 14:55:40] [Rank 0] Total Loss: 4.7050
+[2025-07-06 14:55:40] [Rank 0] Total FTA: 0.4058
+[2025-07-06 14:55:40] [Rank 0] Group 0 Loss: 4.7892
+[2025-07-06 14:55:40] [Rank 0] Group 1 Loss: 4.6513
+[2025-07-06 14:55:40] [Rank 0] Group 2 Loss: 4.3577
+[2025-07-06 14:55:40] [Rank 0] Group 3 Loss: 4.7987
+[2025-07-06 14:55:40] [Rank 0] Group 4 Loss: 4.7220
+[2025-07-06 14:55:40] [Rank 0] Group 5 Loss: 4.6707
+[2025-07-06 14:55:40] [Rank 0] Group 6 Loss: 4.6104
+[2025-07-06 14:55:40] [Rank 0] Group 7 Loss: 4.7403
+[2025-07-06 14:55:40] [Rank 0] Group 8 Loss: 4.7887
+[2025-07-06 14:55:40] [Rank 0] Group 9 Loss: 4.7117
+[2025-07-06 14:55:40] [Rank 0] Group 10 Loss: 4.7369
+[2025-07-06 14:55:40] [Rank 0] Group 11 Loss: 4.7369
+[2025-07-06 14:55:40] [Rank 0] Group 0 FTA: 0.4915
+[2025-07-06 14:55:40] [Rank 0] Group 1 FTA: 0.4792
+[2025-07-06 14:55:40] [Rank 0] Group 2 FTA: 0.2240
+[2025-07-06 14:55:40] [Rank 0] Group 3 FTA: 0.2917
+[2025-07-06 14:55:40] [Rank 0] Group 4 FTA: 0.3724
+[2025-07-06 14:55:40] [Rank 0] Group 5 FTA: 0.4375
+[2025-07-06 14:55:40] [Rank 0] Group 6 FTA: 0.4141
+[2025-07-06 14:55:40] [Rank 0] Group 7 FTA: 0.4193
+[2025-07-06 14:55:40] [Rank 0] Group 8 FTA: 0.3854
+[2025-07-06 14:55:40] [Rank 0] Group 9 FTA: 0.3789
+[2025-07-06 14:55:40] [Rank 0] Group 10 FTA: 0.4707
+[2025-07-06 14:55:40] [Rank 0] Group 11 FTA: 0.3994
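+# Illustrative sketch (not from the original log): how the 12 groups above
+# arise. It mirrors generate_powerlaw_selection_counts(m=11) from the script
+# logged below (M_FOR_POWERLAW = 11):
+m = 11
+for g in range(m + 1):
+    num_classes = 1 if g == 0 else 2 ** (g - 1)   # group 0: 1 class, group g: 2**(g-1) classes
+    samples_per_class = 2 ** (m - g)              # group 0: 2048 samples ... group 11: 1 sample
+    print(g, num_classes, samples_per_class)
+# So the groups run from one frequent class (group 0) to 1024 rare single-sample
+# classes (group 11), which is why loss and FTA are tracked per group.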
+[2025-07-06 14:55:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-06 14:55:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-06 14:55:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-06 14:55:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-06 14:55:41] [Rank 0] step:10001/10000 train_time:817608ms step_avg:81.75ms
+[2025-07-06 14:55:41] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 14:55:41 2025 ---
+[2025-07-06 14:55:42] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..cfbee1e0ff407a5293fb2b1abbe41863ea563e12
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 43,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "27b5ee61-ad50-4218-b338-ffd9e03356ab",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..d2b0651eb927bacee24ece33f289a5d98034664b
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3f0ecc5c81a914dd87909aed3a0fdd7d6629eee22420999e91c99e24a7963034
+size 436526
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..aae595073f70ac5be096eb7821b2a143b2082094
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10246c3037849668bf4ae37d5dff8c95324eec58a40a16cf8422a6e5cae8095e
+size 304157
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..55da148bce56cf8ba14c0357e41f8581d32e4697
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b165fb26c2cd7fc97a9c18a9483d1e801ce25df71a17f0ea10beaf5b76d07421
+size 103081
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..f57909be952eba6ea6c0cf00dedaa7a8cfa3d0e8
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c61051e2d1df966878a501132413d3dbfb45c79985bccf88a8043d2fa7c3289
+size 112878
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/training_log_27b5ee61-ad50-4218-b338-ffd9e03356ab.txt b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/training_log_27b5ee61-ad50-4218-b338-ffd9e03356ab.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4a89f5bd64b8d7ecacd6b6605768b34a5715451d
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/training_log_27b5ee61-ad50-4218-b338-ffd9e03356ab.txt
@@ -0,0 +1,5144 @@
+[2025-07-08 03:07:30] [Rank 0] PRINT: --- Script Start: Tue Jul 8 03:07:30 2025 ---
+[2025-07-08 03:07:30] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002)
+[2025-07-08 03:07:30] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-08 03:07:30] [Rank 0] PRINT: Using fixed seed: 43
+[2025-07-08 03:07:30] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43
+[2025-07-08 03:07:30] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op,
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
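+# Illustrative check (not in the original script): the folder-name template
+# above is what produces the directory names seen throughout this diff, e.g.
+# f"mode_{0}_param_{'qkvo'}_lr_{0.0002}_seed_{43}" == "mode_0_param_qkvo_lr_0.0002_seed_43".
+# Note that the absolute run_dir_path_str above is recomputed from base_log_dir
+# on the master process just below, so it only acts as a fallback on other ranks.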
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append to the logfile exactly once (guarded: logfile stays None on
+        # non-master ranks and on re-runs of an existing run directory); an
+        # unguarded second write here would duplicate every log line.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
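+    # Usage sketch (illustrative, inferred from print0 above): a message that
+    # starts with "PRINT:" is echoed to stdout with the prefix stripped and is
+    # appended, timestamped, to the logfile; any other message reaches stdout
+    # only when console=True is passed.
+    #   print0("PRINT: hello")        -> console + logfile
+    #   print0("debug detail")        -> logfile only
+    #   print0("shown", console=True) -> console + logfile
+    # ...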
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
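+        # (Descriptive comment, inferred from the probes in this block rather
+        # than from the model source: with target_seq=None the qkvo GPT may
+        # return logits alone or a (loss, logits) tuple, so both shapes are
+        # handled below; the training loop always passes targets and consumes
+        # a scalar loss.)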
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 03:07:30] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
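+# Launch sketch, for reference (the script filename and GPU count are assumptions, not
+# taken from this log): the DDP setup above reads RANK/LOCAL_RANK/WORLD_SIZE from the
+# environment, which torchrun provides, e.g.
+#   torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-3 --seed 42
+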
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the logfile exactly once per call
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 03:07:31] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-08 03:07:31] [Rank 0] PRINT: Constructing model...
+[2025-07-08 03:07:33] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-08 03:07:33] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-08 03:07:33] [Rank 0] PRINT: Testing model forward function:
+[2025-07-08 03:07:33] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-08 03:07:33] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-08 03:07:33] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-08 03:07:33] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-08 03:07:33] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-08 03:07:33] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-08 03:07:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-08 03:07:34] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-08 03:07:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-08 03:07:34] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-08 03:07:34] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-08 03:07:34] [Rank 0] PRINT: Model compilation complete.
+[2025-07-08 03:07:34] [Rank 0] PRINT: Starting warmup...
+[2025-07-08 03:08:39] [Rank 0] PRINT: Warmup complete.
+[2025-07-08 03:08:39] [Rank 0] PRINT: Starting training...
+[2025-07-08 03:08:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:08:46] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-08 03:08:48] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.42ms
+[2025-07-08 03:08:49] [Rank 0] step:41/10000 train_time:3203ms step_avg:78.13ms
+[2025-07-08 03:08:51] [Rank 0] step:61/10000 train_time:4657ms step_avg:76.34ms
+[2025-07-08 03:08:52] [Rank 0] step:81/10000 train_time:6114ms step_avg:75.48ms
+[2025-07-08 03:08:54] [Rank 0] step:101/10000 train_time:8260ms step_avg:81.78ms
+[2025-07-08 03:08:56] [Rank 0] step:121/10000 train_time:9714ms step_avg:80.28ms
+[2025-07-08 03:08:57] [Rank 0] step:141/10000 train_time:11169ms step_avg:79.21ms
+[2025-07-08 03:08:59] [Rank 0] step:161/10000 train_time:12626ms step_avg:78.42ms
+[2025-07-08 03:09:01] [Rank 0] step:181/10000 train_time:14342ms step_avg:79.24ms
+[2025-07-08 03:09:02] [Rank 0] step:201/10000 train_time:16207ms step_avg:80.63ms
+[2025-07-08 03:09:04] [Rank 0] step:221/10000 train_time:17661ms step_avg:79.91ms
+[2025-07-08 03:09:05] [Rank 0] step:241/10000 train_time:19119ms step_avg:79.33ms
+[2025-07-08 03:09:07] [Rank 0] step:261/10000 train_time:20577ms step_avg:78.84ms
+[2025-07-08 03:09:08] [Rank 0] step:281/10000 train_time:22271ms step_avg:79.26ms
+[2025-07-08 03:09:10] [Rank 0] step:301/10000 train_time:23730ms step_avg:78.84ms
+[2025-07-08 03:09:11] [Rank 0] step:321/10000 train_time:25189ms step_avg:78.47ms
+[2025-07-08 03:09:13] [Rank 0] step:341/10000 train_time:26647ms step_avg:78.14ms
+[2025-07-08 03:09:15] [Rank 0] step:361/10000 train_time:28110ms step_avg:77.87ms
+[2025-07-08 03:09:17] [Rank 0] step:381/10000 train_time:30409ms step_avg:79.81ms
+[2025-07-08 03:09:18] [Rank 0] step:401/10000 train_time:31868ms step_avg:79.47ms
+[2025-07-08 03:09:20] [Rank 0] step:421/10000 train_time:33331ms step_avg:79.17ms
+[2025-07-08 03:09:21] [Rank 0] step:441/10000 train_time:34794ms step_avg:78.90ms
+[2025-07-08 03:09:23] [Rank 0] step:461/10000 train_time:36492ms step_avg:79.16ms
+[2025-07-08 03:09:24] [Rank 0] step:481/10000 train_time:37956ms step_avg:78.91ms
+[2025-07-08 03:09:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:09:26] [Rank 0] PRINT: step:500/10000 train_loss:8.7308 val_loss:7.1021 train_time:39415ms step_avg:78.83ms
+[2025-07-08 03:09:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:09:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:09:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:14:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:14:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:14:56] [Rank 0] Total Loss: 7.6726
+[2025-07-08 03:14:56] [Rank 0] Total FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 0 Loss: 7.6810
+[2025-07-08 03:14:56] [Rank 0] Group 1 Loss: 7.6344
+[2025-07-08 03:14:56] [Rank 0] Group 2 Loss: 7.7779
+[2025-07-08 03:14:56] [Rank 0] Group 3 Loss: 7.6394
+[2025-07-08 03:14:56] [Rank 0] Group 4 Loss: 7.6771
+[2025-07-08 03:14:56] [Rank 0] Group 5 Loss: 7.6452
+[2025-07-08 03:14:56] [Rank 0] Group 6 Loss: 7.6801
+[2025-07-08 03:14:56] [Rank 0] Group 7 Loss: 7.6765
+[2025-07-08 03:14:56] [Rank 0] Group 8 Loss: 7.6424
+[2025-07-08 03:14:56] [Rank 0] Group 9 Loss: 7.6730
+[2025-07-08 03:14:56] [Rank 0] Group 10 Loss: 7.6739
+[2025-07-08 03:14:56] [Rank 0] Group 11 Loss: 7.6685
+[2025-07-08 03:14:56] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-08 03:14:56] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-08 03:14:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 03:14:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 03:14:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 03:14:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 03:14:57] [Rank 0] step:501/10000 train_time:39437ms step_avg:78.72ms
+[2025-07-08 03:14:59] [Rank 0] step:521/10000 train_time:40908ms step_avg:78.52ms
+[2025-07-08 03:15:01] [Rank 0] step:541/10000 train_time:43048ms step_avg:79.57ms
+[2025-07-08 03:15:03] [Rank 0] step:561/10000 train_time:44489ms step_avg:79.30ms
+[2025-07-08 03:15:04] [Rank 0] step:581/10000 train_time:45949ms step_avg:79.09ms
+[2025-07-08 03:15:05] [Rank 0] step:601/10000 train_time:47407ms step_avg:78.88ms
+[2025-07-08 03:15:07] [Rank 0] step:621/10000 train_time:48867ms step_avg:78.69ms
+[2025-07-08 03:15:09] [Rank 0] step:641/10000 train_time:50567ms step_avg:78.89ms
+[2025-07-08 03:15:10] [Rank 0] step:661/10000 train_time:52027ms step_avg:78.71ms
+[2025-07-08 03:15:12] [Rank 0] step:681/10000 train_time:53489ms step_avg:78.55ms
+[2025-07-08 03:15:13] [Rank 0] step:701/10000 train_time:54955ms step_avg:78.39ms
+[2025-07-08 03:15:15] [Rank 0] step:721/10000 train_time:57090ms step_avg:79.18ms
+[2025-07-08 03:15:17] [Rank 0] step:741/10000 train_time:58533ms step_avg:78.99ms
+[2025-07-08 03:15:18] [Rank 0] step:761/10000 train_time:60007ms step_avg:78.85ms
+[2025-07-08 03:15:20] [Rank 0] step:781/10000 train_time:61483ms step_avg:78.72ms
+[2025-07-08 03:15:21] [Rank 0] step:801/10000 train_time:62958ms step_avg:78.60ms
+[2025-07-08 03:15:23] [Rank 0] step:821/10000 train_time:64670ms step_avg:78.77ms
+[2025-07-08 03:15:24] [Rank 0] step:841/10000 train_time:66144ms step_avg:78.65ms
+[2025-07-08 03:15:26] [Rank 0] step:861/10000 train_time:67621ms step_avg:78.54ms
+[2025-07-08 03:15:27] [Rank 0] step:881/10000 train_time:69100ms step_avg:78.43ms
+[2025-07-08 03:15:29] [Rank 0] step:901/10000 train_time:70632ms step_avg:78.39ms
+[2025-07-08 03:15:31] [Rank 0] step:921/10000 train_time:72725ms step_avg:78.96ms
+[2025-07-08 03:15:32] [Rank 0] step:941/10000 train_time:74204ms step_avg:78.86ms
+[2025-07-08 03:15:34] [Rank 0] step:961/10000 train_time:75681ms step_avg:78.75ms
+[2025-07-08 03:15:36] [Rank 0] step:981/10000 train_time:77450ms step_avg:78.95ms
+[2025-07-08 03:15:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:15:39] [Rank 0] PRINT: step:1000/10000 train_loss:6.0336 val_loss:5.1164 train_time:79598ms step_avg:79.60ms
+[2025-07-08 03:15:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:15:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:15:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:21:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:21:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:21:07] [Rank 0] Total Loss: 6.1348
+[2025-07-08 03:21:07] [Rank 0] Total FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 0 Loss: 6.0935
+[2025-07-08 03:21:07] [Rank 0] Group 1 Loss: 6.1801
+[2025-07-08 03:21:07] [Rank 0] Group 2 Loss: 6.2065
+[2025-07-08 03:21:07] [Rank 0] Group 3 Loss: 6.0783
+[2025-07-08 03:21:07] [Rank 0] Group 4 Loss: 6.1676
+[2025-07-08 03:21:07] [Rank 0] Group 5 Loss: 6.1109
+[2025-07-08 03:21:07] [Rank 0] Group 6 Loss: 6.1456
+[2025-07-08 03:21:07] [Rank 0] Group 7 Loss: 6.1316
+[2025-07-08 03:21:07] [Rank 0] Group 8 Loss: 6.1218
+[2025-07-08 03:21:07] [Rank 0] Group 9 Loss: 6.1331
+[2025-07-08 03:21:07] [Rank 0] Group 10 Loss: 6.1439
+[2025-07-08 03:21:07] [Rank 0] Group 11 Loss: 6.1377
+[2025-07-08 03:21:07] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-08 03:21:07] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-08 03:21:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 03:21:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 03:21:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 03:21:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 03:21:09] [Rank 0] step:1001/10000 train_time:79620ms step_avg:79.54ms
+[2025-07-08 03:21:10] [Rank 0] step:1021/10000 train_time:81097ms step_avg:79.43ms
+[2025-07-08 03:21:12] [Rank 0] step:1041/10000 train_time:82567ms step_avg:79.32ms
+[2025-07-08 03:21:13] [Rank 0] step:1061/10000 train_time:84037ms step_avg:79.21ms
+[2025-07-08 03:21:15] [Rank 0] step:1081/10000 train_time:85771ms step_avg:79.34ms
+[2025-07-08 03:21:17] [Rank 0] step:1101/10000 train_time:87649ms step_avg:79.61ms
+[2025-07-08 03:21:18] [Rank 0] step:1121/10000 train_time:89119ms step_avg:79.50ms
+[2025-07-08 03:21:20] [Rank 0] step:1141/10000 train_time:90596ms step_avg:79.40ms
+[2025-07-08 03:21:21] [Rank 0] step:1161/10000 train_time:92072ms step_avg:79.30ms
+[2025-07-08 03:21:23] [Rank 0] step:1181/10000 train_time:93779ms step_avg:79.41ms
+[2025-07-08 03:21:24] [Rank 0] step:1201/10000 train_time:95257ms step_avg:79.31ms
+[2025-07-08 03:21:26] [Rank 0] step:1221/10000 train_time:96733ms step_avg:79.22ms
+[2025-07-08 03:21:27] [Rank 0] step:1241/10000 train_time:98210ms step_avg:79.14ms
+[2025-07-08 03:21:29] [Rank 0] step:1261/10000 train_time:100377ms step_avg:79.60ms
+[2025-07-08 03:21:31] [Rank 0] step:1281/10000 train_time:101836ms step_avg:79.50ms
+[2025-07-08 03:21:32] [Rank 0] step:1301/10000 train_time:103311ms step_avg:79.41ms
+[2025-07-08 03:21:34] [Rank 0] step:1321/10000 train_time:104789ms step_avg:79.33ms
+[2025-07-08 03:21:35] [Rank 0] step:1341/10000 train_time:106270ms step_avg:79.25ms
+[2025-07-08 03:21:37] [Rank 0] step:1361/10000 train_time:108414ms step_avg:79.66ms
+[2025-07-08 03:21:39] [Rank 0] step:1381/10000 train_time:109890ms step_avg:79.57ms
+[2025-07-08 03:21:40] [Rank 0] step:1401/10000 train_time:111372ms step_avg:79.49ms
+[2025-07-08 03:21:42] [Rank 0] step:1421/10000 train_time:112854ms step_avg:79.42ms
+[2025-07-08 03:21:44] [Rank 0] step:1441/10000 train_time:114587ms step_avg:79.52ms
+[2025-07-08 03:21:46] [Rank 0] step:1461/10000 train_time:116453ms step_avg:79.71ms
+[2025-07-08 03:21:47] [Rank 0] step:1481/10000 train_time:117931ms step_avg:79.63ms
+[2025-07-08 03:21:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:21:49] [Rank 0] PRINT: step:1500/10000 train_loss:4.4017 val_loss:3.7121 train_time:119409ms step_avg:79.61ms
+[2025-07-08 03:21:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:21:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:21:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:27:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:27:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:27:19] [Rank 0] Total Loss: 5.2064
+[2025-07-08 03:27:19] [Rank 0] Total FTA: 0.0746
+[2025-07-08 03:27:19] [Rank 0] Group 0 Loss: 5.2070
+[2025-07-08 03:27:19] [Rank 0] Group 1 Loss: 5.2817
+[2025-07-08 03:27:19] [Rank 0] Group 2 Loss: 5.2341
+[2025-07-08 03:27:19] [Rank 0] Group 3 Loss: 5.1679
+[2025-07-08 03:27:19] [Rank 0] Group 4 Loss: 5.2469
+[2025-07-08 03:27:19] [Rank 0] Group 5 Loss: 5.1593
+[2025-07-08 03:27:19] [Rank 0] Group 6 Loss: 5.1572
+[2025-07-08 03:27:19] [Rank 0] Group 7 Loss: 5.1930
+[2025-07-08 03:27:19] [Rank 0] Group 8 Loss: 5.1713
+[2025-07-08 03:27:19] [Rank 0] Group 9 Loss: 5.2594
+[2025-07-08 03:27:19] [Rank 0] Group 10 Loss: 5.1979
+[2025-07-08 03:27:19] [Rank 0] Group 11 Loss: 5.2121
+[2025-07-08 03:27:19] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-08 03:27:19] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 03:27:19] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-08 03:27:19] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-08 03:27:19] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-08 03:27:19] [Rank 0] Group 5 FTA: 0.0391
+[2025-07-08 03:27:19] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-08 03:27:19] [Rank 0] Group 7 FTA: 0.1094
+[2025-07-08 03:27:19] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-08 03:27:19] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-08 03:27:19] [Rank 0] Group 10 FTA: 0.0840
+[2025-07-08 03:27:19] [Rank 0] Group 11 FTA: 0.0791
+[2025-07-08 03:27:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 03:27:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 03:27:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 03:27:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 03:27:20] [Rank 0] step:1501/10000 train_time:119431ms step_avg:79.57ms
+[2025-07-08 03:27:22] [Rank 0] step:1521/10000 train_time:120904ms step_avg:79.49ms
+[2025-07-08 03:27:24] [Rank 0] step:1541/10000 train_time:123022ms step_avg:79.83ms
+[2025-07-08 03:27:26] [Rank 0] step:1561/10000 train_time:124491ms step_avg:79.75ms
+[2025-07-08 03:27:27] [Rank 0] step:1581/10000 train_time:125960ms step_avg:79.67ms
+[2025-07-08 03:27:28] [Rank 0] step:1601/10000 train_time:127432ms step_avg:79.60ms
+[2025-07-08 03:27:31] [Rank 0] step:1621/10000 train_time:129574ms step_avg:79.93ms
+[2025-07-08 03:27:32] [Rank 0] step:1641/10000 train_time:131027ms step_avg:79.85ms
+[2025-07-08 03:27:34] [Rank 0] step:1661/10000 train_time:132498ms step_avg:79.77ms
+[2025-07-08 03:27:35] [Rank 0] step:1681/10000 train_time:133973ms step_avg:79.70ms
+[2025-07-08 03:27:36] [Rank 0] step:1701/10000 train_time:135445ms step_avg:79.63ms
+[2025-07-08 03:27:39] [Rank 0] step:1721/10000 train_time:137584ms step_avg:79.94ms
+[2025-07-08 03:27:40] [Rank 0] step:1741/10000 train_time:139061ms step_avg:79.87ms
+[2025-07-08 03:27:42] [Rank 0] step:1761/10000 train_time:140541ms step_avg:79.81ms
+[2025-07-08 03:27:43] [Rank 0] step:1781/10000 train_time:142018ms step_avg:79.74ms
+[2025-07-08 03:27:45] [Rank 0] step:1801/10000 train_time:143548ms step_avg:79.70ms
+[2025-07-08 03:27:47] [Rank 0] step:1821/10000 train_time:145624ms step_avg:79.97ms
+[2025-07-08 03:27:48] [Rank 0] step:1841/10000 train_time:147103ms step_avg:79.90ms
+[2025-07-08 03:27:50] [Rank 0] step:1861/10000 train_time:148580ms step_avg:79.84ms
+[2025-07-08 03:27:51] [Rank 0] step:1881/10000 train_time:150059ms step_avg:79.78ms
+[2025-07-08 03:27:53] [Rank 0] step:1901/10000 train_time:152210ms step_avg:80.07ms
+[2025-07-08 03:27:55] [Rank 0] step:1921/10000 train_time:153686ms step_avg:80.00ms
+[2025-07-08 03:27:56] [Rank 0] step:1941/10000 train_time:155164ms step_avg:79.94ms
+[2025-07-08 03:27:58] [Rank 0] step:1961/10000 train_time:156643ms step_avg:79.88ms
+[2025-07-08 03:27:59] [Rank 0] step:1981/10000 train_time:158176ms step_avg:79.85ms
+[2025-07-08 03:28:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:28:02] [Rank 0] PRINT: step:2000/10000 train_loss:3.1211 val_loss:2.6130 train_time:159839ms step_avg:79.92ms
+[2025-07-08 03:28:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:28:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:28:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:33:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:33:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:33:31] [Rank 0] Total Loss: 4.4778
+[2025-07-08 03:33:31] [Rank 0] Total FTA: 0.0870
+[2025-07-08 03:33:31] [Rank 0] Group 0 Loss: 4.6106
+[2025-07-08 03:33:31] [Rank 0] Group 1 Loss: 4.4763
+[2025-07-08 03:33:31] [Rank 0] Group 2 Loss: 4.3444
+[2025-07-08 03:33:31] [Rank 0] Group 3 Loss: 4.4630
+[2025-07-08 03:33:31] [Rank 0] Group 4 Loss: 4.4545
+[2025-07-08 03:33:31] [Rank 0] Group 5 Loss: 4.4448
+[2025-07-08 03:33:31] [Rank 0] Group 6 Loss: 4.4426
+[2025-07-08 03:33:31] [Rank 0] Group 7 Loss: 4.4755
+[2025-07-08 03:33:31] [Rank 0] Group 8 Loss: 4.4702
+[2025-07-08 03:33:31] [Rank 0] Group 9 Loss: 4.4911
+[2025-07-08 03:33:31] [Rank 0] Group 10 Loss: 4.4724
+[2025-07-08 03:33:31] [Rank 0] Group 11 Loss: 4.4716
+[2025-07-08 03:33:31] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-08 03:33:31] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 03:33:31] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-08 03:33:31] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-08 03:33:31] [Rank 0] Group 4 FTA: 0.0130
+[2025-07-08 03:33:31] [Rank 0] Group 5 FTA: 0.0521
+[2025-07-08 03:33:31] [Rank 0] Group 6 FTA: 0.1224
+[2025-07-08 03:33:31] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-08 03:33:31] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-08 03:33:31] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-08 03:33:31] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-08 03:33:31] [Rank 0] Group 11 FTA: 0.0977
+[2025-07-08 03:33:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 03:33:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 03:33:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 03:33:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 03:33:33] [Rank 0] step:2001/10000 train_time:159862ms step_avg:79.89ms
+[2025-07-08 03:33:34] [Rank 0] step:2021/10000 train_time:161336ms step_avg:79.83ms
+[2025-07-08 03:33:36] [Rank 0] step:2041/10000 train_time:162806ms step_avg:79.77ms
+[2025-07-08 03:33:37] [Rank 0] step:2061/10000 train_time:164276ms step_avg:79.71ms
+[2025-07-08 03:33:39] [Rank 0] step:2081/10000 train_time:166408ms step_avg:79.97ms
+[2025-07-08 03:33:41] [Rank 0] step:2101/10000 train_time:167879ms step_avg:79.90ms
+[2025-07-08 03:33:42] [Rank 0] step:2121/10000 train_time:169353ms step_avg:79.85ms
+[2025-07-08 03:33:44] [Rank 0] step:2141/10000 train_time:170827ms step_avg:79.79ms
+[2025-07-08 03:33:45] [Rank 0] step:2161/10000 train_time:172357ms step_avg:79.76ms
+[2025-07-08 03:33:47] [Rank 0] step:2181/10000 train_time:174014ms step_avg:79.79ms
+[2025-07-08 03:33:48] [Rank 0] step:2201/10000 train_time:175493ms step_avg:79.73ms
+[2025-07-08 03:33:50] [Rank 0] step:2221/10000 train_time:176972ms step_avg:79.68ms
+[2025-07-08 03:33:51] [Rank 0] step:2241/10000 train_time:178469ms step_avg:79.64ms
+[2025-07-08 03:33:53] [Rank 0] step:2261/10000 train_time:180630ms step_avg:79.89ms
+[2025-07-08 03:33:55] [Rank 0] step:2281/10000 train_time:182130ms step_avg:79.85ms
+[2025-07-08 03:33:56] [Rank 0] step:2301/10000 train_time:183632ms step_avg:79.81ms
+[2025-07-08 03:33:58] [Rank 0] step:2321/10000 train_time:185137ms step_avg:79.77ms
+[2025-07-08 03:34:00] [Rank 0] step:2341/10000 train_time:186641ms step_avg:79.73ms
+[2025-07-08 03:34:02] [Rank 0] step:2361/10000 train_time:188811ms step_avg:79.97ms
+[2025-07-08 03:34:03] [Rank 0] step:2381/10000 train_time:190317ms step_avg:79.93ms
+[2025-07-08 03:34:05] [Rank 0] step:2401/10000 train_time:191821ms step_avg:79.89ms
+[2025-07-08 03:34:06] [Rank 0] step:2421/10000 train_time:193327ms step_avg:79.85ms
+[2025-07-08 03:34:08] [Rank 0] step:2441/10000 train_time:195478ms step_avg:80.08ms
+[2025-07-08 03:34:10] [Rank 0] step:2461/10000 train_time:196982ms step_avg:80.04ms
+[2025-07-08 03:34:11] [Rank 0] step:2481/10000 train_time:198486ms step_avg:80.00ms
+[2025-07-08 03:34:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:34:14] [Rank 0] PRINT: step:2500/10000 train_loss:2.2709 val_loss:2.0053 train_time:199989ms step_avg:80.00ms
+[2025-07-08 03:34:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:34:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:34:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:39:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:39:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:39:42] [Rank 0] Total Loss: 4.1957
+[2025-07-08 03:39:42] [Rank 0] Total FTA: 0.0996
+[2025-07-08 03:39:42] [Rank 0] Group 0 Loss: 4.4049
+[2025-07-08 03:39:42] [Rank 0] Group 1 Loss: 4.2166
+[2025-07-08 03:39:42] [Rank 0] Group 2 Loss: 3.9988
+[2025-07-08 03:39:42] [Rank 0] Group 3 Loss: 4.2572
+[2025-07-08 03:39:42] [Rank 0] Group 4 Loss: 4.2000
+[2025-07-08 03:39:42] [Rank 0] Group 5 Loss: 4.1666
+[2025-07-08 03:39:42] [Rank 0] Group 6 Loss: 4.0636
+[2025-07-08 03:39:42] [Rank 0] Group 7 Loss: 4.1644
+[2025-07-08 03:39:42] [Rank 0] Group 8 Loss: 4.1719
+[2025-07-08 03:39:42] [Rank 0] Group 9 Loss: 4.1277
+[2025-07-08 03:39:42] [Rank 0] Group 10 Loss: 4.1679
+[2025-07-08 03:39:42] [Rank 0] Group 11 Loss: 4.1920
+[2025-07-08 03:39:42] [Rank 0] Group 0 FTA: 0.1404
+[2025-07-08 03:39:42] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 03:39:42] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-08 03:39:42] [Rank 0] Group 3 FTA: 0.0833
+[2025-07-08 03:39:42] [Rank 0] Group 4 FTA: 0.0573
+[2025-07-08 03:39:42] [Rank 0] Group 5 FTA: 0.0859
+[2025-07-08 03:39:42] [Rank 0] Group 6 FTA: 0.1094
+[2025-07-08 03:39:42] [Rank 0] Group 7 FTA: 0.1224
+[2025-07-08 03:39:42] [Rank 0] Group 8 FTA: 0.1302
+[2025-07-08 03:39:42] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-08 03:39:42] [Rank 0] Group 10 FTA: 0.1133
+[2025-07-08 03:39:42] [Rank 0] Group 11 FTA: 0.1123
+[2025-07-08 03:39:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 03:39:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 03:39:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 03:39:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 03:39:44] [Rank 0] step:2501/10000 train_time:200010ms step_avg:79.97ms
+[2025-07-08 03:39:46] [Rank 0] step:2521/10000 train_time:202185ms step_avg:80.20ms
+[2025-07-08 03:39:47] [Rank 0] step:2541/10000 train_time:203661ms step_avg:80.15ms
+[2025-07-08 03:39:49] [Rank 0] step:2561/10000 train_time:205416ms step_avg:80.21ms
+[2025-07-08 03:39:51] [Rank 0] step:2581/10000 train_time:206910ms step_avg:80.17ms
+[2025-07-08 03:39:52] [Rank 0] step:2601/10000 train_time:208403ms step_avg:80.12ms
+[2025-07-08 03:39:54] [Rank 0] step:2621/10000 train_time:210561ms step_avg:80.34ms
+[2025-07-08 03:39:56] [Rank 0] step:2641/10000 train_time:212052ms step_avg:80.29ms
+[2025-07-08 03:39:57] [Rank 0] step:2661/10000 train_time:213548ms step_avg:80.25ms
+[2025-07-08 03:39:59] [Rank 0] step:2681/10000 train_time:215044ms step_avg:80.21ms
+[2025-07-08 03:40:01] [Rank 0] step:2701/10000 train_time:216801ms step_avg:80.27ms
+[2025-07-08 03:40:02] [Rank 0] step:2721/10000 train_time:218716ms step_avg:80.38ms
+[2025-07-08 03:40:04] [Rank 0] step:2741/10000 train_time:220215ms step_avg:80.34ms
+[2025-07-08 03:40:05] [Rank 0] step:2761/10000 train_time:221714ms step_avg:80.30ms
+[2025-07-08 03:40:07] [Rank 0] step:2781/10000 train_time:223214ms step_avg:80.26ms
+[2025-07-08 03:40:09] [Rank 0] step:2801/10000 train_time:225378ms step_avg:80.46ms
+[2025-07-08 03:40:11] [Rank 0] step:2821/10000 train_time:226877ms step_avg:80.42ms
+[2025-07-08 03:40:12] [Rank 0] step:2841/10000 train_time:228378ms step_avg:80.39ms
+[2025-07-08 03:40:14] [Rank 0] step:2861/10000 train_time:229877ms step_avg:80.35ms
+[2025-07-08 03:40:15] [Rank 0] step:2881/10000 train_time:231381ms step_avg:80.31ms
+[2025-07-08 03:40:17] [Rank 0] step:2901/10000 train_time:233115ms step_avg:80.36ms
+[2025-07-08 03:40:18] [Rank 0] step:2921/10000 train_time:234615ms step_avg:80.32ms
+[2025-07-08 03:40:20] [Rank 0] step:2941/10000 train_time:236115ms step_avg:80.28ms
+[2025-07-08 03:40:21] [Rank 0] step:2961/10000 train_time:237617ms step_avg:80.25ms
+[2025-07-08 03:40:23] [Rank 0] step:2981/10000 train_time:239353ms step_avg:80.29ms
+[2025-07-08 03:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:40:25] [Rank 0] PRINT: step:3000/10000 train_loss:1.8467 val_loss:1.7173 train_time:240855ms step_avg:80.29ms
+[2025-07-08 03:40:25] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:40:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:40:26] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 03:40:26] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 03:45:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 03:45:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 03:45:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 03:45:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 03:45:56] [Rank 0] Total Loss: 4.1668 +[2025-07-08 03:45:56] [Rank 0] Total Loss: 4.1668 +[2025-07-08 03:45:56] [Rank 0] Total FTA: 0.1768 +[2025-07-08 03:45:56] [Rank 0] Total FTA: 0.1768 +[2025-07-08 03:45:57] [Rank 0] Group 0 Loss: 4.4775 +[2025-07-08 03:45:57] [Rank 0] Group 0 Loss: 4.4775 +[2025-07-08 03:45:57] [Rank 0] Group 1 Loss: 4.0359 +[2025-07-08 03:45:57] [Rank 0] Group 1 Loss: 4.0359 +[2025-07-08 03:45:57] [Rank 0] Group 2 Loss: 3.9535 +[2025-07-08 03:45:57] [Rank 0] Group 2 Loss: 3.9535 +[2025-07-08 03:45:57] [Rank 0] Group 3 Loss: 4.1087 +[2025-07-08 03:45:57] [Rank 0] Group 3 Loss: 4.1087 +[2025-07-08 03:45:57] [Rank 0] Group 4 Loss: 4.2271 +[2025-07-08 03:45:57] [Rank 0] Group 4 Loss: 4.2271 +[2025-07-08 03:45:57] [Rank 0] Group 5 Loss: 4.1400 +[2025-07-08 03:45:57] [Rank 0] Group 5 Loss: 4.1400 +[2025-07-08 03:45:57] [Rank 0] Group 6 Loss: 4.1001 +[2025-07-08 03:45:57] [Rank 0] Group 6 Loss: 4.1001 +[2025-07-08 03:45:57] [Rank 0] Group 7 Loss: 4.1536 +[2025-07-08 03:45:57] [Rank 0] Group 7 Loss: 4.1536 +[2025-07-08 03:45:57] [Rank 0] Group 8 Loss: 4.1201 +[2025-07-08 03:45:57] [Rank 0] Group 8 Loss: 4.1201 +[2025-07-08 03:45:57] [Rank 0] Group 9 Loss: 4.1657 +[2025-07-08 03:45:57] [Rank 0] Group 9 Loss: 4.1657 +[2025-07-08 03:45:57] [Rank 0] Group 10 Loss: 4.1443 +[2025-07-08 03:45:57] [Rank 0] Group 10 Loss: 4.1443 +[2025-07-08 03:45:57] [Rank 0] Group 11 Loss: 4.1308 +[2025-07-08 03:45:57] [Rank 0] Group 11 Loss: 4.1308 +[2025-07-08 03:45:57] [Rank 0] Group 0 FTA: 0.3576 +[2025-07-08 03:45:57] [Rank 0] Group 0 FTA: 0.3576 +[2025-07-08 03:45:57] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-08 03:45:57] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-08 03:45:57] [Rank 0] Group 2 FTA: 0.1536 +[2025-07-08 03:45:57] [Rank 0] Group 2 FTA: 0.1536 +[2025-07-08 03:45:57] [Rank 0] Group 3 FTA: 0.0417 +[2025-07-08 03:45:57] [Rank 0] Group 3 FTA: 0.0417 +[2025-07-08 03:45:57] [Rank 0] Group 4 FTA: 0.0990 +[2025-07-08 03:45:57] [Rank 0] Group 4 FTA: 0.0990 +[2025-07-08 03:45:57] [Rank 0] Group 5 FTA: 0.1615 +[2025-07-08 03:45:57] [Rank 0] Group 5 FTA: 0.1615 +[2025-07-08 03:45:57] [Rank 0] Group 6 FTA: 0.1406 +[2025-07-08 03:45:57] [Rank 0] Group 6 FTA: 0.1406 +[2025-07-08 03:45:57] [Rank 0] Group 7 FTA: 0.1562 +[2025-07-08 03:45:57] [Rank 0] Group 7 FTA: 0.1562 +[2025-07-08 03:45:57] [Rank 0] Group 8 FTA: 0.1641 +[2025-07-08 03:45:57] [Rank 0] Group 8 FTA: 0.1641 +[2025-07-08 03:45:57] [Rank 0] Group 9 FTA: 0.1602 +[2025-07-08 03:45:57] [Rank 0] Group 9 FTA: 0.1602 +[2025-07-08 03:45:57] [Rank 0] Group 10 FTA: 0.1914 +[2025-07-08 03:45:57] [Rank 0] Group 10 FTA: 0.1914 +[2025-07-08 03:45:57] [Rank 0] Group 11 FTA: 0.1602 +[2025-07-08 03:45:57] [Rank 0] Group 11 FTA: 0.1602 +[2025-07-08 03:45:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png +[2025-07-08 03:45:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png +[2025-07-08 03:45:57] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png +[2025-07-08 03:45:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png +[2025-07-08 03:45:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png +[2025-07-08 03:45:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png +[2025-07-08 03:45:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png +[2025-07-08 03:45:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png +[2025-07-08 03:45:58] [Rank 0] step:3001/10000 train_time:240876ms step_avg:80.27ms +[2025-07-08 03:45:58] [Rank 0] step:3001/10000 train_time:240876ms step_avg:80.27ms +[2025-07-08 03:46:00] [Rank 0] step:3021/10000 train_time:242383ms step_avg:80.23ms +[2025-07-08 03:46:00] [Rank 0] step:3021/10000 train_time:242383ms step_avg:80.23ms +[2025-07-08 03:46:01] [Rank 0] step:3041/10000 train_time:243876ms step_avg:80.20ms +[2025-07-08 03:46:01] [Rank 0] step:3041/10000 train_time:243876ms step_avg:80.20ms +[2025-07-08 03:46:03] [Rank 0] step:3061/10000 train_time:245423ms step_avg:80.18ms +[2025-07-08 03:46:03] [Rank 0] step:3061/10000 train_time:245423ms step_avg:80.18ms +[2025-07-08 03:46:05] [Rank 0] step:3081/10000 train_time:247530ms step_avg:80.34ms +[2025-07-08 03:46:05] [Rank 0] step:3081/10000 train_time:247530ms step_avg:80.34ms +[2025-07-08 03:46:06] [Rank 0] step:3101/10000 train_time:249024ms step_avg:80.30ms +[2025-07-08 03:46:06] [Rank 0] step:3101/10000 train_time:249024ms step_avg:80.30ms +[2025-07-08 03:46:08] [Rank 0] step:3121/10000 train_time:250674ms step_avg:80.32ms +[2025-07-08 03:46:08] [Rank 0] step:3121/10000 train_time:250674ms step_avg:80.32ms +[2025-07-08 03:46:09] [Rank 0] step:3141/10000 train_time:252256ms step_avg:80.31ms +[2025-07-08 03:46:09] [Rank 0] step:3141/10000 train_time:252256ms step_avg:80.31ms +[2025-07-08 03:46:12] [Rank 0] step:3161/10000 train_time:254398ms step_avg:80.48ms +[2025-07-08 03:46:12] [Rank 0] step:3161/10000 train_time:254398ms step_avg:80.48ms +[2025-07-08 03:46:13] [Rank 0] step:3181/10000 train_time:255890ms step_avg:80.44ms +[2025-07-08 03:46:13] [Rank 0] step:3181/10000 train_time:255890ms step_avg:80.44ms +[2025-07-08 03:46:15] [Rank 0] step:3201/10000 train_time:257387ms step_avg:80.41ms +[2025-07-08 03:46:15] [Rank 0] step:3201/10000 train_time:257387ms step_avg:80.41ms +[2025-07-08 03:46:16] [Rank 0] step:3221/10000 train_time:258884ms step_avg:80.37ms +[2025-07-08 03:46:16] [Rank 0] step:3221/10000 train_time:258884ms step_avg:80.37ms +[2025-07-08 03:46:18] [Rank 0] step:3241/10000 train_time:260382ms step_avg:80.34ms +[2025-07-08 03:46:18] [Rank 0] step:3241/10000 train_time:260382ms step_avg:80.34ms +[2025-07-08 03:46:19] [Rank 0] step:3261/10000 train_time:262119ms step_avg:80.38ms +[2025-07-08 03:46:19] [Rank 0] step:3261/10000 train_time:262119ms step_avg:80.38ms +[2025-07-08 03:46:21] [Rank 0] step:3281/10000 train_time:263616ms step_avg:80.35ms +[2025-07-08 03:46:21] [Rank 0] step:3281/10000 train_time:263616ms step_avg:80.35ms +[2025-07-08 03:46:22] [Rank 0] step:3301/10000 train_time:265117ms step_avg:80.31ms +[2025-07-08 03:46:22] [Rank 
+[2025-07-08 03:46:24] [Rank 0] step:3321/10000 train_time:266617ms step_avg:80.28ms
+[2025-07-08 03:46:26] [Rank 0] step:3341/10000 train_time:268349ms step_avg:80.32ms
+[2025-07-08 03:46:27] [Rank 0] step:3361/10000 train_time:269849ms step_avg:80.29ms
+[2025-07-08 03:46:29] [Rank 0] step:3381/10000 train_time:271350ms step_avg:80.26ms
+[2025-07-08 03:46:30] [Rank 0] step:3401/10000 train_time:272852ms step_avg:80.23ms
+[2025-07-08 03:46:32] [Rank 0] step:3421/10000 train_time:274610ms step_avg:80.27ms
+[2025-07-08 03:46:34] [Rank 0] step:3441/10000 train_time:276495ms step_avg:80.35ms
+[2025-07-08 03:46:35] [Rank 0] step:3461/10000 train_time:277997ms step_avg:80.32ms
+[2025-07-08 03:46:37] [Rank 0] step:3481/10000 train_time:279500ms step_avg:80.29ms
+[2025-07-08 03:46:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:46:39] [Rank 0] PRINT: step:3500/10000 train_loss:1.6379 val_loss:1.5676 train_time:281003ms step_avg:80.29ms
+[2025-07-08 03:46:39] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:46:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:46:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:52:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:52:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:52:08] [Rank 0] Total Loss: 4.2570
+[2025-07-08 03:52:08] [Rank 0] Total FTA: 0.1786
+[2025-07-08 03:52:08] [Rank 0] Group 0 Loss: 4.5560
+[2025-07-08 03:52:08] [Rank 0] Group 1 Loss: 4.3028
+[2025-07-08 03:52:08] [Rank 0] Group 2 Loss: 3.9443
+[2025-07-08 03:52:08] [Rank 0] Group 3 Loss: 4.4081
+[2025-07-08 03:52:08] [Rank 0] Group 4 Loss: 4.2295
+[2025-07-08 03:52:08] [Rank 0] Group 5 Loss: 4.1419
+[2025-07-08 03:52:08] [Rank 0] Group 6 Loss: 4.1521
+[2025-07-08 03:52:08] [Rank 0] Group 7 Loss: 4.2164
+[2025-07-08 03:52:08] [Rank 0] Group 8 Loss: 4.2199
+[2025-07-08 03:52:08] [Rank 0] Group 9 Loss: 4.2110
+[2025-07-08 03:52:08] [Rank 0] Group 10 Loss: 4.2073
+[2025-07-08 03:52:08] [Rank 0] Group 11 Loss: 4.2345
+[2025-07-08 03:52:08] [Rank 0] Group 0 FTA: 0.1704
+[2025-07-08 03:52:08] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 03:52:08] [Rank 0] Group 2 FTA: 0.3021
+[2025-07-08 03:52:08] [Rank 0] Group 3 FTA: 0.0833
+[2025-07-08 03:52:08] [Rank 0] Group 4 FTA: 0.1094
+[2025-07-08 03:52:08] [Rank 0] Group 5 FTA: 0.1979
+[2025-07-08 03:52:08] [Rank 0] Group 6 FTA: 0.1797
+[2025-07-08 03:52:08] [Rank 0] Group 7 FTA: 0.2500
+[2025-07-08 03:52:08] [Rank 0] Group 8 FTA: 0.2161
+[2025-07-08 03:52:08] [Rank 0] Group 9 FTA: 0.2266
+[2025-07-08 03:52:08] [Rank 0] Group 10 FTA: 0.1992
+[2025-07-08 03:52:08] [Rank 0] Group 11 FTA: 0.1963
+[2025-07-08 03:52:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 03:52:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 03:52:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 03:52:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 03:52:09] [Rank 0] step:3501/10000 train_time:281024ms step_avg:80.27ms
+[2025-07-08 03:52:11] [Rank 0] step:3521/10000 train_time:282758ms step_avg:80.31ms
+[2025-07-08 03:52:13] [Rank 0] step:3541/10000 train_time:284249ms step_avg:80.27ms
+[2025-07-08 03:52:14] [Rank 0] step:3561/10000 train_time:285743ms step_avg:80.24ms
+[2025-07-08 03:52:16] [Rank 0] step:3581/10000 train_time:287241ms step_avg:80.21ms
+[2025-07-08 03:52:18] [Rank 0] step:3601/10000 train_time:288736ms step_avg:80.18ms
+[2025-07-08 03:52:19] [Rank 0] step:3621/10000 train_time:290890ms step_avg:80.33ms
+[2025-07-08 03:52:21] [Rank 0] step:3641/10000 train_time:292386ms step_avg:80.30ms
+[2025-07-08 03:52:22] [Rank 0] step:3661/10000 train_time:293887ms step_avg:80.28ms
+[2025-07-08 03:52:24] [Rank 0] step:3681/10000 train_time:295386ms step_avg:80.25ms
+[2025-07-08 03:52:26] [Rank 0] step:3701/10000 train_time:297531ms step_avg:80.39ms
+[2025-07-08 03:52:28] [Rank 0] step:3721/10000 train_time:299272ms step_avg:80.43ms
+[2025-07-08 03:52:29] [Rank 0] step:3741/10000 train_time:300770ms step_avg:80.40ms
+[2025-07-08 03:52:31] [Rank 0] step:3761/10000 train_time:302275ms step_avg:80.37ms
+[2025-07-08 03:52:33] [Rank 0] step:3781/10000 train_time:304032ms step_avg:80.41ms
+[2025-07-08 03:52:34] [Rank 0] step:3801/10000 train_time:305931ms step_avg:80.49ms
+[2025-07-08 03:52:36] [Rank 0] step:3821/10000 train_time:307435ms step_avg:80.46ms
+[2025-07-08 03:52:37] [Rank 0] step:3841/10000 train_time:308937ms step_avg:80.43ms
+[2025-07-08 03:52:39] [Rank 0] step:3861/10000 train_time:310440ms step_avg:80.40ms
+[2025-07-08 03:52:41] [Rank 0] step:3881/10000 train_time:312604ms step_avg:80.55ms
+[2025-07-08 03:52:43] [Rank 0] step:3901/10000 train_time:314107ms step_avg:80.52ms
+[2025-07-08 03:52:44] [Rank 0] step:3921/10000 train_time:315608ms step_avg:80.49ms
+[2025-07-08 03:52:46] [Rank 0] step:3941/10000 train_time:317111ms step_avg:80.46ms
+[2025-07-08 03:52:48] [Rank 0] step:3961/10000 train_time:318615ms step_avg:80.44ms
+[2025-07-08 03:52:49] [Rank 0] step:3981/10000 train_time:320785ms step_avg:80.58ms
+[2025-07-08 03:52:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:52:52] [Rank 0] PRINT: step:4000/10000 train_loss:1.5190 val_loss:1.4731 train_time:322287ms step_avg:80.57ms
+[2025-07-08 03:52:52] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:52:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:52:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:58:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:58:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:58:19] [Rank 0] Total Loss: 4.3630
+[2025-07-08 03:58:19] [Rank 0] Total FTA: 0.2734
+[2025-07-08 03:58:19] [Rank 0] Group 0 Loss: 4.6302
+[2025-07-08 03:58:19] [Rank 0] Group 1 Loss: 4.1304
+[2025-07-08 03:58:19] [Rank 0] Group 2 Loss: 4.1039
+[2025-07-08 03:58:19] [Rank 0] Group 3 Loss: 4.3379
+[2025-07-08 03:58:19] [Rank 0] Group 4 Loss: 4.3415
+[2025-07-08 03:58:19] [Rank 0] Group 5 Loss: 4.3508
+[2025-07-08 03:58:19] [Rank 0] Group 6 Loss: 4.2933
+[2025-07-08 03:58:19] [Rank 0] Group 7 Loss: 4.3529
+[2025-07-08 03:58:19] [Rank 0] Group 8 Loss: 4.3895
+[2025-07-08 03:58:19] [Rank 0] Group 9 Loss: 4.3502
+[2025-07-08 03:58:19] [Rank 0] Group 10 Loss: 4.3986
+[2025-07-08 03:58:19] [Rank 0] Group 11 Loss: 4.3743
+[2025-07-08 03:58:19] [Rank 0] Group 0 FTA: 0.3550
+[2025-07-08 03:58:19] [Rank 0] Group 1 FTA: 0.5391
+[2025-07-08 03:58:19] [Rank 0] Group 2 FTA: 0.4245
+[2025-07-08 03:58:19] [Rank 0] Group 3 FTA: 0.1302
+[2025-07-08 03:58:19] [Rank 0] Group 4 FTA: 0.1615
+[2025-07-08 03:58:19] [Rank 0] Group 5 FTA: 0.2266
+[2025-07-08 03:58:19] [Rank 0] Group 6 FTA: 0.2057
+[2025-07-08 03:58:19] [Rank 0] Group 7 FTA: 0.2526
+[2025-07-08 03:58:19] [Rank 0] Group 8 FTA: 0.2396
+[2025-07-08 03:58:19] [Rank 0] Group 9 FTA: 0.2227
+[2025-07-08 03:58:19] [Rank 0] Group 10 FTA: 0.2227
+[2025-07-08 03:58:19] [Rank 0] Group 11 FTA: 0.2529
+[2025-07-08 03:58:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 03:58:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 03:58:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 03:58:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 03:58:20] [Rank 0] step:4001/10000 train_time:322308ms step_avg:80.56ms
+[2025-07-08 03:58:22] [Rank 0] step:4021/10000 train_time:323821ms step_avg:80.53ms
+[2025-07-08 03:58:23] [Rank 0] step:4041/10000 train_time:325314ms step_avg:80.50ms
+[2025-07-08 03:58:25] [Rank 0] step:4061/10000 train_time:327465ms step_avg:80.64ms
+[2025-07-08 03:58:27] [Rank 0] step:4081/10000 train_time:328957ms step_avg:80.61ms
+[2025-07-08 03:58:28] [Rank 0] step:4101/10000 train_time:330453ms step_avg:80.58ms
+[2025-07-08 03:58:30] [Rank 0] step:4121/10000 train_time:331949ms step_avg:80.55ms
+[2025-07-08 03:58:32] [Rank 0] step:4141/10000 train_time:333445ms step_avg:80.52ms
+[2025-07-08 03:58:33] [Rank 0] step:4161/10000 train_time:335184ms step_avg:80.55ms
+[2025-07-08 03:58:35] [Rank 0] step:4181/10000 train_time:336680ms step_avg:80.53ms
+[2025-07-08 03:58:36] [Rank 0] step:4201/10000 train_time:338179ms step_avg:80.50ms
+[2025-07-08 03:58:38] [Rank 0] step:4221/10000 train_time:339680ms step_avg:80.47ms
+[2025-07-08 03:58:39] [Rank 0] step:4241/10000 train_time:341418ms step_avg:80.50ms
+[2025-07-08 03:58:41] [Rank 0] step:4261/10000 train_time:342919ms step_avg:80.48ms
+[2025-07-08 03:58:42] [Rank 0] step:4281/10000 train_time:344422ms step_avg:80.45ms
+[2025-07-08 03:58:44] [Rank 0] step:4301/10000 train_time:345923ms step_avg:80.43ms
+[2025-07-08 03:58:46] [Rank 0] step:4321/10000 train_time:347478ms step_avg:80.42ms
+[2025-07-08 03:58:48] [Rank 0] step:4341/10000 train_time:349761ms step_avg:80.57ms
+[2025-07-08 03:58:49] [Rank 0] step:4361/10000 train_time:351261ms step_avg:80.55ms
+[2025-07-08 03:58:51] [Rank 0] step:4381/10000 train_time:352763ms step_avg:80.52ms
+[2025-07-08 03:58:52] [Rank 0] step:4401/10000 train_time:354265ms step_avg:80.50ms
+[2025-07-08 03:58:54] [Rank 0] step:4421/10000 train_time:356430ms step_avg:80.62ms
+[2025-07-08 03:58:56] [Rank 0] step:4441/10000 train_time:357928ms step_avg:80.60ms
+[2025-07-08 03:58:57] [Rank 0] step:4461/10000 train_time:359430ms step_avg:80.57ms
+[2025-07-08 03:58:59] [Rank 0] step:4481/10000 train_time:360933ms step_avg:80.55ms
+[2025-07-08 03:59:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:59:01] [Rank 0] PRINT: step:4500/10000 train_loss:1.4415 val_loss:1.4087 train_time:362437ms step_avg:80.54ms
+[2025-07-08 03:59:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:59:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:59:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:04:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:04:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:04:31] [Rank 0] Total Loss: 4.4901
+[2025-07-08 04:04:31] [Rank 0] Total FTA: 0.3073
+[2025-07-08 04:04:31] [Rank 0] Group 0 Loss: 4.7438
+[2025-07-08 04:04:31] [Rank 0] Group 1 Loss: 4.5308
+[2025-07-08 04:04:31] [Rank 0] Group 2 Loss: 4.1729
+[2025-07-08 04:04:31] [Rank 0] Group 3 Loss: 4.7055
+[2025-07-08 04:04:31] [Rank 0] Group 4 Loss: 4.5288
+[2025-07-08 04:04:31] [Rank 0] Group 5 Loss: 4.3413
+[2025-07-08 04:04:31] [Rank 0] Group 6 Loss: 4.3573
+[2025-07-08 04:04:31] [Rank 0] Group 7 Loss: 4.4707
+[2025-07-08 04:04:31] [Rank 0] Group 8 Loss: 4.4574
+[2025-07-08 04:04:31] [Rank 0] Group 9 Loss: 4.3799
+[2025-07-08 04:04:31] [Rank 0] Group 10 Loss: 4.4432
+[2025-07-08 04:04:31] [Rank 0] Group 11 Loss: 4.4839
+[2025-07-08 04:04:31] [Rank 0] Group 0 FTA: 0.3693
+[2025-07-08 04:04:31] [Rank 0] Group 1 FTA: 0.4844
+[2025-07-08 04:04:31] [Rank 0] Group 2 FTA: 0.4505
+[2025-07-08 04:04:31] [Rank 0] Group 3 FTA: 0.2865
+[2025-07-08 04:04:31] [Rank 0] Group 4 FTA: 0.1432
+[2025-07-08 04:04:31] [Rank 0] Group 5 FTA: 0.2708
+[2025-07-08 04:04:31] [Rank 0] Group 6 FTA: 0.2630
+[2025-07-08 04:04:31] [Rank 0] Group 7 FTA: 0.2839
+[2025-07-08 04:04:31] [Rank 0] Group 8 FTA: 0.2760
+[2025-07-08 04:04:31] [Rank 0] Group 9 FTA: 0.2969
+[2025-07-08 04:04:31] [Rank 0] Group 10 FTA: 0.2773
+[2025-07-08 04:04:31] [Rank 0] Group 11 FTA: 0.2783
+[2025-07-08 04:04:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:04:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:04:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:04:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:04:33] [Rank 0] step:4501/10000 train_time:362468ms step_avg:80.53ms
+[2025-07-08 04:04:34] [Rank 0] step:4521/10000 train_time:364662ms step_avg:80.66ms
+[2025-07-08 04:04:36] [Rank 0] step:4541/10000 train_time:366156ms step_avg:80.63ms
+[2025-07-08 04:04:37] [Rank 0] step:4561/10000 train_time:367652ms step_avg:80.61ms
+[2025-07-08 04:04:39] [Rank 0] step:4581/10000 train_time:369147ms step_avg:80.58ms
+[2025-07-08 04:04:41] [Rank 0] step:4601/10000 train_time:371306ms step_avg:80.70ms
+[2025-07-08 04:04:43] [Rank 0] step:4621/10000 train_time:372800ms step_avg:80.68ms
+[2025-07-08 04:04:44] [Rank 0] step:4641/10000 train_time:374294ms step_avg:80.65ms
+[2025-07-08 04:04:46] [Rank 0] step:4661/10000 train_time:375792ms step_avg:80.62ms
+[2025-07-08 04:04:48] [Rank 0] step:4681/10000 train_time:377294ms step_avg:80.60ms
+[2025-07-08 04:04:49] [Rank 0] step:4701/10000 train_time:379452ms step_avg:80.72ms
+[2025-07-08 04:04:51] [Rank 0] step:4721/10000 train_time:380954ms step_avg:80.69ms
+[2025-07-08 04:04:52] [Rank 0] step:4741/10000 train_time:382454ms step_avg:80.67ms
+[2025-07-08 04:04:54] [Rank 0] step:4761/10000 train_time:383956ms step_avg:80.65ms
+[2025-07-08 04:04:56] [Rank 0] step:4781/10000 train_time:386123ms step_avg:80.76ms
+[2025-07-08 04:04:57] [Rank 0] step:4801/10000 train_time:387623ms step_avg:80.74ms
+[2025-07-08 04:04:59] [Rank 0] step:4821/10000 train_time:389126ms step_avg:80.71ms
+[2025-07-08 04:05:00] [Rank 0] step:4841/10000 train_time:390629ms step_avg:80.69ms
+[2025-07-08 04:05:03] [Rank 0] step:4861/10000 train_time:392132ms step_avg:80.67ms
+[2025-07-08 04:05:04] [Rank 0] step:4881/10000 train_time:394303ms step_avg:80.78ms
+[2025-07-08 04:05:06] [Rank 0] step:4901/10000 train_time:395807ms step_avg:80.76ms
+[2025-07-08 04:05:07] [Rank 0] step:4921/10000 train_time:397575ms step_avg:80.79ms
+[2025-07-08 04:05:09] [Rank 0] step:4941/10000 train_time:399078ms step_avg:80.77ms
+[2025-07-08 04:05:11] [Rank 0] step:4961/10000 train_time:400814ms step_avg:80.79ms
+[2025-07-08 04:05:12] [Rank 0] step:4981/10000 train_time:402315ms step_avg:80.77ms
+[2025-07-08 04:05:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:05:15] [Rank 0] PRINT: step:5000/10000 train_loss:1.3860 val_loss:1.3611 train_time:403815ms step_avg:80.76ms
+[2025-07-08 04:05:15] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:05:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:05:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:10:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:10:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:10:42] [Rank 0] Total Loss: 4.5436
+[2025-07-08 04:10:42] [Rank 0] Total FTA: 0.3327
+[2025-07-08 04:10:42] [Rank 0] Group 0 Loss: 4.7375
+[2025-07-08 04:10:42] [Rank 0] Group 1 Loss: 4.6076
+[2025-07-08 04:10:42] [Rank 0] Group 2 Loss: 4.2798
+[2025-07-08 04:10:42] [Rank 0] Group 3 Loss: 4.6486
+[2025-07-08 04:10:42] [Rank 0] Group 4 Loss: 4.4393
+[2025-07-08 04:10:42] [Rank 0] Group 5 Loss: 4.5308
+[2025-07-08 04:10:42] [Rank 0] Group 6 Loss: 4.4762
+[2025-07-08 04:10:42] [Rank 0] Group 7 Loss: 4.5173
+[2025-07-08 04:10:42] [Rank 0] Group 8 Loss: 4.5392
+[2025-07-08 04:10:42] [Rank 0] Group 9 Loss: 4.4663
+[2025-07-08 04:10:42] [Rank 0] Group 10 Loss: 4.5771
+[2025-07-08 04:10:42] [Rank 0] Group 11 Loss: 4.5168
+[2025-07-08 04:10:42] [Rank 0] Group 0 FTA: 0.5085
+[2025-07-08 04:10:42] [Rank 0] Group 1 FTA: 0.4948
+[2025-07-08 04:10:42] [Rank 0] Group 2 FTA: 0.3151
+[2025-07-08 04:10:42] [Rank 0] Group 3 FTA: 0.1432
+[2025-07-08 04:10:42] [Rank 0] Group 4 FTA: 0.2396
+[2025-07-08 04:10:42] [Rank 0] Group 5 FTA: 0.2734
+[2025-07-08 04:10:42] [Rank 0] Group 6 FTA: 0.2448
+[2025-07-08 04:10:42] [Rank 0] Group 7 FTA: 0.3307
+[2025-07-08 04:10:42] [Rank 0] Group 8 FTA: 0.3151
+[2025-07-08 04:10:42] [Rank 0] Group 9 FTA: 0.2969
+[2025-07-08 04:10:42] [Rank 0] Group 10 FTA: 0.3496
+[2025-07-08 04:10:42] [Rank 0] Group 11 FTA: 0.3154
+[2025-07-08 04:10:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:10:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:10:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:10:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:10:44] [Rank 0] step:5001/10000 train_time:403837ms step_avg:80.75ms
+[2025-07-08 04:10:45] [Rank 0] step:5021/10000 train_time:405338ms step_avg:80.73ms
+[2025-07-08 04:10:47] [Rank 0] step:5041/10000 train_time:406882ms step_avg:80.71ms
+[2025-07-08 04:10:49] [Rank 0] step:5061/10000 train_time:408992ms step_avg:80.81ms
+[2025-07-08 04:10:50] [Rank 0] step:5081/10000 train_time:410484ms step_avg:80.79ms
+[2025-07-08 04:10:52] [Rank 0] step:5101/10000 train_time:411979ms step_avg:80.76ms
+[2025-07-08 04:10:53] [Rank 0] step:5121/10000 train_time:413477ms step_avg:80.74ms
+[2025-07-08 04:10:55] [Rank 0] step:5141/10000 train_time:415636ms step_avg:80.85ms
+[2025-07-08 04:10:57] [Rank 0] step:5161/10000 train_time:417134ms step_avg:80.82ms
+[2025-07-08 04:10:58] [Rank 0] step:5181/10000 train_time:418634ms step_avg:80.80ms
+[2025-07-08 04:11:00] [Rank 0] step:5201/10000 train_time:420134ms step_avg:80.78ms
+[2025-07-08 04:11:02] [Rank 0] step:5221/10000 train_time:421633ms step_avg:80.76ms
+[2025-07-08 04:11:03] [Rank 0] step:5241/10000 train_time:423784ms step_avg:80.86ms
+[2025-07-08 04:11:05] [Rank 0] step:5261/10000 train_time:425283ms step_avg:80.84ms
+[2025-07-08 04:11:06] [Rank 0] step:5281/10000 train_time:426785ms step_avg:80.82ms
+[2025-07-08 04:11:08] [Rank 0] step:5301/10000 train_time:428286ms step_avg:80.79ms
+[2025-07-08 04:11:10] [Rank 0] step:5321/10000 train_time:430456ms step_avg:80.90ms
+[2025-07-08 04:11:12] [Rank 0] step:5341/10000 train_time:431957ms step_avg:80.88ms
+[2025-07-08 04:11:13] [Rank 0] step:5361/10000 train_time:433461ms step_avg:80.85ms
+[2025-07-08 04:11:15] [Rank 0] step:5381/10000 train_time:434964ms step_avg:80.83ms
+[2025-07-08 04:11:17] [Rank 0] step:5401/10000 train_time:436468ms step_avg:80.81ms
+[2025-07-08 04:11:18] [Rank 0] step:5421/10000 train_time:438633ms step_avg:80.91ms
+[2025-07-08 04:11:20] [Rank 0] step:5441/10000 train_time:440137ms step_avg:80.89ms
+[2025-07-08 04:11:21] [Rank 0] step:5461/10000 train_time:441640ms step_avg:80.87ms
+[2025-07-08 04:11:23] [Rank 0] step:5481/10000 train_time:443143ms step_avg:80.85ms
+[2025-07-08 04:11:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:11:25] [Rank 0] PRINT: step:5500/10000 train_loss:1.3424 val_loss:1.3213 train_time:444881ms step_avg:80.89ms
+[2025-07-08 04:11:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:11:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:11:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:16:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:16:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:16:54] [Rank 0] Total Loss: 4.6148
+[2025-07-08 04:16:54] [Rank 0] Total FTA: 0.3591
+[2025-07-08 04:16:54] [Rank 0] Group 0 Loss: 4.8501
+[2025-07-08 04:16:54] [Rank 0] Group 1 Loss: 4.5104
+[2025-07-08 04:16:54] [Rank 0] Group 2 Loss: 4.2637
+[2025-07-08 04:16:54] [Rank 0] Group 3 Loss: 4.7100
+[2025-07-08 04:16:54] [Rank 0] Group 4 Loss: 4.5894
+[2025-07-08 04:16:54] [Rank 0] Group 5 Loss: 4.4963
+[2025-07-08 04:16:54] [Rank 0] Group 6 Loss: 4.5602
+[2025-07-08 04:16:54] [Rank 0] Group 7 Loss: 4.5902
+[2025-07-08 04:16:54] [Rank 0] Group 8 Loss: 4.6395
+[2025-07-08 04:16:54] [Rank 0] Group 9 Loss: 4.6253
+[2025-07-08 04:16:54] [Rank 0] Group 10 Loss: 4.6420
+[2025-07-08 04:16:54] [Rank 0] Group 11 Loss: 4.6314
+[2025-07-08 04:16:54] [Rank 0] Group 0 FTA: 0.4967
+[2025-07-08 04:16:54] [Rank 0] Group 1 FTA: 0.5286
+[2025-07-08 04:16:54] [Rank 0] Group 2 FTA: 0.4375
+[2025-07-08 04:16:54] [Rank 0] Group 3 FTA: 0.3073
+[2025-07-08 04:16:54] [Rank 0] Group 4 FTA: 0.2656
+[2025-07-08 04:16:54] [Rank 0] Group 5 FTA: 0.3307
+[2025-07-08 04:16:54] [Rank 0] Group 6 FTA: 0.2760
+[2025-07-08 04:16:54] [Rank 0] Group 7 FTA: 0.3568
+[2025-07-08 04:16:54] [Rank 0] Group 8 FTA: 0.3021
+[2025-07-08 04:16:54] [Rank 0] Group 9 FTA: 0.3281
+[2025-07-08 04:16:54] [Rank 0] Group 10 FTA: 0.3242
+[2025-07-08 04:16:54] [Rank 0] Group 11 FTA: 0.3066
+[2025-07-08 04:16:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:16:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:16:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:16:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:16:55] [Rank 0] step:5501/10000 train_time:444903ms step_avg:80.88ms
+[2025-07-08 04:16:57] [Rank 0] step:5521/10000 train_time:446410ms step_avg:80.86ms
+[2025-07-08 04:16:58] [Rank 0] step:5541/10000 train_time:447904ms step_avg:80.83ms
+[2025-07-08 04:17:00] [Rank 0] step:5561/10000 train_time:449400ms step_avg:80.81ms
+[2025-07-08 04:17:02] [Rank 0] step:5581/10000 train_time:450897ms step_avg:80.79ms
+[2025-07-08 04:17:03] [Rank 0] step:5601/10000 train_time:453037ms step_avg:80.89ms
+[2025-07-08 04:17:05] [Rank 0] step:5621/10000 train_time:454532ms step_avg:80.86ms
+[2025-07-08 04:17:06] [Rank 0] step:5641/10000 train_time:456030ms step_avg:80.84ms
+[2025-07-08 04:17:08] [Rank 0] step:5661/10000 train_time:457530ms step_avg:80.82ms
+[2025-07-08 04:17:10] [Rank 0] step:5681/10000 train_time:459682ms step_avg:80.92ms
+[2025-07-08 04:17:11] [Rank 0] step:5701/10000 train_time:461181ms step_avg:80.89ms
+[2025-07-08 04:17:13] [Rank 0] step:5721/10000 train_time:462680ms step_avg:80.87ms
+[2025-07-08 04:17:14] [Rank 0] step:5741/10000 train_time:464179ms step_avg:80.85ms
+[2025-07-08 04:17:17] [Rank 0] step:5761/10000 train_time:465681ms step_avg:80.83ms
+[2025-07-08 04:17:18] [Rank 0] step:5781/10000 train_time:467827ms step_avg:80.92ms
+[2025-07-08 04:17:20] [Rank 0] step:5801/10000 train_time:469329ms step_avg:80.90ms
+[2025-07-08 04:17:21] [Rank 0] step:5821/10000 train_time:470831ms step_avg:80.88ms
+[2025-07-08 04:17:23] [Rank 0] step:5841/10000 train_time:472335ms step_avg:80.87ms
+[2025-07-08 04:17:25] [Rank 0] step:5861/10000 train_time:474499ms step_avg:80.96ms
+[2025-07-08 04:17:26] [Rank 0] step:5881/10000 train_time:476000ms step_avg:80.94ms
+[2025-07-08 04:17:28] [Rank 0] step:5901/10000 train_time:477502ms step_avg:80.92ms
+[2025-07-08 04:17:29] [Rank 0] step:5921/10000 train_time:479008ms step_avg:80.90ms
+[2025-07-08 04:17:31] [Rank 0] step:5941/10000 train_time:480562ms step_avg:80.89ms
+[2025-07-08 04:17:33] [Rank 0] step:5961/10000 train_time:482250ms step_avg:80.90ms
+[2025-07-08 04:17:34] [Rank 0] step:5981/10000 train_time:483754ms step_avg:80.88ms
+[2025-07-08 04:17:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:17:36] [Rank 0] PRINT: step:6000/10000 train_loss:1.3054 val_loss:1.2870 train_time:485256ms step_avg:80.88ms
+[2025-07-08 04:17:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:17:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:17:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:23:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:23:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:23:04] [Rank 0] Total Loss: 4.6581
+[2025-07-08 04:23:04] [Rank 0] Total FTA: 0.3215
+[2025-07-08 04:23:04] [Rank 0] Group 0 Loss: 4.9594
+[2025-07-08 04:23:04] [Rank 0] Group 1 Loss: 4.7655
+[2025-07-08 04:23:04] [Rank 0] Group 2 Loss: 4.2603
+[2025-07-08 04:23:04] [Rank 0] Group 3 Loss: 4.7050
+[2025-07-08 04:23:04] [Rank 0] Group 4 Loss: 4.6217
+[2025-07-08 04:23:04] [Rank 0] Group 5 Loss: 4.5421
+[2025-07-08 04:23:04] [Rank 0] Group 6 Loss: 4.4804
+[2025-07-08 04:23:04] [Rank 0] Group 7 Loss: 4.6602
+[2025-07-08 04:23:04] [Rank 0] Group 8 Loss: 4.6578
+[2025-07-08 04:23:04] [Rank 0] Group 9 Loss: 4.6743
+[2025-07-08 04:23:04] [Rank 0] Group 10 Loss: 4.6688
+[2025-07-08 04:23:04] [Rank 0] Group 11 Loss: 4.6368
+[2025-07-08 04:23:04] [Rank 0] Group 0 FTA: 0.5059
+[2025-07-08 04:23:04] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-08 04:23:04] [Rank 0] Group 2 FTA: 0.3516
+[2025-07-08 04:23:04] [Rank 0] Group 3 FTA: 0.2552
+[2025-07-08 04:23:04] [Rank 0] Group 4 FTA: 0.2240
+[2025-07-08 04:23:04] [Rank 0] Group 5 FTA: 0.3333
+[2025-07-08 04:23:04] [Rank 0] Group 6 FTA: 0.3047
+[2025-07-08 04:23:04] [Rank 0] Group 7 FTA: 0.3073
+[2025-07-08 04:23:04] [Rank 0] Group 8 FTA: 0.2891
+[2025-07-08 04:23:04] [Rank 0] Group 9 FTA: 0.3633
+[2025-07-08 04:23:04] [Rank 0] Group 10 FTA: 0.2969
+[2025-07-08 04:23:04] [Rank 0] Group 11 FTA: 0.3096
+[2025-07-08 04:23:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:23:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:23:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:23:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:23:05] [Rank 0] step:6001/10000 train_time:485277ms step_avg:80.87ms
+[2025-07-08 04:23:07] [Rank 0] step:6021/10000 train_time:486797ms step_avg:80.85ms
+[2025-07-08 04:23:09] [Rank 0] step:6041/10000 train_time:488945ms step_avg:80.94ms
+[2025-07-08 04:23:11] [Rank 0] step:6061/10000 train_time:490437ms step_avg:80.92ms
+[2025-07-08 04:23:12] [Rank 0] step:6081/10000 train_time:491930ms step_avg:80.90ms
+[2025-07-08 04:23:14] [Rank 0] step:6101/10000 train_time:493427ms step_avg:80.88ms
+[2025-07-08 04:23:15] [Rank 0] step:6121/10000 train_time:494921ms step_avg:80.86ms
+[2025-07-08 04:23:17] [Rank 0] step:6141/10000 train_time:496454ms step_avg:80.84ms
+[2025-07-08 04:23:18] [Rank 0] step:6161/10000 train_time:497948ms step_avg:80.82ms
+[2025-07-08 04:23:20] [Rank 0] step:6181/10000 train_time:499445ms step_avg:80.80ms
+[2025-07-08 04:23:21] [Rank 0] step:6201/10000 train_time:500941ms step_avg:80.78ms
+[2025-07-08 04:23:23] [Rank 0] step:6221/10000 train_time:503093ms step_avg:80.87ms
+[2025-07-08 04:23:25] [Rank 0] step:6241/10000 train_time:504589ms step_avg:80.85ms
+[2025-07-08 04:23:26] [Rank 0] step:6261/10000 train_time:506092ms step_avg:80.83ms
+[2025-07-08 04:23:28] [Rank 0] step:6281/10000 train_time:507592ms step_avg:80.81ms
+[2025-07-08 04:23:30] [Rank 0] step:6301/10000 train_time:509092ms step_avg:80.80ms
0] step:6301/10000 train_time:509092ms step_avg:80.80ms +[2025-07-08 04:23:31] [Rank 0] step:6321/10000 train_time:511238ms step_avg:80.88ms +[2025-07-08 04:23:31] [Rank 0] step:6321/10000 train_time:511238ms step_avg:80.88ms +[2025-07-08 04:23:33] [Rank 0] step:6341/10000 train_time:512736ms step_avg:80.86ms +[2025-07-08 04:23:33] [Rank 0] step:6341/10000 train_time:512736ms step_avg:80.86ms +[2025-07-08 04:23:34] [Rank 0] step:6361/10000 train_time:514239ms step_avg:80.84ms +[2025-07-08 04:23:34] [Rank 0] step:6361/10000 train_time:514239ms step_avg:80.84ms +[2025-07-08 04:23:36] [Rank 0] step:6381/10000 train_time:515739ms step_avg:80.82ms +[2025-07-08 04:23:36] [Rank 0] step:6381/10000 train_time:515739ms step_avg:80.82ms +[2025-07-08 04:23:38] [Rank 0] step:6401/10000 train_time:517907ms step_avg:80.91ms +[2025-07-08 04:23:38] [Rank 0] step:6401/10000 train_time:517907ms step_avg:80.91ms +[2025-07-08 04:23:40] [Rank 0] step:6421/10000 train_time:519410ms step_avg:80.89ms +[2025-07-08 04:23:40] [Rank 0] step:6421/10000 train_time:519410ms step_avg:80.89ms +[2025-07-08 04:23:41] [Rank 0] step:6441/10000 train_time:520911ms step_avg:80.87ms +[2025-07-08 04:23:41] [Rank 0] step:6441/10000 train_time:520911ms step_avg:80.87ms +[2025-07-08 04:23:43] [Rank 0] step:6461/10000 train_time:522414ms step_avg:80.86ms +[2025-07-08 04:23:43] [Rank 0] step:6461/10000 train_time:522414ms step_avg:80.86ms +[2025-07-08 04:23:45] [Rank 0] step:6481/10000 train_time:523916ms step_avg:80.84ms +[2025-07-08 04:23:45] [Rank 0] step:6481/10000 train_time:523916ms step_avg:80.84ms +[2025-07-08 04:23:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 04:23:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 04:23:47] [Rank 0] PRINT: step:6500/10000 train_loss:1.2725 val_loss:1.2555 train_time:526087ms step_avg:80.94ms +[2025-07-08 04:23:47] [Rank 0] PRINT: step:6500/10000 train_loss:1.2725 val_loss:1.2555 train_time:526087ms step_avg:80.94ms +[2025-07-08 04:23:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 04:23:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 04:23:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 04:23:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 04:23:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:29:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:29:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:29:15] [Rank 0] Total Loss: 4.7244
+[2025-07-08 04:29:15] [Rank 0] Total FTA: 0.3783
+[2025-07-08 04:29:15] [Rank 0] Group 0 Loss: 4.9719
+[2025-07-08 04:29:15] [Rank 0] Group 1 Loss: 4.8322
+[2025-07-08 04:29:15] [Rank 0] Group 2 Loss: 4.3597
+[2025-07-08 04:29:15] [Rank 0] Group 3 Loss: 4.8549
+[2025-07-08 04:29:15] [Rank 0] Group 4 Loss: 4.6849
+[2025-07-08 04:29:15] [Rank 0] Group 5 Loss: 4.6162
+[2025-07-08 04:29:15] [Rank 0] Group 6 Loss: 4.5997
+[2025-07-08 04:29:15] [Rank 0] Group 7 Loss: 4.6651
+[2025-07-08 04:29:15] [Rank 0] Group 8 Loss: 4.6734
+[2025-07-08 04:29:15] [Rank 0] Group 9 Loss: 4.7229
+[2025-07-08 04:29:15] [Rank 0] Group 10 Loss: 4.7006
+[2025-07-08 04:29:15] [Rank 0] Group 11 Loss: 4.7419
+[2025-07-08 04:29:15] [Rank 0] Group 0 FTA: 0.6619
+[2025-07-08 04:29:15] [Rank 0] Group 1 FTA: 0.3438
+[2025-07-08 04:29:15] [Rank 0] Group 2 FTA: 0.3281
+[2025-07-08 04:29:15] [Rank 0] Group 3 FTA: 0.2266
+[2025-07-08 04:29:15] [Rank 0] Group 4 FTA: 0.2031
+[2025-07-08 04:29:15] [Rank 0] Group 5 FTA: 0.4375
+[2025-07-08 04:29:15] [Rank 0] Group 6 FTA: 0.3750
+[2025-07-08 04:29:15] [Rank 0] Group 7 FTA: 0.3750
+[2025-07-08 04:29:15] [Rank 0] Group 8 FTA: 0.3594
+[2025-07-08 04:29:15] [Rank 0] Group 9 FTA: 0.3281
+[2025-07-08 04:29:15] [Rank 0] Group 10 FTA: 0.3320
+[2025-07-08 04:29:15] [Rank 0] Group 11 FTA: 0.3428
+[2025-07-08 04:29:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:29:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:29:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:29:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:29:17] [Rank 0] step:6501/10000 train_time:526109ms step_avg:80.93ms
+[2025-07-08 04:29:18] [Rank 0] step:6521/10000 train_time:527612ms step_avg:80.91ms
+[2025-07-08 04:29:20] [Rank 0] step:6541/10000 train_time:529105ms step_avg:80.89ms
+[2025-07-08 04:29:22] [Rank 0] step:6561/10000 train_time:530851ms step_avg:80.91ms
+[2025-07-08 04:29:24] [Rank 0] step:6581/10000 train_time:533012ms step_avg:80.99ms
+[2025-07-08 04:29:25] [Rank 0] step:6601/10000 train_time:534504ms step_avg:80.97ms
+[2025-07-08 04:29:27] [Rank 0] step:6621/10000 train_time:535997ms step_avg:80.95ms
+[2025-07-08 04:29:28] [Rank 0] step:6641/10000 train_time:537490ms step_avg:80.94ms
+[2025-07-08 04:29:30] [Rank 0] step:6661/10000 train_time:538984ms step_avg:80.92ms
+[2025-07-08 04:29:31] [Rank 0] step:6681/10000 train_time:540718ms step_avg:80.93ms
+[2025-07-08 04:29:33] [Rank 0] step:6701/10000 train_time:542215ms step_avg:80.92ms
+[2025-07-08 04:29:34] [Rank 0] step:6721/10000 train_time:543712ms step_avg:80.90ms
+[2025-07-08 04:29:36] [Rank 0] step:6741/10000 train_time:545210ms step_avg:80.88ms
+[2025-07-08 04:29:38] [Rank 0] step:6761/10000 train_time:547048ms step_avg:80.91ms
+[2025-07-08 04:29:39] [Rank 0] step:6781/10000 train_time:548548ms step_avg:80.89ms
+[2025-07-08 04:29:41] [Rank 0] step:6801/10000 train_time:550047ms step_avg:80.88ms
+[2025-07-08 04:29:42] [Rank 0] step:6821/10000 train_time:551546ms step_avg:80.86ms
+[2025-07-08 04:29:44] [Rank 0] step:6841/10000 train_time:553726ms step_avg:80.94ms
+[2025-07-08 04:29:46] [Rank 0] step:6861/10000 train_time:555207ms step_avg:80.92ms
+[2025-07-08 04:29:47] [Rank 0] step:6881/10000 train_time:556706ms step_avg:80.90ms
+[2025-07-08 04:29:49] [Rank 0] step:6901/10000 train_time:558208ms step_avg:80.89ms
+[2025-07-08 04:29:50] [Rank 0] step:6921/10000 train_time:559711ms step_avg:80.87ms
+[2025-07-08 04:29:53] [Rank 0] step:6941/10000 train_time:561877ms step_avg:80.95ms
+[2025-07-08 04:29:54] [Rank 0] step:6961/10000 train_time:563376ms step_avg:80.93ms
+[2025-07-08 04:29:56] [Rank 0] step:6981/10000 train_time:564879ms step_avg:80.92ms
+[2025-07-08 04:29:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:29:58] [Rank 0] PRINT: step:7000/10000 train_loss:1.2426 val_loss:1.2285 train_time:566380ms step_avg:80.91ms
+[2025-07-08 04:29:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:29:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:29:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:35:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:35:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:35:26] [Rank 0] Total Loss: 4.7706
+[2025-07-08 04:35:26] [Rank 0] Total FTA: 0.3838
+[2025-07-08 04:35:26] [Rank 0] Group 0 Loss: 4.9868
+[2025-07-08 04:35:26] [Rank 0] Group 1 Loss: 5.0045
+[2025-07-08 04:35:26] [Rank 0] Group 2 Loss: 4.4202
+[2025-07-08 04:35:26] [Rank 0] Group 3 Loss: 4.7951
+[2025-07-08 04:35:26] [Rank 0] Group 4 Loss: 4.7139
+[2025-07-08 04:35:26] [Rank 0] Group 5 Loss: 4.6472
+[2025-07-08 04:35:26] [Rank 0] Group 6 Loss: 4.6440
+[2025-07-08 04:35:26] [Rank 0] Group 7 Loss: 4.7187
+[2025-07-08 04:35:26] [Rank 0] Group 8 Loss: 4.7908
+[2025-07-08 04:35:26] [Rank 0] Group 9 Loss: 4.7107
+[2025-07-08 04:35:26] [Rank 0] Group 10 Loss: 4.7937
+[2025-07-08 04:35:26] [Rank 0] Group 11 Loss: 4.7734
+[2025-07-08 04:35:26] [Rank 0] Group 0 FTA: 0.6632
+[2025-07-08 04:35:26] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-08 04:35:26] [Rank 0] Group 2 FTA: 0.4297
+[2025-07-08 04:35:26] [Rank 0] Group 3 FTA: 0.2578
+[2025-07-08 04:35:26] [Rank 0] Group 4 FTA: 0.2266
+[2025-07-08 04:35:26] [Rank 0] Group 5 FTA: 0.4219
+[2025-07-08 04:35:26] [Rank 0] Group 6 FTA: 0.3255
+[2025-07-08 04:35:27] [Rank 0] Group 7 FTA: 0.3724
+[2025-07-08 04:35:27] [Rank 0] Group 8 FTA: 0.3542
+[2025-07-08 04:35:27] [Rank 0] Group 9 FTA: 0.3125
+[2025-07-08 04:35:27] [Rank 0] Group 10 FTA: 0.3691
+[2025-07-08 04:35:27] [Rank 0] Group 11 FTA: 0.3994
+[2025-07-08 04:35:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:35:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:35:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:35:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:35:28] [Rank 0] step:7001/10000 train_time:566400ms step_avg:80.90ms
+[2025-07-08 04:35:30] [Rank 0] step:7021/10000 train_time:568612ms step_avg:80.99ms
+[2025-07-08 04:35:32] [Rank 0] step:7041/10000 train_time:570086ms step_avg:80.97ms
+[2025-07-08 04:35:33] [Rank 0] step:7061/10000 train_time:571579ms step_avg:80.95ms
+[2025-07-08 04:35:35] [Rank 0] step:7081/10000 train_time:573075ms step_avg:80.93ms
+[2025-07-08 04:35:36] [Rank 0] step:7101/10000 train_time:574570ms step_avg:80.91ms
+[2025-07-08 04:35:38] [Rank 0] step:7121/10000 train_time:576731ms step_avg:80.99ms
+[2025-07-08 04:35:40] [Rank 0] step:7141/10000 train_time:578488ms step_avg:81.01ms
+[2025-07-08 04:35:42] [Rank 0] step:7161/10000 train_time:579981ms step_avg:80.99ms
+[2025-07-08 04:35:43] [Rank 0] step:7181/10000 train_time:581480ms step_avg:80.97ms
+[2025-07-08 04:35:45] [Rank 0] step:7201/10000 train_time:582988ms step_avg:80.96ms
+[2025-07-08 04:35:47] [Rank 0] step:7221/10000 train_time:585131ms step_avg:81.03ms
+[2025-07-08 04:35:48] [Rank 0] step:7241/10000 train_time:586629ms step_avg:81.01ms
+[2025-07-08 04:35:50] [Rank 0] step:7261/10000 train_time:588129ms step_avg:81.00ms
+[2025-07-08 04:35:51] [Rank 0] step:7281/10000 train_time:589631ms step_avg:80.98ms
+[2025-07-08 04:35:53] [Rank 0] step:7301/10000 train_time:591771ms step_avg:81.05ms
+[2025-07-08 04:35:55] [Rank 0] step:7321/10000 train_time:593271ms step_avg:81.04ms
+[2025-07-08 04:35:56] [Rank 0] step:7341/10000 train_time:594769ms step_avg:81.02ms
+[2025-07-08 04:35:58] [Rank 0] step:7361/10000 train_time:596268ms step_avg:81.00ms
+[2025-07-08 04:36:00] [Rank 0] step:7381/10000 train_time:597770ms step_avg:80.99ms
+[2025-07-08 04:36:01] [Rank 0] step:7401/10000 train_time:599506ms step_avg:81.00ms
+[2025-07-08 04:36:03] [Rank 0] step:7421/10000 train_time:601006ms step_avg:80.99ms
+[2025-07-08 04:36:04] [Rank 0] step:7441/10000 train_time:602507ms step_avg:80.97ms
+[2025-07-08 04:36:06] [Rank 0] step:7461/10000 train_time:604008ms step_avg:80.96ms
+[2025-07-08 04:36:08] [Rank 0] step:7481/10000 train_time:606177ms step_avg:81.03ms
+[2025-07-08 04:36:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:36:10] [Rank 0] PRINT: step:7500/10000 train_loss:1.2180 val_loss:1.2063 train_time:607674ms step_avg:81.02ms
+[2025-07-08 04:36:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:36:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:36:10] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:41:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:41:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:41:38] [Rank 0] Total Loss: 4.8230
+[2025-07-08 04:41:38] [Rank 0] Total FTA: 0.4039
+[2025-07-08 04:41:38] [Rank 0] Group 0 Loss: 5.0027
+[2025-07-08 04:41:38] [Rank 0] Group 1 Loss: 5.1372
+[2025-07-08 04:41:38] [Rank 0] Group 2 Loss: 4.4552
+[2025-07-08 04:41:38] [Rank 0] Group 3 Loss: 4.9242
+[2025-07-08 04:41:38] [Rank 0] Group 4 Loss: 4.8304
+[2025-07-08 04:41:38] [Rank 0] Group 5 Loss: 4.7054
+[2025-07-08 04:41:38] [Rank 0] Group 6 Loss: 4.6842
+[2025-07-08 04:41:38] [Rank 0] Group 7 Loss: 4.7477
+[2025-07-08 04:41:38] [Rank 0] Group 8 Loss: 4.8376
+[2025-07-08 04:41:38] [Rank 0] Group 9 Loss: 4.7567
+[2025-07-08 04:41:38] [Rank 0] Group 10 Loss: 4.8467
+[2025-07-08 04:41:38] [Rank 0] Group 11 Loss: 4.7911
+[2025-07-08 04:41:38] [Rank 0] Group 0 FTA: 0.6879
+[2025-07-08 04:41:38] [Rank 0] Group 1 FTA: 0.3229
+[2025-07-08 04:41:38] [Rank 0] Group 2 FTA: 0.3281
+[2025-07-08 04:41:38] [Rank 0] Group 3 FTA: 0.3203
+[2025-07-08 04:41:38] [Rank 0] Group 4 FTA: 0.3099
+[2025-07-08 04:41:38] [Rank 0] Group 5 FTA: 0.3932
+[2025-07-08 04:41:38] [Rank 0] Group 6 FTA: 0.3411
+[2025-07-08 04:41:38] [Rank 0] Group 7 FTA: 0.3984
+[2025-07-08 04:41:38] [Rank 0] Group 8 FTA: 0.3828
+[2025-07-08 04:41:38] [Rank 0] Group 9 FTA: 0.3555
+[2025-07-08 04:41:38] [Rank 0] Group 10 FTA: 0.4062
+[2025-07-08 04:41:38] [Rank 0] Group 11 FTA: 0.3643
+[2025-07-08 04:41:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:41:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:41:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:41:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:41:39] [Rank 0] step:7501/10000 train_time:607694ms step_avg:81.02ms
+[2025-07-08 04:41:41] [Rank 0] step:7521/10000 train_time:609199ms step_avg:81.00ms
+[2025-07-08 04:41:42] [Rank 0] step:7541/10000 train_time:610698ms step_avg:80.98ms
+[2025-07-08 04:41:44] [Rank 0] step:7561/10000 train_time:612875ms step_avg:81.06ms
+[2025-07-08 04:41:46] [Rank 0] step:7581/10000 train_time:614349ms step_avg:81.04ms
+[2025-07-08 04:41:47] [Rank 0] step:7601/10000 train_time:615843ms step_avg:81.02ms
+[2025-07-08 04:41:49] [Rank 0] step:7621/10000 train_time:617339ms step_avg:81.00ms
+[2025-07-08 04:41:50] [Rank 0] step:7641/10000 train_time:618834ms step_avg:80.99ms
+[2025-07-08 04:41:52] [Rank 0] step:7661/10000 train_time:620995ms step_avg:81.06ms
+[2025-07-08 04:41:54] [Rank 0] step:7681/10000 train_time:622490ms step_avg:81.04ms
+[2025-07-08 04:41:55] [Rank 0] step:7701/10000 train_time:623989ms step_avg:81.03ms
+[2025-07-08 04:41:57] [Rank 0] step:7721/10000 train_time:625490ms step_avg:81.01ms
+[2025-07-08 04:41:59] [Rank 0] step:7741/10000 train_time:626991ms step_avg:81.00ms
+[2025-07-08 04:42:00] [Rank 0] step:7761/10000 train_time:628927ms step_avg:81.04ms
+[2025-07-08 04:42:02] [Rank 0] step:7781/10000 train_time:630425ms step_avg:81.02ms
+[2025-07-08 04:42:03] [Rank 0] step:7801/10000 train_time:631925ms step_avg:81.01ms
+[2025-07-08 04:42:05] [Rank 0] step:7821/10000 train_time:633426ms step_avg:80.99ms
+[2025-07-08 04:42:07] [Rank 0] step:7841/10000 train_time:635161ms step_avg:81.01ms
+[2025-07-08 04:42:08] [Rank 0] step:7861/10000 train_time:636662ms step_avg:80.99ms
+[2025-07-08 04:42:10] [Rank 0] step:7881/10000 train_time:638161ms step_avg:80.97ms
+[2025-07-08 04:42:11] [Rank 0] step:7901/10000 train_time:639664ms step_avg:80.96ms
+[2025-07-08 04:42:13] [Rank 0] step:7921/10000 train_time:641424ms step_avg:80.98ms
+[2025-07-08 04:42:14] [Rank 0] step:7941/10000 train_time:642905ms step_avg:80.96ms
+[2025-07-08 04:42:16] [Rank 0] step:7961/10000 train_time:644408ms step_avg:80.95ms
+[2025-07-08 04:42:17] [Rank 0] step:7981/10000 train_time:645909ms step_avg:80.93ms
+[2025-07-08 04:42:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:42:20] [Rank 0] PRINT: step:8000/10000 train_loss:1.1980 val_loss:1.1898 train_time:647414ms step_avg:80.93ms
+[2025-07-08 04:42:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:42:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:42:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:47:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:47:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:47:48] [Rank 0] Total Loss: 4.8641
+[2025-07-08 04:47:48] [Rank 0] Total FTA: 0.4136
+[2025-07-08 04:47:48] [Rank 0] Group 0 Loss: 5.0033
+[2025-07-08 04:47:48] [Rank 0] Group 1 Loss: 5.0918
+[2025-07-08 04:47:48] [Rank 0] Group 2 Loss: 4.5221
+[2025-07-08 04:47:48] [Rank 0] Group 3 Loss: 5.0659
+[2025-07-08 04:47:48] [Rank 0] Group 4 Loss: 4.8583
+[2025-07-08 04:47:48] [Rank 0] Group 5 Loss: 4.7221
+[2025-07-08 04:47:48] [Rank 0] Group 6 Loss: 4.7841
+[2025-07-08 04:47:48] [Rank 0] Group 7 Loss: 4.8232
+[2025-07-08 04:47:48] [Rank 0] Group 8 Loss: 4.8355
+[2025-07-08 04:47:48] [Rank 0] Group 9 Loss: 4.8336
+[2025-07-08 04:47:48] [Rank 0] Group 10 Loss: 4.8605
+[2025-07-08 04:47:48] [Rank 0] Group 11 Loss: 4.8477
+[2025-07-08 04:47:48] [Rank 0] Group 0 FTA: 0.6853
+[2025-07-08 04:47:48] [Rank 0] Group 1 FTA: 0.3359
+[2025-07-08 04:47:48] [Rank 0] Group 2 FTA: 0.4844
+[2025-07-08 04:47:48] [Rank 0] Group 3 FTA: 0.2292
+[2025-07-08 04:47:48] [Rank 0] Group 4 FTA: 0.3021
+[2025-07-08 04:47:48] [Rank 0] Group 5 FTA: 0.3880
+[2025-07-08 04:47:48] [Rank 0] Group 6 FTA: 0.3516
+[2025-07-08 04:47:48] [Rank 0] Group 7 FTA: 0.4349
+[2025-07-08 04:47:48] [Rank 0] Group 8 FTA: 0.4115
+[2025-07-08 04:47:48] [Rank 0] Group 9 FTA: 0.3477
+[2025-07-08 04:47:48] [Rank 0] Group 10 FTA: 0.3809
+[2025-07-08 04:47:48] [Rank 0] Group 11 FTA: 0.3818
+[2025-07-08 04:47:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:47:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:47:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:47:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:47:50] [Rank 0] step:8001/10000 train_time:647436ms step_avg:80.92ms
+[2025-07-08 04:47:52] [Rank 0] step:8021/10000 train_time:649601ms step_avg:80.99ms
+[2025-07-08 04:47:54] [Rank 0] step:8041/10000 train_time:651093ms step_avg:80.97ms
+[2025-07-08 04:47:55] [Rank 0] step:8061/10000 train_time:652585ms step_avg:80.96ms
+[2025-07-08 04:47:56] [Rank 0] step:8081/10000 train_time:654080ms step_avg:80.94ms
+[2025-07-08 04:47:59] [Rank 0] step:8101/10000 train_time:655576ms step_avg:80.93ms
+[2025-07-08 04:48:00] [Rank 0] step:8121/10000 train_time:657714ms step_avg:80.99ms
+[2025-07-08 04:48:02] [Rank 0] step:8141/10000 train_time:659209ms step_avg:80.97ms
+[2025-07-08 04:48:03] [Rank 0] step:8161/10000 train_time:660708ms step_avg:80.96ms
+[2025-07-08 04:48:05] [Rank 0] step:8181/10000 train_time:662206ms step_avg:80.94ms
+[2025-07-08 04:48:07] [Rank 0] step:8201/10000 train_time:664353ms step_avg:81.01ms
+[2025-07-08 04:48:08] [Rank 0] step:8221/10000 train_time:665851ms step_avg:80.99ms
+[2025-07-08 04:48:10] [Rank 0] step:8241/10000 train_time:667349ms step_avg:80.98ms
+[2025-07-08 04:48:11] [Rank 0] step:8261/10000 train_time:668848ms step_avg:80.96ms
+[2025-07-08 04:48:13] [Rank 0] step:8281/10000 train_time:670602ms step_avg:80.98ms
+[2025-07-08 04:48:15] [Rank 0] step:8301/10000 train_time:672496ms step_avg:81.01ms
+[2025-07-08 04:48:16] [Rank 0] step:8321/10000 train_time:673996ms step_avg:81.00ms
+[2025-07-08 04:48:18] [Rank 0] step:8341/10000 train_time:675498ms step_avg:80.99ms
+[2025-07-08 04:48:20] [Rank 0] step:8361/10000 train_time:677255ms step_avg:81.00ms
+[2025-07-08 04:48:22] [Rank 0] step:8381/10000 train_time:679395ms step_avg:81.06ms
+[2025-07-08 04:48:23] [Rank 0] step:8401/10000 train_time:680892ms step_avg:81.05ms
+[2025-07-08 04:48:25] [Rank 0] step:8421/10000 train_time:682394ms step_avg:81.03ms
+[2025-07-08 04:48:26] [Rank 0] step:8441/10000 train_time:683895ms step_avg:81.02ms
+[2025-07-08 04:48:28] [Rank 0] step:8461/10000 train_time:685397ms step_avg:81.01ms
+[2025-07-08 04:48:29] [Rank 0] step:8481/10000 train_time:687033ms step_avg:81.01ms
+[2025-07-08 04:48:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:48:32] [Rank 0] PRINT: step:8500/10000 train_loss:1.1826 val_loss:1.1761 train_time:688533ms step_avg:81.00ms
+[2025-07-08 04:48:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:48:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:48:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:54:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:54:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:54:02] [Rank 0] Total Loss: 4.8721
+[2025-07-08 04:54:02] [Rank 0] Total FTA: 0.3925
+[2025-07-08 04:54:02] [Rank 0] Group 0 Loss: 5.1124
+[2025-07-08 04:54:02] [Rank 0] Group 1 Loss: 5.1449
+[2025-07-08 04:54:02] [Rank 0] Group 2 Loss: 4.4592
+[2025-07-08 04:54:02] [Rank 0] Group 3 Loss: 5.0723
+[2025-07-08 04:54:02] [Rank 0] Group 4 Loss: 4.8274
+[2025-07-08 04:54:02] [Rank 0] Group 5 Loss: 4.6721
+[2025-07-08 04:54:02] [Rank 0] Group 6 Loss: 4.7939
+[2025-07-08 04:54:02] [Rank 0] Group 7 Loss: 4.7655
+[2025-07-08 04:54:02] [Rank 0] Group 8 Loss: 4.7936
+[2025-07-08 04:54:02] [Rank 0] Group 9 Loss: 4.8112
+[2025-07-08 04:54:02] [Rank 0] Group 10 Loss: 4.8600
+[2025-07-08 04:54:02] [Rank 0] Group 11 Loss: 4.8806
+[2025-07-08 04:54:02] [Rank 0] Group 0 FTA: 0.4954
+[2025-07-08 04:54:02] [Rank 0] Group 1 FTA: 0.3411
+[2025-07-08 04:54:02] [Rank 0] Group 2 FTA: 0.4609
+[2025-07-08 04:54:02] [Rank 0] Group 3 FTA: 0.2604
+[2025-07-08 04:54:02] [Rank 0] Group 4 FTA: 0.2734
+[2025-07-08 04:54:02] [Rank 0] Group 5 FTA: 0.4609
+[2025-07-08 04:54:02] [Rank 0] Group 6 FTA: 0.4141
+[2025-07-08 04:54:02] [Rank 0] Group 7 FTA: 0.3776
+[2025-07-08 04:54:02] [Rank 0] Group 8 FTA: 0.3672
+[2025-07-08 04:54:02] [Rank 0] Group 9 FTA: 0.3945
+[2025-07-08 04:54:02] [Rank 0] Group 10 FTA: 0.3750
+[2025-07-08 04:54:02] [Rank 0] Group 11 FTA: 0.3926
+[2025-07-08 04:54:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 04:54:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 04:54:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 04:54:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 04:54:03] [Rank 0] step:8501/10000 train_time:688554ms step_avg:81.00ms
+[2025-07-08 04:54:05] [Rank 0] step:8521/10000 train_time:690058ms step_avg:80.98ms
+[2025-07-08 04:54:06] [Rank 0] step:8541/10000 train_time:691551ms step_avg:80.97ms
+[2025-07-08 04:54:09] [Rank 0] step:8561/10000 train_time:693699ms step_avg:81.03ms
+[2025-07-08 04:54:10] [Rank 0] step:8581/10000 train_time:695194ms step_avg:81.02ms
+[2025-07-08 04:54:12] [Rank 0] step:8601/10000 train_time:696689ms step_avg:81.00ms
+[2025-07-08 04:54:13] [Rank 0] step:8621/10000 train_time:698185ms step_avg:80.99ms
+[2025-07-08 04:54:15] [Rank 0] step:8641/10000 train_time:699681ms step_avg:80.97ms
+[2025-07-08 04:54:17] [Rank 0] step:8661/10000 train_time:701845ms step_avg:81.04ms
+[2025-07-08 04:54:18] [Rank 0] step:8681/10000 train_time:703343ms step_avg:81.02ms
+[2025-07-08 04:54:20] [Rank 0] step:8701/10000 train_time:704843ms step_avg:81.01ms
+[2025-07-08 04:54:21] [Rank 0] step:8721/10000 train_time:706343ms step_avg:80.99ms
+[2025-07-08 04:54:23] [Rank 0] step:8741/10000 train_time:708485ms step_avg:81.05ms
+[2025-07-08 04:54:25] [Rank 0] step:8761/10000 train_time:709984ms step_avg:81.04ms
+[2025-07-08 04:54:26] [Rank 0] step:8781/10000 train_time:711485ms step_avg:81.03ms
+[2025-07-08 04:54:28] [Rank 0] step:8801/10000 train_time:712987ms step_avg:81.01ms
+[2025-07-08 04:54:30] [Rank 0] step:8821/10000 train_time:715151ms step_avg:81.07ms
+[2025-07-08 04:54:31] [Rank 0] step:8841/10000 train_time:716631ms step_avg:81.06ms
+[2025-07-08 04:54:33] [Rank 0] step:8861/10000 train_time:718131ms step_avg:81.04ms
+[2025-07-08 04:54:34] [Rank 0] step:8881/10000 train_time:719633ms step_avg:81.03ms
+[2025-07-08 04:54:36] [Rank 0] step:8901/10000 train_time:721133ms step_avg:81.02ms
+[2025-07-08 04:54:38] [Rank 0] step:8921/10000 train_time:723404ms step_avg:81.09ms
+[2025-07-08 04:54:40] [Rank 0] step:8941/10000 train_time:724905ms step_avg:81.08ms
+[2025-07-08 04:54:41] [Rank 0] step:8961/10000 train_time:726407ms step_avg:81.06ms
+[2025-07-08 04:54:43] [Rank 0] step:8981/10000 train_time:727911ms step_avg:81.05ms
+[2025-07-08 04:54:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:54:45] [Rank 0] PRINT: step:9000/10000 train_loss:1.1696 val_loss:1.1642 train_time:729415ms step_avg:81.05ms
+[2025-07-08 04:54:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:54:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:54:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:00:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:00:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:00:15] [Rank 0] Total Loss: 4.9517
+[2025-07-08 05:00:15] [Rank 0] Total FTA: 0.4301
+[2025-07-08 05:00:15] [Rank 0] Group 0 Loss: 5.0923
+[2025-07-08 05:00:15] [Rank 0] Group 1 Loss: 5.2861
+[2025-07-08 05:00:15] [Rank 0] Group 2 Loss: 4.5051
+[2025-07-08 05:00:15] [Rank 0] Group 3 Loss: 4.9711
+[2025-07-08 05:00:15] [Rank 0] Group 4 Loss: 4.9879
+[2025-07-08 05:00:15] [Rank 0] Group 5 Loss: 4.8492
+[2025-07-08 05:00:15] [Rank 0] Group 6 Loss: 4.8693
+[2025-07-08 05:00:15] [Rank 0] Group 7 Loss: 4.9038
+[2025-07-08 05:00:15] [Rank 0] Group 8 Loss: 4.9919
+[2025-07-08 05:00:15] [Rank 0] Group 9 Loss: 4.9698
+[2025-07-08 05:00:15] [Rank 0] Group 10 Loss: 4.9691
+[2025-07-08 05:00:15] [Rank 0] Group 11 Loss: 4.9264
+[2025-07-08 05:00:15] [Rank 0] Group 0 FTA: 0.6580
+[2025-07-08 05:00:15] [Rank 0] Group 1 FTA: 0.3750
+[2025-07-08 05:00:15] [Rank 0] Group 2 FTA: 0.4427
+[2025-07-08 05:00:15] [Rank 0] Group 3 FTA: 0.2240
+[2025-07-08 05:00:15] [Rank 0] Group 4 FTA: 0.2891
+[2025-07-08 05:00:15] [Rank 0] Group 5 FTA: 0.4922
+[2025-07-08 05:00:15] [Rank 0] Group 6 FTA: 0.4557
+[2025-07-08 05:00:15] [Rank 0] Group 7 FTA: 0.4427
+[2025-07-08 05:00:15] [Rank 0] Group 8 FTA: 0.3984
+[2025-07-08 05:00:15] [Rank 0] Group 9 FTA: 0.3711
+[2025-07-08 05:00:15] [Rank 0] Group 10 FTA: 0.4062
+[2025-07-08 05:00:15] [Rank 0] Group 11 FTA: 0.4062
+[2025-07-08 05:00:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 05:00:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 05:00:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 05:00:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 05:00:17] [Rank 0] step:9001/10000 train_time:730171ms step_avg:81.12ms
+[2025-07-08 05:00:19] [Rank 0] step:9021/10000 train_time:731675ms step_avg:81.11ms
+[2025-07-08 05:00:20] [Rank 0] step:9041/10000 train_time:733169ms step_avg:81.09ms
+[2025-07-08 05:00:22] [Rank 0] step:9061/10000 train_time:734663ms step_avg:81.08ms
+[2025-07-08 05:00:23] [Rank 0] step:9081/10000 train_time:736156ms step_avg:81.07ms
+[2025-07-08 05:00:26] [Rank 0] step:9101/10000 train_time:738319ms step_avg:81.13ms
+[2025-07-08 05:00:27] [Rank 0] step:9121/10000 train_time:739813ms step_avg:81.11ms
+[2025-07-08 05:00:29] [Rank 0] step:9141/10000 train_time:741308ms step_avg:81.10ms
+[2025-07-08 05:00:30] [Rank 0] step:9161/10000 train_time:742805ms step_avg:81.08ms
+[2025-07-08 05:00:32] [Rank 0] step:9181/10000 train_time:744303ms step_avg:81.07ms
+[2025-07-08 05:00:34] [Rank 0] step:9201/10000 train_time:746467ms step_avg:81.13ms
+[2025-07-08 05:00:35] [Rank 0] step:9221/10000 train_time:747965ms step_avg:81.12ms
+[2025-07-08 05:00:37] [Rank 0] step:9241/10000 train_time:749464ms step_avg:81.10ms
+[2025-07-08 05:00:38] [Rank 0] step:9261/10000 train_time:750963ms step_avg:81.09ms
+[2025-07-08 05:00:40] [Rank 0] step:9281/10000 train_time:753103ms step_avg:81.14ms
+[2025-07-08 05:00:42] [Rank 0] step:9301/10000 train_time:754600ms step_avg:81.13ms
+[2025-07-08 05:00:43] [Rank 0] step:9321/10000 train_time:756101ms step_avg:81.12ms
+[2025-07-08 05:00:45] [Rank 0] step:9341/10000 train_time:757605ms step_avg:81.11ms
+[2025-07-08 05:00:47] [Rank 0] step:9361/10000 train_time:759160ms step_avg:81.10ms
+[2025-07-08 05:00:48] [Rank 0] step:9381/10000 train_time:760844ms step_avg:81.10ms
+[2025-07-08 05:00:50] [Rank 0] step:9401/10000 train_time:762344ms step_avg:81.09ms
+[2025-07-08 05:00:51] [Rank 0] step:9421/10000 train_time:763845ms step_avg:81.08ms
+[2025-07-08 05:00:53] [Rank 0] step:9441/10000 train_time:765346ms step_avg:81.07ms
+[2025-07-08 05:00:54] [Rank 0] step:9461/10000 train_time:767086ms step_avg:81.08ms
+[2025-07-08 05:00:56] [Rank 0] step:9481/10000 train_time:768588ms step_avg:81.07ms
+[2025-07-08 05:00:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:00:58] [Rank 0] PRINT: step:9500/10000 train_loss:1.1591 val_loss:1.1560 train_time:770343ms step_avg:81.09ms
+[2025-07-08 05:00:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:00:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:00:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:06:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:06:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:06:27] [Rank 0] Total Loss: 4.9462
+[2025-07-08 05:06:27] [Rank 0] Total FTA: 0.4213
+[2025-07-08 05:06:27] [Rank 0] Group 0 Loss: 5.1363
+[2025-07-08 05:06:27] [Rank 0] Group 1 Loss: 5.3533
+[2025-07-08 05:06:27] [Rank 0] Group 2 Loss: 4.4946
+[2025-07-08 05:06:27] [Rank 0] Group 3 Loss: 4.9421
+[2025-07-08 05:06:27] [Rank 0] Group 4 Loss: 4.8889
+[2025-07-08 05:06:27] [Rank 0] Group 5 Loss: 4.7506
+[2025-07-08 05:06:27] [Rank 0] Group 6 Loss: 4.8757
+[2025-07-08 05:06:27] [Rank 0] Group 7 Loss: 4.9340
+[2025-07-08 05:06:27] [Rank 0] Group 8 Loss: 4.8955
+[2025-07-08 05:06:27] [Rank 0] Group 9 Loss: 4.9769
+[2025-07-08 05:06:27] [Rank 0] Group 10 Loss: 4.9353
+[2025-07-08 05:06:27] [Rank 0] Group 11 Loss: 4.9641
+[2025-07-08 05:06:27] [Rank 0] Group 0 FTA: 0.6723
+[2025-07-08 05:06:27] [Rank 0] Group 1 FTA: 0.2917
+[2025-07-08 05:06:27] [Rank 0] Group 2 FTA: 0.3490
+[2025-07-08 05:06:27] [Rank 0] Group 3 FTA: 0.1901
+[2025-07-08 05:06:27] [Rank 0] Group 4 FTA: 0.2734
+[2025-07-08 05:06:27] [Rank 0] Group 5 FTA: 0.4974
+[2025-07-08 05:06:27] [Rank 0] Group 6 FTA: 0.3958
+[2025-07-08 05:06:27] [Rank 0] Group 7 FTA: 0.4688
+[2025-07-08 05:06:27] [Rank 0] Group 8 FTA: 0.4427
+[2025-07-08 05:06:27] [Rank 0] Group 9 FTA: 0.4297
+[2025-07-08 05:06:27] [Rank 0] Group 10 FTA: 0.4160
+[2025-07-08 05:06:27] [Rank 0] Group 11 FTA: 0.4062
+[2025-07-08 05:06:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 05:06:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 05:06:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 05:06:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 05:06:29] [Rank 0] step:9501/10000 train_time:770365ms step_avg:81.08ms
+[2025-07-08 05:06:30] [Rank 0] step:9521/10000 train_time:771877ms step_avg:81.07ms
+[2025-07-08 05:06:33] [Rank 0] step:9541/10000 train_time:774052ms step_avg:81.13ms
+[2025-07-08 05:06:34] [Rank 0] step:9561/10000 train_time:775525ms step_avg:81.11ms
+[2025-07-08 05:06:36] [Rank 0] step:9581/10000 train_time:777018ms step_avg:81.10ms
+[2025-07-08 05:06:37] [Rank 0] step:9601/10000 train_time:778514ms step_avg:81.09ms
+[2025-07-08 05:06:39] [Rank 0] step:9621/10000 train_time:780011ms step_avg:81.07ms
+[2025-07-08 05:06:40] [Rank 0] step:9641/10000 train_time:781740ms step_avg:81.08ms
+[2025-07-08 05:06:42] [Rank 0] step:9661/10000 train_time:783237ms step_avg:81.07ms
+[2025-07-08 05:06:43] [Rank 0] step:9681/10000 train_time:784734ms step_avg:81.06ms
+[2025-07-08 05:06:45] [Rank 0] step:9701/10000 train_time:786230ms step_avg:81.05ms
+[2025-07-08 05:06:47] [Rank 0] step:9721/10000 train_time:788390ms step_avg:81.10ms
+[2025-07-08 05:06:48] [Rank 0] step:9741/10000 train_time:789864ms step_avg:81.09ms
+[2025-07-08 05:06:50] [Rank 0] step:9761/10000 train_time:791359ms step_avg:81.07ms
+[2025-07-08 05:06:51] [Rank 0] step:9781/10000 train_time:792857ms step_avg:81.06ms
+[2025-07-08 05:06:53] [Rank 0] step:9801/10000 train_time:794355ms step_avg:81.05ms
+[2025-07-08 05:06:55] [Rank 0] step:9821/10000 train_time:796507ms step_avg:81.10ms
+[2025-07-08 05:06:57] [Rank 0] step:9841/10000 train_time:798005ms step_avg:81.09ms
+[2025-07-08 05:06:58] [Rank 0] step:9861/10000 train_time:799502ms step_avg:81.08ms
+[2025-07-08 05:07:00] [Rank 0] step:9881/10000 train_time:801001ms step_avg:81.06ms
+[2025-07-08 05:07:02] [Rank 0] step:9901/10000 train_time:802499ms step_avg:81.05ms
+[2025-07-08 05:07:03] [Rank 0] step:9921/10000 train_time:804648ms step_avg:81.11ms
+[2025-07-08 05:07:05] [Rank 0] step:9941/10000 train_time:806151ms step_avg:81.09ms
+[2025-07-08 05:07:06] [Rank 0] step:9961/10000 train_time:807650ms step_avg:81.08ms
+[2025-07-08 05:07:08] [Rank 0] step:9981/10000 train_time:809150ms step_avg:81.07ms
+[2025-07-08 05:07:10] [Rank 0] step:10000/10000 train_time:811214ms step_avg:81.12ms
+[2025-07-08 05:07:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:07:11] [Rank 0] PRINT: step:10000/10000 train_loss:1.1516 val_loss:1.1502 train_time:811297ms step_avg:81.13ms
+[2025-07-08 05:07:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:07:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:07:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:12:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:12:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:12:40] [Rank 0] Total Loss: 4.9923
+[2025-07-08 05:12:40] [Rank 0] Total FTA: 0.4301
+[2025-07-08 05:12:40] [Rank 0] Group 0 Loss: 5.1559
+[2025-07-08 05:12:40] [Rank 0] Group 1 Loss: 5.3859
+[2025-07-08 05:12:40] [Rank 0] Group 2 Loss: 4.6512
+[2025-07-08 05:12:40] [Rank 0] Group 3 Loss: 4.9906
+[2025-07-08 05:12:40] [Rank 0] Group 4 Loss: 5.0210
+[2025-07-08 05:12:40] [Rank 0] Group 5 Loss: 4.8458
+[2025-07-08 05:12:40] [Rank 0] Group 6 Loss: 4.8397
+[2025-07-08 05:12:40] [Rank 0] Group 7 Loss: 4.9417
+[2025-07-08 05:12:40] [Rank 0] Group 8 Loss: 4.9613
+[2025-07-08 05:12:40] [Rank 0] Group 9 Loss: 4.9926
+[2025-07-08 05:12:40] [Rank 0] Group 10 Loss: 4.9521
+[2025-07-08 05:12:40] [Rank 0] Group 11 Loss: 5.0024
+[2025-07-08 05:12:40] [Rank 0] Group 0 FTA: 0.6398
+[2025-07-08 05:12:40] [Rank 0] Group 1 FTA: 0.3281
+[2025-07-08 05:12:40] [Rank 0] Group 2 FTA: 0.3177
+[2025-07-08 05:12:40] [Rank 0] Group 3 FTA: 0.2682
+[2025-07-08 05:12:40] [Rank 0] Group 4 FTA: 0.3568
+[2025-07-08 05:12:40] [Rank 0] Group 5 FTA: 0.4818
+[2025-07-08 05:12:40] [Rank 0] Group 6 FTA: 0.3984
+[2025-07-08 05:12:40] [Rank 0] Group 7 FTA: 0.4297
+[2025-07-08 05:12:40] [Rank 0] Group 8 FTA: 0.4219
+[2025-07-08 05:12:40] [Rank 0] Group 9 FTA: 0.4336
+[2025-07-08 05:12:40] [Rank 0] Group 10 FTA: 0.4316
+[2025-07-08 05:12:40] [Rank 0] Group 11 FTA: 0.4355
+[2025-07-08 05:12:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_loss_curves.png
+[2025-07-08 05:12:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/per_class_acc_curves.png
+[2025-07-08 05:12:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_loss_curve.png
+[2025-07-08 05:12:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_43/total_acc_curve.png
+[2025-07-08 05:12:42] [Rank 0] step:10001/10000 train_time:811317ms step_avg:81.12ms
+[2025-07-08 05:12:42] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 05:12:42 2025 ---
+[2025-07-08 05:12:42] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9916 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..94bb04ca110df636507008301a576745da8057b9
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 45,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "2ce76235-4eaf-47e4-ad7f-6ac3ecb66647",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..0044ab84234337dbd50634cded38d2743301a5bc
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fa578ea3e057098cc47d7c47568464dd9e3f761d7bc177adc23205dea1395061
+size 403643
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..353026a6c2ae51c4d8a1922ebb724a07f2529362
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:032ff8724655168f5d4e67cdf6211a000ad4451e57229e46762113773904f755
+size 309085
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..479772281b9a4963fec1f5a1c0b1fa6d5e2f9f94
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:de3719e1b215af761b14d401f8dd693ece684487e1c4523e5403dafbde61146f
+size 100236
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..92150f1007e802aff3367ef5fc1566b49bffe489
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3c1938ba6ce2bbdc1fda4cc973a08db246fe0026a87107922086cda098b328f0
+size 116968
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/training_log_2ce76235-4eaf-47e4-ad7f-6ac3ecb66647.txt b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/training_log_2ce76235-4eaf-47e4-ad7f-6ac3ecb66647.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8f795cf6bf73048488d449e69826ec85b2c3b9ea
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/training_log_2ce76235-4eaf-47e4-ad7f-6ac3ecb66647.txt
@@ -0,0 +1,5132 @@
+[2025-07-07 01:58:23] [Rank 0] PRINT: --- Script Start: Mon Jul 7 01:58:23 2025 ---
+[2025-07-07 01:58:23] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002)
+[2025-07-07 01:58:23] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 01:58:23] [Rank 0] PRINT: Using fixed seed: 45
+[2025-07-07 01:58:23] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45
+[2025-07-07 01:58:23] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle restarts at the first shard, so this loader supports multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
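+                         # (each mode above is wired to concrete parameter groups in the
+                         # optimizer setup below, under "Configuring optimizers for EXPERIMENT_MODE")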
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
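+# Hedged usage sketch (not part of the original run): the run-folder name above encodes
+# the CLI args, so the seed-45 config logged in this file corresponds to a launch like
+#     torchrun --nproc_per_node=<N> <this_script>.py --optimizer_mode 0 \
+#         --model_parameterization qkvo --adam_lr 0.0002 --seed 45
+# where the script name and GPU count are placeholders; RANK/LOCAL_RANK/WORLD_SIZE are
+# read from the environment that the launcher sets (see the DDP setup above).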
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append a single copy of the message to the run log
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
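+        # Worked example (hypothetical record, for illustration only): if cleaned_text is
+        #   "Where did Alice study? MIT"
+        # then prompt == "Where did Alice study?", answer == "MIT", and
+        # expected_token == tokenizer.encode(" MIT", add_special_tokens=False)[0]
+        # (the leading space mirrors how the answer follows the '?' in training text).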
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
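+# A minimal sanity sketch (illustrative addition, assuming the get_lr and
+# Hyperparameters definitions above): with cooldown_frac=0.8 the LR
+# multiplier should hold at 1.0 through the first 20% of steps, then decay
+# linearly toward 0.1 at step == num_iterations.
+if master_process:
+    for _probe_step in (0, args.num_iterations // 5, args.num_iterations):
+        print0(f"get_lr({_probe_step}) = {get_lr(_probe_step):.3f}", console=False)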
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-07 01:58:23] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)
# Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # cycle() wraps around the shard list, so training can run past one epoch of data
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
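+# Quick-reference map of the --optimizer_mode values (an assumed summary that
+# mirrors the CLI help above; purely a readability aid, not consumed by the
+# training logic below).
+OPTIMIZER_MODE_SUMMARY = {
+    0: "Muon: all attn (QKVO) + all MLP matrices",
+    1: "Muon: QK attn | Adam: VO attn + MLP",
+    2: "Muon: VO attn | Adam: QK attn + MLP",
+    3: "Muon: all attn | Adam: MLP",
+    4: "Muon: MLP | Adam: all attn",
+    5: "Adam: all matrices (no Muon)",
+    6: "Muon: MLP W_2 | Adam: attn + MLP W_1",
+    7: "Muon: VO attn + MLP | Adam: QK attn",
+    8: "Muon: VO attn + MLP W_2 | Adam: QK attn + MLP W_1",
+}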
+base_log_dir = Path("logs_bios/qa_0704")  # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Write the message to the log file once (a duplicated write block here
+        # previously caused every line to be logged twice)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
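+# Sketch of the attention-window warmup defined above (illustrative addition;
+# assumes the 1728-token target coded in get_window_size_blocks): the sliding
+# window grows from 128 tokens (1 block) at step 0 to 1792 tokens (14 blocks)
+# by the final step.
+if master_process:
+    for _probe_step in (0, args.num_iterations // 2, args.num_iterations):
+        _blocks = int(get_window_size_blocks(_probe_step).item())
+        print0(f"window_size_blocks({_probe_step}) = {_blocks}", console=False)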
+# attention window size schedule: the sliding window grows linearly with training progress
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # Clamp rather than assert, as in get_lr, so the final step does not crash.
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)
+    # Ensure the window is at least one 128-token block.
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
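+# Worked example (num_iterations=10000): step 0 -> window 128 tokens (1 block);
+# step 5000 -> 1728*0.5 = 864, rounded up to 896 tokens (7 blocks);
+# step 10000 -> 1728, rounded up to 1792 tokens (14 blocks).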
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile 'model'; the uncompiled reference is kept for inference.
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+# Snapshot model and optimizer state so the warmup steps leave the real run untouched.
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+
+# Restore the pre-warmup state.
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        # Average the per-interval training loss across steps and ranks.
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        # First-token accuracy was measured here in an earlier revision; kept for reference:
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+        #if world_size > 1:
+        #    ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #    ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #    ft_total_tensor = torch.tensor(ft_total, device=device)
+        #    dist.broadcast(ft_acc_tensor, 0)
+        #    dist.broadcast(ft_correct_tensor, 0)
+        #    dist.broadcast(ft_total_tensor, 0)
+        #    first_token_acc = ft_acc_tensor.item()
+        #    ft_correct = int(ft_correct_tensor.item())
+        #    ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        # Detailed per-group evaluation (loss and first-token accuracy) on the QA set.
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
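+            # e.g., after the step-500 and step-1000 evaluations in the log below,
+            # history['total_loss'] holds {'500': 7.6695, '1000': 6.1083}, and each
+            # history['per_class_loss'][group_id] maps step strings to that group's loss.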
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 01:58:23] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 01:58:23] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-07 01:58:23] [Rank 0] PRINT: Constructing model...
+[2025-07-07 01:58:25] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 01:58:25] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 01:58:25] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 01:58:26] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 01:58:26] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 01:58:26] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 01:58:26] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 01:58:26] [Rank 0] PRINT: Model returns:
+[2025-07-07 01:58:26] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 01:58:26] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-07 01:58:26] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-07 01:58:26] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-07 01:58:26] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-07 01:58:26] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 01:58:26] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 01:58:26] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 02:00:01] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 02:00:01] [Rank 0] PRINT: Starting training...
+[2025-07-07 02:00:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:00:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 02:00:11] [Rank 0] step:21/10000 train_time:1553ms step_avg:73.95ms
+[2025-07-07 02:00:13] [Rank 0] step:41/10000 train_time:3003ms step_avg:73.24ms
+[2025-07-07 02:00:14] [Rank 0] step:61/10000 train_time:4455ms step_avg:73.04ms
+[2025-07-07 02:00:15] [Rank 0] step:81/10000 train_time:5909ms step_avg:72.95ms
+[2025-07-07 02:00:17] [Rank 0] step:101/10000 train_time:7605ms step_avg:75.30ms
+[2025-07-07 02:00:19] [Rank 0] step:121/10000 train_time:9059ms step_avg:74.87ms
+[2025-07-07 02:00:20] [Rank 0] step:141/10000 train_time:10511ms step_avg:74.55ms
+[2025-07-07 02:00:22] [Rank 0] step:161/10000 train_time:11963ms step_avg:74.30ms
+[2025-07-07 02:00:24] [Rank 0] step:181/10000 train_time:13471ms step_avg:74.42ms
+[2025-07-07 02:00:25] [Rank 0] step:201/10000 train_time:15523ms step_avg:77.23ms
+[2025-07-07 02:00:27] [Rank 0] step:221/10000 train_time:16975ms step_avg:76.81ms
+[2025-07-07 02:00:28] [Rank 0] step:241/10000 train_time:18432ms step_avg:76.48ms
+[2025-07-07 02:00:29] [Rank 0] step:261/10000 train_time:19889ms step_avg:76.20ms
+[2025-07-07 02:00:32] [Rank 0] step:281/10000 train_time:21992ms step_avg:78.27ms
+[2025-07-07 02:00:33] [Rank 0] step:301/10000 train_time:23451ms step_avg:77.91ms
+[2025-07-07 02:00:34] [Rank 0] step:321/10000 train_time:24910ms step_avg:77.60ms
+[2025-07-07 02:00:36] [Rank 0] step:341/10000 train_time:26372ms step_avg:77.34ms
+[2025-07-07 02:00:38] [Rank 0] step:361/10000 train_time:27886ms step_avg:77.25ms
+[2025-07-07 02:00:40] [Rank 0] step:381/10000 train_time:29958ms step_avg:78.63ms
+[2025-07-07 02:00:41] [Rank 0] step:401/10000 train_time:31419ms step_avg:78.35ms
+[2025-07-07 02:00:42] [Rank 0] step:421/10000 train_time:32878ms step_avg:78.09ms
+[2025-07-07 02:00:44] [Rank 0] step:441/10000 train_time:34338ms step_avg:77.86ms
+[2025-07-07 02:00:46] [Rank 0] step:461/10000 train_time:36457ms step_avg:79.08ms
+[2025-07-07 02:00:47] [Rank 0] step:481/10000 train_time:37917ms step_avg:78.83ms
+[2025-07-07 02:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:00:50] [Rank 0] PRINT: step:500/10000 train_loss:8.7313 val_loss:7.1019 train_time:39378ms step_avg:78.76ms
+[2025-07-07 02:00:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:00:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:00:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:06:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:06:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:06:09] [Rank 0] Total Loss: 7.6695
+[2025-07-07 02:06:09] [Rank 0] Total FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 0 Loss: 7.6790
+[2025-07-07 02:06:09] [Rank 0] Group 1 Loss: 7.6257
+[2025-07-07 02:06:09] [Rank 0] Group 2 Loss: 7.7775
+[2025-07-07 02:06:09] [Rank 0] Group 3 Loss: 7.6363
+[2025-07-07 02:06:09] [Rank 0] Group 4 Loss: 7.6926
+[2025-07-07 02:06:09] [Rank 0] Group 5 Loss: 7.6359
+[2025-07-07 02:06:09] [Rank 0] Group 6 Loss: 7.6749
+[2025-07-07 02:06:09] [Rank 0] Group 7 Loss: 7.6601
+[2025-07-07 02:06:09] [Rank 0] Group 8 Loss: 7.6408
+[2025-07-07 02:06:09] [Rank 0] Group 9 Loss: 7.6662
+[2025-07-07 02:06:09] [Rank 0] Group 10 Loss: 7.6761
+[2025-07-07 02:06:09] [Rank 0] Group 11 Loss: 7.6646
+[2025-07-07 02:06:09] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 02:06:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 02:06:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 02:06:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:06:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:06:10] [Rank 0] step:501/10000 train_time:39398ms step_avg:78.64ms
+[2025-07-07 02:06:12] [Rank 0] step:521/10000 train_time:41036ms step_avg:78.76ms
+[2025-07-07 02:06:14] [Rank 0] step:541/10000 train_time:42486ms step_avg:78.53ms
+[2025-07-07 02:06:15] [Rank 0] step:561/10000 train_time:44594ms step_avg:79.49ms
+[2025-07-07 02:06:17] [Rank 0] step:581/10000 train_time:46049ms step_avg:79.26ms
+[2025-07-07 02:06:18] [Rank 0] step:601/10000 train_time:47507ms step_avg:79.05ms
+[2025-07-07 02:06:20] [Rank 0] step:621/10000 train_time:48964ms step_avg:78.85ms
+[2025-07-07 02:06:22] [Rank 0] step:641/10000 train_time:51066ms step_avg:79.67ms
+[2025-07-07 02:06:23] [Rank 0] step:661/10000 train_time:52525ms step_avg:79.46ms
+[2025-07-07 02:06:25] [Rank 0] step:681/10000 train_time:53985ms step_avg:79.27ms
+[2025-07-07 02:06:26] [Rank 0] step:701/10000 train_time:55445ms step_avg:79.09ms
+[2025-07-07 02:06:28] [Rank 0] step:721/10000 train_time:56960ms step_avg:79.00ms
+[2025-07-07 02:06:29] [Rank 0] step:741/10000 train_time:58501ms step_avg:78.95ms
+[2025-07-07 02:06:31] [Rank 0] step:761/10000 train_time:59973ms step_avg:78.81ms
+[2025-07-07 02:06:32] [Rank 0] step:781/10000 train_time:61445ms step_avg:78.67ms
+[2025-07-07 02:06:34] [Rank 0] step:801/10000 train_time:62919ms step_avg:78.55ms
+[2025-07-07 02:06:35] [Rank 0] step:821/10000 train_time:64625ms step_avg:78.71ms
+[2025-07-07 02:06:37] [Rank 0] step:841/10000 train_time:66100ms step_avg:78.60ms
+[2025-07-07 02:06:38] [Rank 0] step:861/10000 train_time:67576ms step_avg:78.49ms
+[2025-07-07 02:06:40] [Rank 0] step:881/10000 train_time:69051ms step_avg:78.38ms
+[2025-07-07 02:06:41] [Rank 0] step:901/10000 train_time:70780ms step_avg:78.56ms
+[2025-07-07 02:06:43] [Rank 0] step:921/10000 train_time:72239ms step_avg:78.44ms
+[2025-07-07 02:06:44] [Rank 0] step:941/10000 train_time:73717ms step_avg:78.34ms
+[2025-07-07 02:06:46] [Rank 0] step:961/10000 train_time:75193ms step_avg:78.25ms
+[2025-07-07 02:06:47] [Rank 0] step:981/10000 train_time:76671ms step_avg:78.16ms
+[2025-07-07 02:06:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:06:50] [Rank 0] PRINT: step:1000/10000 train_loss:6.0328 val_loss:5.1164 train_time:78384ms step_avg:78.38ms
+[2025-07-07 02:06:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:06:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:06:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:12:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:12:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:12:09] [Rank 0] Total Loss: 6.1083
+[2025-07-07 02:12:09] [Rank 0] Total FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 0 Loss: 6.0308
+[2025-07-07 02:12:09] [Rank 0] Group 1 Loss: 6.1955
+[2025-07-07 02:12:09] [Rank 0] Group 2 Loss: 6.1783
+[2025-07-07 02:12:09] [Rank 0] Group 3 Loss: 6.1067
+[2025-07-07 02:12:09] [Rank 0] Group 4 Loss: 6.1331
+[2025-07-07 02:12:09] [Rank 0] Group 5 Loss: 6.0882
+[2025-07-07 02:12:09] [Rank 0] Group 6 Loss: 6.1077
+[2025-07-07 02:12:09] [Rank 0] Group 7 Loss: 6.1263
+[2025-07-07 02:12:09] [Rank 0] Group 8 Loss: 6.0755
+[2025-07-07 02:12:09] [Rank 0] Group 9 Loss: 6.1023
+[2025-07-07 02:12:09] [Rank 0] Group 10 Loss: 6.1057
+[2025-07-07 02:12:09] [Rank 0] Group 11 Loss: 6.1149
+[2025-07-07 02:12:09] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 02:12:09] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 02:12:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 02:12:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 02:12:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:12:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:12:11] [Rank 0] step:1001/10000 train_time:78406ms step_avg:78.33ms
+[2025-07-07 02:12:12] [Rank 0] step:1021/10000 train_time:79883ms step_avg:78.24ms
+[2025-07-07 02:12:14] [Rank 0] step:1041/10000 train_time:81348ms step_avg:78.14ms
+[2025-07-07 02:12:15] [Rank 0] step:1061/10000 train_time:82816ms step_avg:78.05ms
+[2025-07-07 02:12:17] [Rank 0] step:1081/10000 train_time:84336ms step_avg:78.02ms
+[2025-07-07 02:12:19] [Rank 0] step:1101/10000 train_time:86410ms step_avg:78.48ms
+[2025-07-07 02:12:20] [Rank 0] step:1121/10000 train_time:87878ms step_avg:78.39ms
+[2025-07-07 02:12:22] [Rank 0] step:1141/10000 train_time:89350ms step_avg:78.31ms
+[2025-07-07 02:12:23] [Rank 0] step:1161/10000 train_time:90822ms step_avg:78.23ms
+[2025-07-07 02:12:25] [Rank 0] step:1181/10000 train_time:92937ms step_avg:78.69ms
+[2025-07-07 02:12:27] [Rank 0] step:1201/10000 train_time:94410ms step_avg:78.61ms
+[2025-07-07 02:12:28] [Rank 0] step:1221/10000 train_time:95883ms step_avg:78.53ms
+[2025-07-07 02:12:30] [Rank 0] step:1241/10000 train_time:97358ms step_avg:78.45ms
+[2025-07-07 02:12:31] [Rank 0] step:1261/10000 train_time:99268ms step_avg:78.72ms
+[2025-07-07 02:12:33] [Rank 0] step:1281/10000 train_time:100723ms step_avg:78.63ms
+[2025-07-07 02:12:34] [Rank 0] step:1301/10000 train_time:102199ms step_avg:78.55ms
+[2025-07-07 02:12:36] [Rank 0] step:1321/10000 train_time:103672ms step_avg:78.48ms
+[2025-07-07 02:12:37] [Rank 0] step:1341/10000 train_time:105150ms step_avg:78.41ms
+[2025-07-07 02:12:39] [Rank 0] step:1361/10000 train_time:107274ms step_avg:78.82ms
+[2025-07-07 02:12:41] [Rank 0] step:1381/10000 train_time:108752ms step_avg:78.75ms
+[2025-07-07 02:12:42] [Rank 0] step:1401/10000 train_time:110228ms step_avg:78.68ms
+[2025-07-07 02:12:44] [Rank 0] step:1421/10000 train_time:111709ms step_avg:78.61ms
+[2025-07-07 02:12:46] [Rank 0] step:1441/10000 train_time:113240ms step_avg:78.58ms
+[2025-07-07 02:12:48] [Rank 0] step:1461/10000 train_time:115315ms step_avg:78.93ms
+[2025-07-07 02:12:49] [Rank 0] step:1481/10000 train_time:116793ms step_avg:78.86ms
+[2025-07-07 02:12:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:12:51] [Rank 0] PRINT: step:1500/10000 train_loss:4.3998 val_loss:3.7089 train_time:118273ms step_avg:78.85ms
+[2025-07-07 02:12:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:12:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:12:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:18:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:18:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:18:12] [Rank 0] Total Loss: 5.1773
+[2025-07-07 02:18:12] [Rank 0] Total FTA: 0.0859
+[2025-07-07 02:18:12] [Rank 0] Group 0 Loss: 5.1696
+[2025-07-07 02:18:12] [Rank 0] Group 1 Loss: 5.2665
+[2025-07-07 02:18:12] [Rank 0] Group 2 Loss: 5.1597
+[2025-07-07 02:18:12] [Rank 0] Group 3 Loss: 5.1127
+[2025-07-07 02:18:12] [Rank 0] Group 4 Loss: 5.1903
+[2025-07-07 02:18:12] [Rank 0] Group 5 Loss: 5.1698
+[2025-07-07 02:18:12] [Rank 0] Group 6 Loss: 5.1632
+[2025-07-07 02:18:12] [Rank 0] Group 7 Loss: 5.2143
+[2025-07-07 02:18:12] [Rank 0] Group 8 Loss: 5.1624
+[2025-07-07 02:18:12] [Rank 0] Group 9 Loss: 5.1434
+[2025-07-07 02:18:12] [Rank 0] Group 10 Loss: 5.1744
+[2025-07-07 02:18:12] [Rank 0] Group 11 Loss: 5.1852
+[2025-07-07 02:18:12] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 02:18:12] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 02:18:12] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 02:18:12] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 02:18:12] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-07 02:18:12] [Rank 0] Group 5 FTA: 0.0573
+[2025-07-07 02:18:12] [Rank 0] Group 6 FTA: 0.0729
+[2025-07-07 02:18:12] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-07 02:18:12] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 02:18:12] [Rank 0] Group 9 FTA: 0.1094
+[2025-07-07 02:18:12] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-07 02:18:12] [Rank 0] Group 11 FTA: 0.0840
+[2025-07-07 02:18:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 02:18:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 02:18:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:18:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:18:14] [Rank 0] step:1501/10000 train_time:118294ms step_avg:78.81ms
+[2025-07-07 02:18:15] [Rank 0] step:1521/10000 train_time:119765ms step_avg:78.74ms
+[2025-07-07 02:18:17] [Rank 0] step:1541/10000 train_time:121901ms step_avg:79.11ms
+[2025-07-07 02:18:19] [Rank 0] step:1561/10000 train_time:123369ms step_avg:79.03ms
+[2025-07-07 02:18:20] [Rank 0] step:1581/10000 train_time:124838ms step_avg:78.96ms
+[2025-07-07 02:18:22] [Rank 0] step:1601/10000 train_time:126308ms step_avg:78.89ms
+[2025-07-07 02:18:24] [Rank 0] step:1621/10000 train_time:127831ms step_avg:78.86ms
+[2025-07-07 02:18:25] [Rank 0] step:1641/10000 train_time:129902ms step_avg:79.16ms
+[2025-07-07 02:18:27] [Rank 0] step:1661/10000 train_time:131372ms step_avg:79.09ms
+[2025-07-07 02:18:28] [Rank 0] step:1681/10000 train_time:132846ms step_avg:79.03ms
+[2025-07-07 02:18:30] [Rank 0] step:1701/10000 train_time:134318ms step_avg:78.96ms
+[2025-07-07 02:18:32] [Rank 0] step:1721/10000 train_time:136433ms step_avg:79.28ms
+[2025-07-07 02:18:33] [Rank 0] step:1741/10000 train_time:137907ms step_avg:79.21ms
+[2025-07-07 02:18:35] [Rank 0] step:1761/10000 train_time:139384ms step_avg:79.15ms
+[2025-07-07 02:18:36] [Rank 0] step:1781/10000 train_time:140861ms step_avg:79.09ms
+[2025-07-07 02:18:38] [Rank 0] step:1801/10000 train_time:142387ms step_avg:79.06ms
+[2025-07-07 02:18:40] [Rank 0] step:1821/10000 train_time:144472ms step_avg:79.34ms
+[2025-07-07 02:18:41] [Rank 0] step:1841/10000 train_time:145950ms step_avg:79.28ms
+[2025-07-07 02:18:43] [Rank 0] step:1861/10000 train_time:147427ms step_avg:79.22ms
+[2025-07-07 02:18:44] [Rank 0] step:1881/10000 train_time:148906ms step_avg:79.16ms
+[2025-07-07 02:18:46] [Rank 0] step:1901/10000 train_time:151026ms step_avg:79.45ms
+[2025-07-07 02:18:48] [Rank 0] step:1921/10000 train_time:152504ms step_avg:79.39ms
+[2025-07-07 02:18:49] [Rank 0] step:1941/10000 train_time:153983ms step_avg:79.33ms
+[2025-07-07 02:18:51] [Rank 0] step:1961/10000 train_time:155682ms step_avg:79.39ms
+[2025-07-07 02:18:53] [Rank 0] step:1981/10000 train_time:157830ms step_avg:79.67ms
+[2025-07-07 02:18:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:18:55] [Rank 0] PRINT: step:2000/10000 train_loss:3.1233 val_loss:2.6165 train_time:159290ms step_avg:79.65ms
+[2025-07-07 02:18:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:18:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:18:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:24:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:24:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:24:17] [Rank 0] Total Loss: 4.4623
+[2025-07-07 02:24:17] [Rank 0] Total FTA: 0.0914
+[2025-07-07 02:24:17] [Rank 0] Group 0 Loss: 4.6060
+[2025-07-07 02:24:17] [Rank 0] Group 1 Loss: 4.5237
+[2025-07-07 02:24:17] [Rank 0] Group 2 Loss: 4.3815
+[2025-07-07 02:24:17] [Rank 0] Group 3 Loss: 4.4422
+[2025-07-07 02:24:17] [Rank 0] Group 4 Loss: 4.4276
+[2025-07-07 02:24:17] [Rank 0] Group 5 Loss: 4.4246
+[2025-07-07 02:24:17] [Rank 0] Group 6 Loss: 4.4115
+[2025-07-07 02:24:17] [Rank 0] Group 7 Loss: 4.4634
+[2025-07-07 02:24:17] [Rank 0] Group 8 Loss: 4.4360
+[2025-07-07 02:24:17] [Rank 0] Group 9 Loss: 4.4866
+[2025-07-07 02:24:17] [Rank 0] Group 10 Loss: 4.4463
+[2025-07-07 02:24:17] [Rank 0] Group 11 Loss: 4.4270
+[2025-07-07 02:24:17] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 02:24:17] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 02:24:17] [Rank 0] Group 2 FTA: 0.0885
+[2025-07-07 02:24:17] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 02:24:17] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 02:24:17] [Rank 0] Group 5 FTA: 0.0703
+[2025-07-07 02:24:17] [Rank 0] Group 6 FTA: 0.0990
+[2025-07-07 02:24:17] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 02:24:17] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-07 02:24:17] [Rank 0] Group 9 FTA: 0.1250
+[2025-07-07 02:24:17] [Rank 0] Group 10 FTA: 0.0820
+[2025-07-07 02:24:17] [Rank 0] Group 11 FTA: 0.1094
+[2025-07-07 02:24:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 02:24:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 02:24:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:24:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:24:18] [Rank 0] step:2001/10000 train_time:159312ms step_avg:79.62ms
+[2025-07-07 02:24:20] [Rank 0] step:2021/10000 train_time:160774ms step_avg:79.55ms
+[2025-07-07 02:24:21] [Rank 0] step:2041/10000 train_time:162241ms step_avg:79.49ms
+[2025-07-07 02:24:22] [Rank 0] step:2061/10000 train_time:163710ms step_avg:79.43ms
+[2025-07-07 02:24:25] [Rank 0] step:2081/10000 train_time:165827ms step_avg:79.69ms
+[2025-07-07 02:24:26] [Rank 0] step:2101/10000 train_time:167296ms step_avg:79.63ms
+[2025-07-07 02:24:28] [Rank 0] step:2121/10000 train_time:168764ms step_avg:79.57ms
+[2025-07-07 02:24:29] [Rank 0] step:2141/10000 train_time:170237ms step_avg:79.51ms
+[2025-07-07 02:24:31] [Rank 0] step:2161/10000 train_time:171760ms step_avg:79.48ms
+[2025-07-07 02:24:33] [Rank 0] step:2181/10000 train_time:173829ms step_avg:79.70ms
+[2025-07-07 02:24:34] [Rank 0] step:2201/10000 train_time:175301ms step_avg:79.65ms
+[2025-07-07 02:24:36] [Rank 0] step:2221/10000 train_time:176776ms step_avg:79.59ms
+[2025-07-07 02:24:37] [Rank 0] step:2241/10000 train_time:178270ms step_avg:79.55ms
+[2025-07-07 02:24:39] [Rank 0] step:2261/10000 train_time:179798ms step_avg:79.52ms
+[2025-07-07 02:24:40] [Rank 0] step:2281/10000 train_time:181296ms step_avg:79.48ms
+[2025-07-07 02:24:42] [Rank 0] step:2301/10000 train_time:182795ms step_avg:79.44ms
+[2025-07-07 02:24:43] [Rank 0] step:2321/10000 train_time:184297ms step_avg:79.40ms
+[2025-07-07 02:24:45] [Rank 0] step:2341/10000 train_time:186048ms step_avg:79.47ms
+[2025-07-07 02:24:47] [Rank 0] step:2361/10000 train_time:187938ms step_avg:79.60ms
+[2025-07-07 02:24:48] [Rank 0] step:2381/10000 train_time:189436ms step_avg:79.56ms
+[2025-07-07 02:24:50] [Rank 0] step:2401/10000 train_time:190936ms step_avg:79.52ms
+[2025-07-07 02:24:51] [Rank 0] step:2421/10000 train_time:192436ms step_avg:79.49ms
+[2025-07-07 02:24:53] [Rank 0] step:2441/10000 train_time:194071ms step_avg:79.50ms
+[2025-07-07 02:24:54] [Rank 0] step:2461/10000 train_time:195572ms step_avg:79.47ms
+[2025-07-07 02:24:56] [Rank 0] step:2481/10000 train_time:197072ms step_avg:79.43ms
+[2025-07-07 02:24:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:24:58] [Rank 0] PRINT: step:2500/10000 train_loss:2.2752 val_loss:2.0094 train_time:198574ms step_avg:79.43ms
+[2025-07-07 02:24:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:24:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:24:58] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 02:24:58] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 02:30:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 02:30:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 02:30:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 02:30:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 02:30:19] [Rank 0] Total Loss: 4.1537 +[2025-07-07 02:30:19] [Rank 0] Total Loss: 4.1537 +[2025-07-07 02:30:19] [Rank 0] Total FTA: 0.1330 +[2025-07-07 02:30:19] [Rank 0] Total FTA: 0.1330 +[2025-07-07 02:30:19] [Rank 0] Group 0 Loss: 4.3627 +[2025-07-07 02:30:19] [Rank 0] Group 0 Loss: 4.3627 +[2025-07-07 02:30:19] [Rank 0] Group 1 Loss: 4.2222 +[2025-07-07 02:30:19] [Rank 0] Group 1 Loss: 4.2222 +[2025-07-07 02:30:19] [Rank 0] Group 2 Loss: 3.9566 +[2025-07-07 02:30:19] [Rank 0] Group 2 Loss: 3.9566 +[2025-07-07 02:30:19] [Rank 0] Group 3 Loss: 4.1317 +[2025-07-07 02:30:19] [Rank 0] Group 3 Loss: 4.1317 +[2025-07-07 02:30:19] [Rank 0] Group 4 Loss: 4.0898 +[2025-07-07 02:30:19] [Rank 0] Group 4 Loss: 4.0898 +[2025-07-07 02:30:19] [Rank 0] Group 5 Loss: 4.0674 +[2025-07-07 02:30:19] [Rank 0] Group 5 Loss: 4.0674 +[2025-07-07 02:30:19] [Rank 0] Group 6 Loss: 4.0816 +[2025-07-07 02:30:19] [Rank 0] Group 6 Loss: 4.0816 +[2025-07-07 02:30:19] [Rank 0] Group 7 Loss: 4.1521 +[2025-07-07 02:30:19] [Rank 0] Group 7 Loss: 4.1521 +[2025-07-07 02:30:19] [Rank 0] Group 8 Loss: 4.1464 +[2025-07-07 02:30:19] [Rank 0] Group 8 Loss: 4.1464 +[2025-07-07 02:30:19] [Rank 0] Group 9 Loss: 4.0984 +[2025-07-07 02:30:19] [Rank 0] Group 9 Loss: 4.0984 +[2025-07-07 02:30:19] [Rank 0] Group 10 Loss: 4.1735 +[2025-07-07 02:30:19] [Rank 0] Group 10 Loss: 4.1735 +[2025-07-07 02:30:19] [Rank 0] Group 11 Loss: 4.1437 +[2025-07-07 02:30:19] [Rank 0] Group 11 Loss: 4.1437 +[2025-07-07 02:30:19] [Rank 0] Group 0 FTA: 0.3407 +[2025-07-07 02:30:19] [Rank 0] Group 0 FTA: 0.3407 +[2025-07-07 02:30:19] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 02:30:19] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 02:30:19] [Rank 0] Group 2 FTA: 0.2422 +[2025-07-07 02:30:19] [Rank 0] Group 2 FTA: 0.2422 +[2025-07-07 02:30:19] [Rank 0] Group 3 FTA: 0.0286 +[2025-07-07 02:30:19] [Rank 0] Group 3 FTA: 0.0286 +[2025-07-07 02:30:19] [Rank 0] Group 4 FTA: 0.0625 +[2025-07-07 02:30:19] [Rank 0] Group 4 FTA: 0.0625 +[2025-07-07 02:30:19] [Rank 0] Group 5 FTA: 0.0781 +[2025-07-07 02:30:19] [Rank 0] Group 5 FTA: 0.0781 +[2025-07-07 02:30:19] [Rank 0] Group 6 FTA: 0.0781 +[2025-07-07 02:30:19] [Rank 0] Group 6 FTA: 0.0781 +[2025-07-07 02:30:19] [Rank 0] Group 7 FTA: 0.1276 +[2025-07-07 02:30:19] [Rank 0] Group 7 FTA: 0.1276 +[2025-07-07 02:30:19] [Rank 0] Group 8 FTA: 0.1042 +[2025-07-07 02:30:19] [Rank 0] Group 8 FTA: 0.1042 +[2025-07-07 02:30:19] [Rank 0] Group 9 FTA: 0.1250 +[2025-07-07 02:30:19] [Rank 0] Group 9 FTA: 0.1250 +[2025-07-07 02:30:19] [Rank 0] Group 10 FTA: 0.1250 +[2025-07-07 02:30:19] [Rank 0] Group 10 FTA: 0.1250 +[2025-07-07 02:30:19] [Rank 0] Group 11 FTA: 0.1113 +[2025-07-07 02:30:19] [Rank 0] Group 11 FTA: 0.1113 +[2025-07-07 02:30:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 02:30:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 02:30:20] [Rank 0] [✓] Per-Class FTA curve 
+[2025-07-07 02:30:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:30:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:30:21] [Rank 0] step:2501/10000 train_time:198595ms step_avg:79.41ms
+[2025-07-07 02:30:23] [Rank 0] step:2521/10000 train_time:200105ms step_avg:79.38ms
+[2025-07-07 02:30:24] [Rank 0] step:2541/10000 train_time:202242ms step_avg:79.59ms
+[2025-07-07 02:30:26] [Rank 0] step:2561/10000 train_time:203893ms step_avg:79.61ms
+[2025-07-07 02:30:27] [Rank 0] step:2581/10000 train_time:205384ms step_avg:79.58ms
+[2025-07-07 02:30:29] [Rank 0] step:2601/10000 train_time:206876ms step_avg:79.54ms
+[2025-07-07 02:30:31] [Rank 0] step:2621/10000 train_time:209012ms step_avg:79.75ms
+[2025-07-07 02:30:32] [Rank 0] step:2641/10000 train_time:210504ms step_avg:79.71ms
+[2025-07-07 02:30:34] [Rank 0] step:2661/10000 train_time:211998ms step_avg:79.67ms
+[2025-07-07 02:30:35] [Rank 0] step:2681/10000 train_time:213493ms step_avg:79.63ms
+[2025-07-07 02:30:38] [Rank 0] step:2701/10000 train_time:215038ms step_avg:79.61ms
+[2025-07-07 02:30:39] [Rank 0] step:2721/10000 train_time:217127ms step_avg:79.80ms
+[2025-07-07 02:30:41] [Rank 0] step:2741/10000 train_time:218623ms step_avg:79.76ms
+[2025-07-07 02:30:42] [Rank 0] step:2761/10000 train_time:220119ms step_avg:79.72ms
+[2025-07-07 02:30:44] [Rank 0] step:2781/10000 train_time:221615ms step_avg:79.69ms
+[2025-07-07 02:30:46] [Rank 0] step:2801/10000 train_time:223767ms step_avg:79.89ms
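
The divisibility warning that precedes each validation pass above is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches run and 131072 tokens go unevaluated. A minimal sketch of such a check, using only the two numbers printed in the warning itself (hypothetical code, not the training script's):

# Minimal sketch of the coverage check behind the repeated warning above.
# The variable names mirror the log text; the logic is an assumption, not
# the training script's actual implementation.
val_tokens = 1966080
val_batch_size = 262144

full_batches, remainder = divmod(val_tokens, val_batch_size)  # 7 batches, 131072 left over
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")

covered = full_batches * val_batch_size  # 1835008 tokens actually evaluated
missed = val_tokens - covered            # 131072 tokens skipped each validation pass
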
+[2025-07-07 02:30:47] [Rank 0] step:2821/10000 train_time:225262ms step_avg:79.85ms
+[2025-07-07 02:30:49] [Rank 0] step:2841/10000 train_time:226759ms step_avg:79.82ms
+[2025-07-07 02:30:50] [Rank 0] step:2861/10000 train_time:228255ms step_avg:79.78ms
+[2025-07-07 02:30:52] [Rank 0] step:2881/10000 train_time:230005ms step_avg:79.84ms
+[2025-07-07 02:30:53] [Rank 0] step:2901/10000 train_time:231485ms step_avg:79.79ms
+[2025-07-07 02:30:55] [Rank 0] step:2921/10000 train_time:232988ms step_avg:79.76ms
+[2025-07-07 02:30:56] [Rank 0] step:2941/10000 train_time:234489ms step_avg:79.73ms
+[2025-07-07 02:30:58] [Rank 0] step:2961/10000 train_time:235990ms step_avg:79.70ms
+[2025-07-07 02:31:00] [Rank 0] step:2981/10000 train_time:237727ms step_avg:79.75ms
+[2025-07-07 02:31:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:31:02] [Rank 0] PRINT: step:3000/10000 train_loss:1.8494 val_loss:1.7193 train_time:239230ms step_avg:79.74ms
+[2025-07-07 02:31:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:31:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:31:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:36:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:36:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:36:26] [Rank 0] Total Loss: 4.1757
+[2025-07-07 02:36:26] [Rank 0] Total FTA: 0.1898
+[2025-07-07 02:36:26] [Rank 0] Group 0 Loss: 4.4847
+[2025-07-07 02:36:26] [Rank 0] Group 1 Loss: 4.1614
+[2025-07-07 02:36:26] [Rank 0] Group 2 Loss: 3.9261
+[2025-07-07 02:36:26] [Rank 0] Group 3 Loss: 4.2640
+[2025-07-07 02:36:26] [Rank 0] Group 4 Loss: 4.1393
+[2025-07-07 02:36:26] [Rank 0] Group 5 Loss: 4.0640
+[2025-07-07 02:36:26] [Rank 0] Group 6 Loss: 4.0505
+[2025-07-07 02:36:26] [Rank 0] Group 7 Loss: 4.2021
+[2025-07-07 02:36:26] [Rank 0] Group 8 Loss: 4.1405
+[2025-07-07 02:36:26] [Rank 0] Group 9 Loss: 4.1200
+[2025-07-07 02:36:26] [Rank 0] Group 10 Loss: 4.1760
+[2025-07-07 02:36:26] [Rank 0] Group 11 Loss: 4.1290
+[2025-07-07 02:36:26] [Rank 0] Group 0 FTA: 0.3290
+[2025-07-07 02:36:26] [Rank 0] Group 1 FTA: 0.1667
+[2025-07-07 02:36:26] [Rank 0] Group 2 FTA: 0.2474
+[2025-07-07 02:36:27] [Rank 0] Group 3 FTA: 0.0729
+[2025-07-07 02:36:27] [Rank 0] Group 4 FTA: 0.1458
+[2025-07-07 02:36:27] [Rank 0] Group 5 FTA: 0.1667
+[2025-07-07 02:36:27] [Rank 0] Group 6 FTA: 0.1771
+[2025-07-07 02:36:27] [Rank 0] Group 7 FTA: 0.1719
+[2025-07-07 02:36:27] [Rank 0] Group 8 FTA: 0.2005
+[2025-07-07 02:36:27] [Rank 0] Group 9 FTA: 0.1484
+[2025-07-07 02:36:27] [Rank 0] Group 10 FTA: 0.1660
+[2025-07-07 02:36:27] [Rank 0] Group 11 FTA: 0.1709
+[2025-07-07 02:36:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 02:36:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 02:36:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:36:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:36:28] [Rank 0] step:3001/10000 train_time:239250ms step_avg:79.72ms
+[2025-07-07 02:36:29] [Rank 0] step:3021/10000 train_time:240741ms step_avg:79.69ms
+[2025-07-07 02:36:31] [Rank 0] step:3041/10000 train_time:242232ms step_avg:79.66ms
+[2025-07-07 02:36:33] [Rank 0] step:3061/10000 train_time:243986ms step_avg:79.71ms
+[2025-07-07 02:36:35] [Rank 0] step:3081/10000 train_time:245887ms step_avg:79.81ms
+[2025-07-07 02:36:36] [Rank 0] step:3101/10000 train_time:247380ms step_avg:79.77ms
+[2025-07-07 02:36:38] [Rank 0] step:3121/10000 train_time:248873ms step_avg:79.74ms
+[2025-07-07 02:36:39] [Rank 0] step:3141/10000 train_time:250367ms step_avg:79.71ms
+[2025-07-07 02:36:41] [Rank 0] step:3161/10000 train_time:252529ms step_avg:79.89ms
+[2025-07-07 02:36:43] [Rank 0] step:3181/10000 train_time:254021ms step_avg:79.86ms
+[2025-07-07 02:36:44] [Rank 0] step:3201/10000 train_time:255517ms step_avg:79.82ms
+[2025-07-07 02:36:46] [Rank 0] step:3221/10000 train_time:257147ms step_avg:79.83ms
+[2025-07-07 02:36:48] [Rank 0] step:3241/10000 train_time:258699ms step_avg:79.82ms
+[2025-07-07 02:36:50] [Rank 0] step:3261/10000 train_time:260801ms step_avg:79.98ms
+[2025-07-07 02:36:51] [Rank 0] step:3281/10000 train_time:262299ms step_avg:79.94ms
+[2025-07-07 02:36:53] [Rank 0] step:3301/10000 train_time:263796ms step_avg:79.91ms
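
The step_avg field printed on every step line is consistent with cumulative wall-clock time divided by the step index; for example, the step:3301 entry above gives 263796 ms / 3301 ≈ 79.91 ms. A one-line check of that assumed formula:

# step_avg appears to be cumulative train_time divided by the step index;
# this is inferred from the printed values, not taken from the script.
def step_avg_ms(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

assert f"{step_avg_ms(263796, 3301):.2f}" == "79.91"  # matches the step:3301 line above
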
+[2025-07-07 02:36:54] [Rank 0] step:3321/10000 train_time:265296ms step_avg:79.88ms
+[2025-07-07 02:36:56] [Rank 0] step:3341/10000 train_time:266828ms step_avg:79.86ms
+[2025-07-07 02:36:57] [Rank 0] step:3361/10000 train_time:268327ms step_avg:79.84ms
+[2025-07-07 02:36:59] [Rank 0] step:3381/10000 train_time:269827ms step_avg:79.81ms
+[2025-07-07 02:37:00] [Rank 0] step:3401/10000 train_time:271328ms step_avg:79.78ms
+[2025-07-07 02:37:02] [Rank 0] step:3421/10000 train_time:273085ms step_avg:79.83ms
+[2025-07-07 02:37:04] [Rank 0] step:3441/10000 train_time:274985ms step_avg:79.91ms
+[2025-07-07 02:37:05] [Rank 0] step:3461/10000 train_time:276485ms step_avg:79.89ms
+[2025-07-07 02:37:07] [Rank 0] step:3481/10000 train_time:277987ms step_avg:79.86ms
+[2025-07-07 02:37:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:37:09] [Rank 0] PRINT: step:3500/10000 train_loss:1.6385 val_loss:1.5673 train_time:279489ms step_avg:79.85ms
+[2025-07-07 02:37:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:37:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:37:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:42:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:42:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:42:32] [Rank 0] Total Loss: 4.2141
+[2025-07-07 02:42:32] [Rank 0] Total FTA: 0.2244
+[2025-07-07 02:42:32] [Rank 0] Group 0 Loss: 4.4791
+[2025-07-07 02:42:32] [Rank 0] Group 1 Loss: 4.1809
+[2025-07-07 02:42:32] [Rank 0] Group 2 Loss: 4.0288
+[2025-07-07 02:42:32] [Rank 0] Group 3 Loss: 4.2256
+[2025-07-07 02:42:32] [Rank 0] Group 4 Loss: 4.2249
+[2025-07-07 02:42:32] [Rank 0] Group 5 Loss: 4.1401
+[2025-07-07 02:42:32] [Rank 0] Group 6 Loss: 4.0990
+[2025-07-07 02:42:32] [Rank 0] Group 7 Loss: 4.2208
+[2025-07-07 02:42:32] [Rank 0] Group 8 Loss: 4.1679
+[2025-07-07 02:42:32] [Rank 0] Group 9 Loss: 4.1539
+[2025-07-07 02:42:32] [Rank 0] Group 10 Loss: 4.1931
+[2025-07-07 02:42:32] [Rank 0] Group 11 Loss: 4.1999
+[2025-07-07 02:42:32] [Rank 0] Group 0 FTA: 0.3316
+[2025-07-07 02:42:32] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-07 02:42:32] [Rank 0] Group 2 FTA: 0.2500
+[2025-07-07 02:42:32] [Rank 0] Group 3 FTA: 0.1276
+[2025-07-07 02:42:32] [Rank 0] Group 4 FTA: 0.1849
+[2025-07-07 02:42:32] [Rank 0] Group 5 FTA: 0.2083
+[2025-07-07 02:42:32] [Rank 0] Group 6 FTA: 0.1484
+[2025-07-07 02:42:32] [Rank 0] Group 7 FTA: 0.2214
+[2025-07-07 02:42:32] [Rank 0] Group 8 FTA: 0.2552
+[2025-07-07 02:42:32] [Rank 0] Group 9 FTA: 0.2539
+[2025-07-07 02:42:32] [Rank 0] Group 10 FTA: 0.2324
+[2025-07-07 02:42:32] [Rank 0] Group 11 FTA: 0.2168
+[2025-07-07 02:42:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 02:42:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 02:42:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:42:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:42:34] [Rank 0] step:3501/10000 train_time:279511ms step_avg:79.84ms
+[2025-07-07 02:42:36] [Rank 0] step:3521/10000 train_time:281662ms step_avg:79.99ms
+[2025-07-07 02:42:37] [Rank 0] step:3541/10000 train_time:283153ms step_avg:79.96ms
+[2025-07-07 02:42:39] [Rank 0] step:3561/10000 train_time:284646ms step_avg:79.93ms
+[2025-07-07 02:42:40] [Rank 0] step:3581/10000 train_time:286140ms step_avg:79.90ms
+[2025-07-07 02:42:42] [Rank 0] step:3601/10000 train_time:287685ms step_avg:79.89ms
+[2025-07-07 02:42:44] [Rank 0] step:3621/10000 train_time:289771ms step_avg:80.03ms
+[2025-07-07 02:42:45] [Rank 0] step:3641/10000 train_time:291266ms step_avg:80.00ms
+[2025-07-07 02:42:47] [Rank 0] step:3661/10000 train_time:292761ms step_avg:79.97ms
+[2025-07-07 02:42:48] [Rank 0] step:3681/10000 train_time:294256ms step_avg:79.94ms
+[2025-07-07 02:42:50] [Rank 0] step:3701/10000 train_time:296415ms step_avg:80.09ms
+[2025-07-07 02:42:52] [Rank 0] step:3721/10000 train_time:297909ms step_avg:80.06ms
+[2025-07-07 02:42:53] [Rank 0] step:3741/10000 train_time:299406ms step_avg:80.03ms
+[2025-07-07 02:42:55] [Rank 0] step:3761/10000 train_time:300904ms step_avg:80.01ms
+[2025-07-07 02:42:57] [Rank 0] step:3781/10000 train_time:302657ms step_avg:80.05ms
+[2025-07-07 02:42:58] [Rank 0] step:3801/10000 train_time:304135ms step_avg:80.01ms
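
Each detailed evaluation above requests ~5000 stratified samples yet settles on 5633, the kind of overshoot that per-group quotas with rounding produce; the exact rule that yields 5633 is not recoverable from the log. The sketch below is one plausible mechanism, not the script's code; the 12-group structure and the quota rule are assumptions inferred from the log:

import random

# One plausible sampler behind "~5000 samples" -> an overshoot: a per-group
# quota with ceiling rounding. The group structure and quota rule are
# assumptions, not the training script's actual implementation.
def stratified_sample(samples_by_group, target_total, seed=0):
    rng = random.Random(seed)
    quota = -(-target_total // len(samples_by_group))  # ceiling division per group
    picked = []
    for samples in samples_by_group.values():
        k = min(quota, len(samples))  # small groups contribute all they have
        picked.extend(rng.sample(samples, k))
    return picked  # per-group rounding lets the total drift past target_total
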
+[2025-07-07 02:43:00] [Rank 0] step:3821/10000 train_time:305632ms step_avg:79.99ms
+[2025-07-07 02:43:01] [Rank 0] step:3841/10000 train_time:307132ms step_avg:79.96ms
+[2025-07-07 02:43:03] [Rank 0] step:3861/10000 train_time:308633ms step_avg:79.94ms
+[2025-07-07 02:43:05] [Rank 0] step:3881/10000 train_time:310493ms step_avg:80.00ms
+[2025-07-07 02:43:06] [Rank 0] step:3901/10000 train_time:311996ms step_avg:79.98ms
+[2025-07-07 02:43:08] [Rank 0] step:3921/10000 train_time:313493ms step_avg:79.95ms
+[2025-07-07 02:43:09] [Rank 0] step:3941/10000 train_time:314995ms step_avg:79.93ms
+[2025-07-07 02:43:11] [Rank 0] step:3961/10000 train_time:316549ms step_avg:79.92ms
+[2025-07-07 02:43:12] [Rank 0] step:3981/10000 train_time:318234ms step_avg:79.94ms
+[2025-07-07 02:43:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:43:15] [Rank 0] PRINT: step:4000/10000 train_loss:1.5200 val_loss:1.4753 train_time:319737ms step_avg:79.93ms
+[2025-07-07 02:43:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:43:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:43:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:48:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:48:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:48:39] [Rank 0] Total Loss: 4.3128
+[2025-07-07 02:48:39] [Rank 0] Total FTA: 0.2688
+[2025-07-07 02:48:39] [Rank 0] Group 0 Loss: 4.5534
+[2025-07-07 02:48:39] [Rank 0] Group 1 Loss: 4.4707
+[2025-07-07 02:48:39] [Rank 0] Group 2 Loss: 4.0536
+[2025-07-07 02:48:39] [Rank 0] Group 3 Loss: 4.3911
+[2025-07-07 02:48:39] [Rank 0] Group 4 Loss: 4.2449
+[2025-07-07 02:48:39] [Rank 0] Group 5 Loss: 4.1876
+[2025-07-07 02:48:39] [Rank 0] Group 6 Loss: 4.1368
+[2025-07-07 02:48:39] [Rank 0] Group 7 Loss: 4.3269
+[2025-07-07 02:48:39] [Rank 0] Group 8 Loss: 4.2995
+[2025-07-07 02:48:39] [Rank 0] Group 9 Loss: 4.2790
+[2025-07-07 02:48:39] [Rank 0] Group 10 Loss: 4.2983
+[2025-07-07 02:48:39] [Rank 0] Group 11 Loss: 4.2944
+[2025-07-07 02:48:39] [Rank 0] Group 0 FTA: 0.1756
+[2025-07-07 02:48:39] [Rank 0] Group 1 FTA: 0.4896
+[2025-07-07 02:48:39] [Rank 0] Group 2 FTA: 0.1875
+[2025-07-07 02:48:39] [Rank 0] Group 3 FTA: 0.2214
+[2025-07-07 02:48:39] [Rank 0] Group 4 FTA: 0.1693
+[2025-07-07 02:48:39] [Rank 0] Group 5 FTA: 0.2891
+[2025-07-07 02:48:39] [Rank 0] Group 6 FTA: 0.2396
+[2025-07-07 02:48:39] [Rank 0] Group 7 FTA: 0.2891
+[2025-07-07 02:48:39] [Rank 0] Group 8 FTA: 0.3047
+[2025-07-07 02:48:39] [Rank 0] Group 9 FTA: 0.2773
+[2025-07-07 02:48:39] [Rank 0] Group 10 FTA: 0.3086
+[2025-07-07 02:48:39] [Rank 0] Group 11 FTA: 0.3018
+[2025-07-07 02:48:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 02:48:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 02:48:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:48:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:48:40] [Rank 0] step:4001/10000 train_time:319759ms step_avg:79.92ms
+[2025-07-07 02:48:42] [Rank 0] step:4021/10000 train_time:321264ms step_avg:79.90ms
+[2025-07-07 02:48:43] [Rank 0] step:4041/10000 train_time:322755ms step_avg:79.87ms
+[2025-07-07 02:48:45] [Rank 0] step:4061/10000 train_time:324900ms step_avg:80.01ms
+[2025-07-07 02:48:47] [Rank 0] step:4081/10000 train_time:326395ms step_avg:79.98ms
+[2025-07-07 02:48:48] [Rank 0] step:4101/10000 train_time:327886ms step_avg:79.95ms
+[2025-07-07 02:48:50] [Rank 0] step:4121/10000 train_time:329380ms step_avg:79.93ms
+[2025-07-07 02:48:52] [Rank 0] step:4141/10000 train_time:331537ms step_avg:80.06ms
+[2025-07-07 02:48:54] [Rank 0] step:4161/10000 train_time:333011ms step_avg:80.03ms
+[2025-07-07 02:48:55] [Rank 0] step:4181/10000 train_time:334505ms step_avg:80.01ms
+[2025-07-07 02:48:56] [Rank 0] step:4201/10000 train_time:336000ms step_avg:79.98ms
+[2025-07-07 02:48:58] [Rank 0] step:4221/10000 train_time:337499ms step_avg:79.96ms
+[2025-07-07 02:49:00] [Rank 0] step:4241/10000 train_time:339235ms step_avg:79.99ms
+[2025-07-07 02:49:01] [Rank 0] step:4261/10000 train_time:340733ms step_avg:79.97ms
+[2025-07-07 02:49:03] [Rank 0] step:4281/10000 train_time:342232ms step_avg:79.94ms
+[2025-07-07 02:49:04] [Rank 0] step:4301/10000 train_time:343731ms step_avg:79.92ms
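
The log never expands "FTA". Reading it as first-token accuracy, each group's pair of numbers above would be the mean loss and the fraction of samples whose first answer token is predicted correctly. A PyTorch sketch of that aggregation under this assumption (hypothetical function; logits here are taken at each sample's first answer position):

import torch
import torch.nn.functional as F

# Assumed reading of the per-group numbers: mean loss plus first-token
# accuracy ("FTA") per group. logits: [N, vocab] at each sample's first
# answer position; targets: [N]; groups: [N]. Sketch only, not the script.
def group_metrics(logits, targets, groups, num_groups=12):
    losses = F.cross_entropy(logits, targets, reduction="none")  # per-sample loss
    hits = (logits.argmax(dim=-1) == targets).float()            # first-token correctness
    results = {}
    for g in range(num_groups):
        mask = groups == g
        if mask.any():
            results[g] = (losses[mask].mean().item(), hits[mask].mean().item())
    return results  # printed above as "Group g Loss" / "Group g FTA"
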
+[2025-07-07 02:49:06] [Rank 0] step:4321/10000 train_time:345483ms step_avg:79.95ms
+[2025-07-07 02:49:08] [Rank 0] step:4341/10000 train_time:347381ms step_avg:80.02ms
+[2025-07-07 02:49:09] [Rank 0] step:4361/10000 train_time:348880ms step_avg:80.00ms
+[2025-07-07 02:49:11] [Rank 0] step:4381/10000 train_time:350382ms step_avg:79.98ms
+[2025-07-07 02:49:12] [Rank 0] step:4401/10000 train_time:351884ms step_avg:79.96ms
+[2025-07-07 02:49:15] [Rank 0] step:4421/10000 train_time:354022ms step_avg:80.08ms
+[2025-07-07 02:49:16] [Rank 0] step:4441/10000 train_time:355520ms step_avg:80.05ms
+[2025-07-07 02:49:18] [Rank 0] step:4461/10000 train_time:357022ms step_avg:80.03ms
+[2025-07-07 02:49:19] [Rank 0] step:4481/10000 train_time:358523ms step_avg:80.01ms
+[2025-07-07 02:49:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:49:21] [Rank 0] PRINT: step:4500/10000 train_loss:1.4452 val_loss:1.4135 train_time:360026ms step_avg:80.01ms
+[2025-07-07 02:49:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:49:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:49:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:54:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:54:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:54:46] [Rank 0] Total Loss: 4.4005
+[2025-07-07 02:54:46] [Rank 0] Total FTA: 0.3254
+[2025-07-07 02:54:46] [Rank 0] Group 0 Loss: 4.6249
+[2025-07-07 02:54:46] [Rank 0] Group 1 Loss: 4.3584
+[2025-07-07 02:54:46] [Rank 0] Group 2 Loss: 4.1069
+[2025-07-07 02:54:46] [Rank 0] Group 3 Loss: 4.5371
+[2025-07-07 02:54:46] [Rank 0] Group 4 Loss: 4.3827
+[2025-07-07 02:54:46] [Rank 0] Group 5 Loss: 4.2731
+[2025-07-07 02:54:46] [Rank 0] Group 6 Loss: 4.3159
+[2025-07-07 02:54:46] [Rank 0] Group 7 Loss: 4.4113
+[2025-07-07 02:54:46] [Rank 0] Group 8 Loss: 4.4368
+[2025-07-07 02:54:46] [Rank 0] Group 9 Loss: 4.3566
+[2025-07-07 02:54:46] [Rank 0] Group 10 Loss: 4.3808
+[2025-07-07 02:54:46] [Rank 0] Group 11 Loss: 4.3959
+[2025-07-07 02:54:46] [Rank 0] Group 0 FTA: 0.3433
+[2025-07-07 02:54:46] [Rank 0] Group 1 FTA: 0.4922
+[2025-07-07 02:54:46] [Rank 0] Group 2 FTA: 0.4089
+[2025-07-07 02:54:46] [Rank 0] Group 3 FTA: 0.2344
+[2025-07-07 02:54:46] [Rank 0] Group 4 FTA: 0.2318
+[2025-07-07 02:54:46] [Rank 0] Group 5 FTA: 0.3359
+[2025-07-07 02:54:46] [Rank 0] Group 6 FTA: 0.2734
+[2025-07-07 02:54:46] [Rank 0] Group 7 FTA: 0.3255
+[2025-07-07 02:54:46] [Rank 0] Group 8 FTA: 0.3333
+[2025-07-07 02:54:46] [Rank 0] Group 9 FTA: 0.3164
+[2025-07-07 02:54:46] [Rank 0] Group 10 FTA: 0.3125
+[2025-07-07 02:54:46] [Rank 0] Group 11 FTA: 0.3086
+[2025-07-07 02:54:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 02:54:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 02:54:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 02:54:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 02:54:49] [Rank 0] step:4501/10000 train_time:360055ms step_avg:79.99ms
+[2025-07-07 02:54:50] [Rank 0] step:4521/10000 train_time:362262ms step_avg:80.13ms
+[2025-07-07 02:54:52] [Rank 0] step:4541/10000 train_time:363753ms step_avg:80.10ms
+[2025-07-07 02:54:53] [Rank 0] step:4561/10000 train_time:365244ms step_avg:80.08ms
+[2025-07-07 02:54:55] [Rank 0] step:4581/10000 train_time:366736ms step_avg:80.06ms
+[2025-07-07 02:54:57] [Rank 0] step:4601/10000 train_time:368873ms step_avg:80.17ms
+[2025-07-07 02:54:58] [Rank 0] step:4621/10000 train_time:370368ms step_avg:80.15ms
+[2025-07-07 02:55:00] [Rank 0] step:4641/10000 train_time:371861ms step_avg:80.13ms
+[2025-07-07 02:55:01] [Rank 0] step:4661/10000 train_time:373356ms step_avg:80.10ms
+[2025-07-07 02:55:03] [Rank 0] step:4681/10000 train_time:375510ms step_avg:80.22ms
+[2025-07-07 02:55:05] [Rank 0] step:4701/10000 train_time:376985ms step_avg:80.19ms
+[2025-07-07 02:55:06] [Rank 0] step:4721/10000 train_time:378482ms step_avg:80.17ms
+[2025-07-07 02:55:08] [Rank 0] step:4741/10000 train_time:379980ms step_avg:80.15ms
+[2025-07-07 02:55:09] [Rank 0] step:4761/10000 train_time:381478ms step_avg:80.13ms
+[2025-07-07 02:55:11] [Rank 0] step:4781/10000 train_time:383211ms step_avg:80.15ms
+[2025-07-07 02:55:12] [Rank 0] step:4801/10000 train_time:384712ms step_avg:80.13ms
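
The four "[✓] ... updated and saved" lines after every evaluation always name the same PNG paths, which implies the curves are re-rendered from the accumulated history and overwritten in place rather than saved per step. A minimal matplotlib sketch of that pattern (the history structure is an assumption):

import matplotlib.pyplot as plt

# Overwrite-in-place plotting implied by the "[✓] ... updated and saved"
# lines: one PNG per metric, rewritten after every detailed evaluation.
# `history` maps step -> total FTA; the structure is an assumption.
def save_total_acc_curve(history, out_path):
    steps = sorted(history)
    fig, ax = plt.subplots()
    ax.plot(steps, [history[s] for s in steps], marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel("Total FTA")
    fig.savefig(out_path)  # same file each round, e.g. total_acc_curve.png
    plt.close(fig)         # avoid accumulating open figures across rounds
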
+[2025-07-07 02:55:14] [Rank 0] step:4821/10000 train_time:386208ms step_avg:80.11ms
+[2025-07-07 02:55:15] [Rank 0] step:4841/10000 train_time:387708ms step_avg:80.09ms
+[2025-07-07 02:55:18] [Rank 0] step:4861/10000 train_time:389877ms step_avg:80.21ms
+[2025-07-07 02:55:19] [Rank 0] step:4881/10000 train_time:391355ms step_avg:80.18ms
+[2025-07-07 02:55:21] [Rank 0] step:4901/10000 train_time:392856ms step_avg:80.16ms
+[2025-07-07 02:55:22] [Rank 0] step:4921/10000 train_time:394356ms step_avg:80.14ms
+[2025-07-07 02:55:24] [Rank 0] step:4941/10000 train_time:395856ms step_avg:80.12ms
+[2025-07-07 02:55:26] [Rank 0] step:4961/10000 train_time:397997ms step_avg:80.23ms
+[2025-07-07 02:55:27] [Rank 0] step:4981/10000 train_time:399498ms step_avg:80.20ms
+[2025-07-07 02:55:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:55:30] [Rank 0] PRINT: step:5000/10000 train_loss:1.3915 val_loss:1.3671 train_time:400998ms step_avg:80.20ms
+[2025-07-07 02:55:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:55:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:55:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:00:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:00:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:00:54] [Rank 0] Total Loss: 4.5251
+[2025-07-07 03:00:54] [Rank 0] Total FTA: 0.3579
+[2025-07-07 03:00:54] [Rank 0] Group 0 Loss: 4.9485
+[2025-07-07 03:00:54] [Rank 0] Group 1 Loss: 4.7051
+[2025-07-07 03:00:54] [Rank 0] Group 2 Loss: 4.2253
+[2025-07-07 03:00:54] [Rank 0] Group 3 Loss: 4.5439
+[2025-07-07 03:00:54] [Rank 0] Group 4 Loss: 4.4595
+[2025-07-07 03:00:54] [Rank 0] Group 5 Loss: 4.4530
+[2025-07-07 03:00:54] [Rank 0] Group 6 Loss: 4.4091
+[2025-07-07 03:00:54] [Rank 0] Group 7 Loss: 4.5046
+[2025-07-07 03:00:54] [Rank 0] Group 8 Loss: 4.4511
+[2025-07-07 03:00:54] [Rank 0] Group 9 Loss: 4.4590
+[2025-07-07 03:00:54] [Rank 0] Group 10 Loss: 4.4349
+[2025-07-07 03:00:54] [Rank 0] Group 11 Loss: 4.4373
+[2025-07-07 03:00:54] [Rank 0] Group 0 FTA: 0.4967
+[2025-07-07 03:00:54] [Rank 0] Group 1 FTA: 0.4766
+[2025-07-07 03:00:54] [Rank 0] Group 2 FTA: 0.4115
+[2025-07-07 03:00:54] [Rank 0] Group 3 FTA: 0.2188
+[2025-07-07 03:00:54] [Rank 0] Group 4 FTA: 0.2552
+[2025-07-07 03:00:54] [Rank 0] Group 5 FTA: 0.3359
+[2025-07-07 03:00:54] [Rank 0] Group 6 FTA: 0.3646
+[2025-07-07 03:00:54] [Rank 0] Group 7 FTA: 0.3281
+[2025-07-07 03:00:54] [Rank 0] Group 8 FTA: 0.3177
+[2025-07-07 03:00:54] [Rank 0] Group 9 FTA: 0.2773
+[2025-07-07 03:00:54] [Rank 0] Group 10 FTA: 0.3672
+[2025-07-07 03:00:54] [Rank 0] Group 11 FTA: 0.3271
+[2025-07-07 03:00:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 03:00:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 03:00:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 03:00:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 03:00:55] [Rank 0] step:5001/10000 train_time:401020ms step_avg:80.19ms
+[2025-07-07 03:00:57] [Rank 0] step:5021/10000 train_time:402524ms step_avg:80.17ms
+[2025-07-07 03:00:59] [Rank 0] step:5041/10000 train_time:404275ms step_avg:80.20ms
+[2025-07-07 03:01:00] [Rank 0] step:5061/10000 train_time:406317ms step_avg:80.28ms
+[2025-07-07 03:01:02] [Rank 0] step:5081/10000 train_time:407809ms step_avg:80.26ms
+[2025-07-07 03:01:03] [Rank 0] step:5101/10000 train_time:409302ms step_avg:80.24ms
+[2025-07-07 03:01:05] [Rank 0] step:5121/10000 train_time:410796ms step_avg:80.22ms
+[2025-07-07 03:01:07] [Rank 0] step:5141/10000 train_time:412524ms step_avg:80.24ms
+[2025-07-07 03:01:08] [Rank 0] step:5161/10000 train_time:414018ms step_avg:80.22ms
+[2025-07-07 03:01:10] [Rank 0] step:5181/10000 train_time:415513ms step_avg:80.20ms
+[2025-07-07 03:01:11] [Rank 0] step:5201/10000 train_time:417011ms step_avg:80.18ms
+[2025-07-07 03:01:13] [Rank 0] step:5221/10000 train_time:418559ms step_avg:80.17ms
+[2025-07-07 03:01:15] [Rank 0] step:5241/10000 train_time:420658ms step_avg:80.26ms
+[2025-07-07 03:01:16] [Rank 0] step:5261/10000 train_time:422157ms step_avg:80.24ms
+[2025-07-07 03:01:18] [Rank 0] step:5281/10000 train_time:423656ms step_avg:80.22ms
+[2025-07-07 03:01:19] [Rank 0] step:5301/10000 train_time:425157ms step_avg:80.20ms
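
Because every train/val summary in this file follows the fixed pattern "PRINT: step:N/10000 train_loss:X val_loss:Y ...", the loss trajectory can be recovered with a short regex pass. A sketch (the path argument is a placeholder; the seen-set tolerates any repeated summary lines):

import re

# Recover (step, train_loss, val_loss) from this log's fixed summary format.
# The path is a placeholder; the pattern matches the PRINT lines above.
PAT = re.compile(r"PRINT: step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def extract_losses(path):
    points, seen = [], set()
    for line in open(path, encoding="utf-8"):
        m = PAT.search(line)
        if m and m.group(1) not in seen:  # skip duplicate summary lines
            seen.add(m.group(1))
            points.append((int(m.group(1)), float(m.group(2)), float(m.group(3))))
    return points  # e.g. [(2500, 2.2752, 2.0094), (3000, 1.8494, 1.7193), ...]
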
+[2025-07-07 03:01:21] [Rank 0] step:5321/10000 train_time:426892ms step_avg:80.23ms
+[2025-07-07 03:01:22] [Rank 0] step:5341/10000 train_time:428393ms step_avg:80.21ms
+[2025-07-07 03:01:24] [Rank 0] step:5361/10000 train_time:429892ms step_avg:80.19ms
+[2025-07-07 03:01:25] [Rank 0] step:5381/10000 train_time:431392ms step_avg:80.17ms
+[2025-07-07 03:01:27] [Rank 0] step:5401/10000 train_time:432945ms step_avg:80.16ms
+[2025-07-07 03:01:29] [Rank 0] step:5421/10000 train_time:434631ms step_avg:80.18ms
+[2025-07-07 03:01:30] [Rank 0] step:5441/10000 train_time:436134ms step_avg:80.16ms
+[2025-07-07 03:01:32] [Rank 0] step:5461/10000 train_time:437636ms step_avg:80.14ms
+[2025-07-07 03:01:33] [Rank 0] step:5481/10000 train_time:439138ms step_avg:80.12ms
+[2025-07-07 03:01:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:01:36] [Rank 0] PRINT: step:5500/10000 train_loss:1.3489 val_loss:1.3283 train_time:441287ms step_avg:80.23ms
+[2025-07-07 03:01:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:01:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:01:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:07:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:07:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:07:01] [Rank 0] Total Loss: 4.5896
+[2025-07-07 03:07:01] [Rank 0] Total FTA: 0.3613
+[2025-07-07 03:07:01] [Rank 0] Group 0 Loss: 4.9400
+[2025-07-07 03:07:01] [Rank 0] Group 1 Loss: 4.5592
+[2025-07-07 03:07:01] [Rank 0] Group 2 Loss: 4.4229
+[2025-07-07 03:07:01] [Rank 0] Group 3 Loss: 4.6084
+[2025-07-07 03:07:01] [Rank 0] Group 4 Loss: 4.5288
+[2025-07-07 03:07:01] [Rank 0] Group 5 Loss: 4.4816
+[2025-07-07 03:07:01] [Rank 0] Group 6 Loss: 4.4652
+[2025-07-07 03:07:01] [Rank 0] Group 7 Loss: 4.5633
+[2025-07-07 03:07:01] [Rank 0] Group 8 Loss: 4.5872
+[2025-07-07 03:07:01] [Rank 0] Group 9 Loss: 4.4872
+[2025-07-07 03:07:01] [Rank 0] Group 10 Loss: 4.5435
+[2025-07-07 03:07:01] [Rank 0] Group 11 Loss: 4.5630
+[2025-07-07 03:07:01] [Rank 0] Group 0 FTA: 0.3225
+[2025-07-07 03:07:01] [Rank 0] Group 1 FTA: 0.4766
+[2025-07-07 03:07:01] [Rank 0] Group 2 FTA: 0.4271
+[2025-07-07 03:07:01] [Rank 0] Group 3 FTA: 0.3438
+[2025-07-07 03:07:01] [Rank 0] Group 4 FTA: 0.3203
+[2025-07-07 03:07:01] [Rank 0] Group 5 FTA: 0.3854
+[2025-07-07 03:07:01] [Rank 0] Group 6 FTA: 0.3385
+[2025-07-07 03:07:01] [Rank 0] Group 7 FTA: 0.3516
+[2025-07-07 03:07:01] [Rank 0] Group 8 FTA: 0.4062
+[2025-07-07 03:07:01] [Rank 0] Group 9 FTA: 0.3789
+[2025-07-07 03:07:01] [Rank 0] Group 10 FTA: 0.3438
+[2025-07-07 03:07:01] [Rank 0] Group 11 FTA: 0.3350
+[2025-07-07 03:07:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 03:07:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 03:07:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 03:07:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 03:07:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 03:07:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 03:07:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 03:07:03] [Rank 0] step:5501/10000 train_time:441308ms step_avg:80.22ms +[2025-07-07 03:07:03] [Rank 0] step:5501/10000 train_time:441308ms step_avg:80.22ms +[2025-07-07 03:07:04] [Rank 0] step:5521/10000 train_time:442812ms step_avg:80.20ms +[2025-07-07 03:07:04] [Rank 0] step:5521/10000 train_time:442812ms step_avg:80.20ms +[2025-07-07 03:07:06] [Rank 0] step:5541/10000 train_time:444302ms step_avg:80.18ms +[2025-07-07 03:07:06] [Rank 0] step:5541/10000 train_time:444302ms step_avg:80.18ms +[2025-07-07 03:07:07] [Rank 0] step:5561/10000 train_time:445796ms step_avg:80.16ms +[2025-07-07 03:07:07] [Rank 0] step:5561/10000 train_time:445796ms step_avg:80.16ms +[2025-07-07 03:07:09] [Rank 0] step:5581/10000 train_time:447342ms step_avg:80.15ms +[2025-07-07 03:07:09] [Rank 0] step:5581/10000 train_time:447342ms step_avg:80.15ms +[2025-07-07 03:07:11] [Rank 0] step:5601/10000 train_time:449446ms step_avg:80.24ms +[2025-07-07 03:07:11] [Rank 0] step:5601/10000 train_time:449446ms step_avg:80.24ms +[2025-07-07 03:07:12] [Rank 0] step:5621/10000 train_time:450939ms step_avg:80.22ms +[2025-07-07 03:07:12] [Rank 0] step:5621/10000 train_time:450939ms step_avg:80.22ms +[2025-07-07 03:07:14] [Rank 0] step:5641/10000 train_time:452434ms step_avg:80.20ms +[2025-07-07 03:07:14] [Rank 0] step:5641/10000 train_time:452434ms step_avg:80.20ms +[2025-07-07 03:07:15] [Rank 0] step:5661/10000 train_time:453929ms step_avg:80.19ms +[2025-07-07 03:07:15] [Rank 0] step:5661/10000 train_time:453929ms step_avg:80.19ms +[2025-07-07 03:07:18] [Rank 0] step:5681/10000 train_time:456088ms step_avg:80.28ms +[2025-07-07 03:07:18] [Rank 0] step:5681/10000 train_time:456088ms step_avg:80.28ms +[2025-07-07 03:07:19] [Rank 0] step:5701/10000 train_time:457743ms step_avg:80.29ms +[2025-07-07 03:07:19] [Rank 0] step:5701/10000 train_time:457743ms step_avg:80.29ms +[2025-07-07 03:07:21] [Rank 0] step:5721/10000 train_time:459241ms step_avg:80.27ms +[2025-07-07 03:07:21] [Rank 0] step:5721/10000 train_time:459241ms step_avg:80.27ms +[2025-07-07 03:07:22] [Rank 0] step:5741/10000 train_time:460738ms step_avg:80.25ms +[2025-07-07 03:07:22] [Rank 0] step:5741/10000 train_time:460738ms step_avg:80.25ms +[2025-07-07 03:07:24] [Rank 0] step:5761/10000 train_time:462490ms step_avg:80.28ms +[2025-07-07 03:07:24] [Rank 0] step:5761/10000 train_time:462490ms step_avg:80.28ms +[2025-07-07 03:07:25] [Rank 0] step:5781/10000 train_time:463968ms step_avg:80.26ms +[2025-07-07 03:07:25] [Rank 0] step:5781/10000 train_time:463968ms step_avg:80.26ms +[2025-07-07 03:07:27] [Rank 0] step:5801/10000 train_time:465467ms step_avg:80.24ms +[2025-07-07 03:07:27] [Rank 
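The recurring divisibility warning above is plain integer arithmetic: 1966080 / 262144 = 7.5, so a validation loop that floors the batch count runs 7 full batches (1835008 tokens) and skips the remaining 131072 tokens on every pass. A minimal sketch of that check, assuming a loader that simply floors (the loader code itself is not part of this diff):

```python
val_tokens, val_batch_size = 1966080, 262144

full_batches = val_tokens // val_batch_size   # 7 full validation batches
evaluated = full_batches * val_batch_size     # 1835008 tokens actually scored
missed = val_tokens - evaluated               # 131072 tokens (half a batch) skipped

if val_tokens % val_batch_size != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
```

Sizing val_tokens as a multiple of val_batch_size (e.g. 8 * 262144 = 2097152) would silence the warning without touching the loader.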
+[2025-07-07 03:07:28] [Rank 0] step:5821/10000 train_time:466966ms step_avg:80.22ms
+[2025-07-07 03:07:30] [Rank 0] step:5841/10000 train_time:468466ms step_avg:80.20ms
+[2025-07-07 03:07:32] [Rank 0] step:5861/10000 train_time:470616ms step_avg:80.30ms
+[2025-07-07 03:07:34] [Rank 0] step:5881/10000 train_time:472117ms step_avg:80.28ms
+[2025-07-07 03:07:35] [Rank 0] step:5901/10000 train_time:473620ms step_avg:80.26ms
+[2025-07-07 03:07:37] [Rank 0] step:5921/10000 train_time:475121ms step_avg:80.24ms
+[2025-07-07 03:07:39] [Rank 0] step:5941/10000 train_time:476672ms step_avg:80.23ms
+[2025-07-07 03:07:40] [Rank 0] step:5961/10000 train_time:478765ms step_avg:80.32ms
+[2025-07-07 03:07:42] [Rank 0] step:5981/10000 train_time:480267ms step_avg:80.30ms
+[2025-07-07 03:07:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:07:44] [Rank 0] PRINT: step:6000/10000 train_loss:1.3128 val_loss:1.2947 train_time:481768ms step_avg:80.29ms
+[2025-07-07 03:07:44] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:07:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:07:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:13:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:13:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:13:06] [Rank 0] Total Loss: 4.6384
+[2025-07-07 03:13:06] [Rank 0] Total FTA: 0.3684
+[2025-07-07 03:13:06] [Rank 0] Group 0 Loss: 5.0557
+[2025-07-07 03:13:06] [Rank 0] Group 1 Loss: 4.5526
+[2025-07-07 03:13:06] [Rank 0] Group 2 Loss: 4.5213
+[2025-07-07 03:13:06] [Rank 0] Group 3 Loss: 4.5094
+[2025-07-07 03:13:06] [Rank 0] Group 4 Loss: 4.5601
+[2025-07-07 03:13:06] [Rank 0] Group 5 Loss: 4.5175
+[2025-07-07 03:13:06] [Rank 0] Group 6 Loss: 4.4984
+[2025-07-07 03:13:06] [Rank 0] Group 7 Loss: 4.6326
+[2025-07-07 03:13:06] [Rank 0] Group 8 Loss: 4.5874
+[2025-07-07 03:13:06] [Rank 0] Group 9 Loss: 4.5949
+[2025-07-07 03:13:06] [Rank 0] Group 10 Loss: 4.5715
+[2025-07-07 03:13:06] [Rank 0] Group 11 Loss: 4.6422
+[2025-07-07 03:13:06] [Rank 0] Group 0 FTA: 0.3472
+[2025-07-07 03:13:06] [Rank 0] Group 1 FTA: 0.5417
+[2025-07-07 03:13:06] [Rank 0] Group 2 FTA: 0.4818
+[2025-07-07 03:13:06] [Rank 0] Group 3 FTA: 0.2630
+[2025-07-07 03:13:06] [Rank 0] Group 4 FTA: 0.2656
+[2025-07-07 03:13:06] [Rank 0] Group 5 FTA: 0.3490
+[2025-07-07 03:13:06] [Rank 0] Group 6 FTA: 0.3359
+[2025-07-07 03:13:06] [Rank 0] Group 7 FTA: 0.4062
+[2025-07-07 03:13:06] [Rank 0] Group 8 FTA: 0.3411
+[2025-07-07 03:13:06] [Rank 0] Group 9 FTA: 0.3945
+[2025-07-07 03:13:06] [Rank 0] Group 10 FTA: 0.3613
+[2025-07-07 03:13:06] [Rank 0] Group 11 FTA: 0.3672
+[2025-07-07 03:13:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 03:13:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 03:13:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 03:13:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 03:13:07] [Rank 0] step:6001/10000 train_time:481790ms step_avg:80.28ms
+[2025-07-07 03:13:09] [Rank 0] step:6021/10000 train_time:483286ms step_avg:80.27ms
+[2025-07-07 03:13:11] [Rank 0] step:6041/10000 train_time:485120ms step_avg:80.30ms
+[2025-07-07 03:13:12] [Rank 0] step:6061/10000 train_time:486609ms step_avg:80.29ms
+[2025-07-07 03:13:14] [Rank 0] step:6081/10000 train_time:488101ms step_avg:80.27ms
+[2025-07-07 03:13:15] [Rank 0] step:6101/10000 train_time:489593ms step_avg:80.25ms
+[2025-07-07 03:13:17] [Rank 0] step:6121/10000 train_time:491139ms step_avg:80.24ms
+[2025-07-07 03:13:19] [Rank 0] step:6141/10000 train_time:493227ms step_avg:80.32ms
+[2025-07-07 03:13:20] [Rank 0] step:6161/10000 train_time:494723ms step_avg:80.30ms
+[2025-07-07 03:13:22] [Rank 0] step:6181/10000 train_time:496216ms step_avg:80.28ms
+[2025-07-07 03:13:23] [Rank 0] step:6201/10000 train_time:497712ms step_avg:80.26ms
+[2025-07-07 03:13:25] [Rank 0] step:6221/10000 train_time:499870ms step_avg:80.35ms
+[2025-07-07 03:13:27] [Rank 0] step:6241/10000 train_time:501363ms step_avg:80.33ms
+[2025-07-07 03:13:28] [Rank 0] step:6261/10000 train_time:502860ms step_avg:80.32ms
+[2025-07-07 03:13:30] [Rank 0] step:6281/10000 train_time:504358ms step_avg:80.30ms
+[2025-07-07 03:13:32] [Rank 0] step:6301/10000 train_time:505907ms step_avg:80.29ms
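Every cycle logs a target of ~5000 detailed-evaluation samples and then lands on exactly 5633. A stable overshoot like this usually comes from per-group quotas with rounding up or a per-group minimum, so no class is under-represented; the sketch below shows the generic pattern with hypothetical names (the actual sampling code is not shown in this diff, so this is an illustration, not the script's implementation):

```python
import math
import random
from collections import defaultdict

def stratified_sample(examples, target=5000, min_per_group=1, seed=0):
    """Keep each group's proportional share of ~target samples.

    Ceiling the quota and enforcing a per-group floor are both
    plausible reasons a run like this one samples slightly more
    than the nominal target.
    """
    rng = random.Random(seed)
    by_group = defaultdict(list)
    for ex in examples:
        by_group[ex["group"]].append(ex)
    total = sum(len(items) for items in by_group.values())
    picked = []
    for group, items in by_group.items():
        quota = max(min_per_group, math.ceil(len(items) * target / total))
        quota = min(quota, len(items))
        picked.extend(rng.sample(items, quota))
    return picked
```

The identical 5633 at every checkpoint also suggests the sampled evaluation set is fixed once (same seed each time), which keeps the per-group curves comparable across steps.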
+[2025-07-07 03:13:34] [Rank 0] step:6321/10000 train_time:508012ms step_avg:80.37ms
+[2025-07-07 03:13:35] [Rank 0] step:6341/10000 train_time:509511ms step_avg:80.35ms
+[2025-07-07 03:13:37] [Rank 0] step:6361/10000 train_time:511008ms step_avg:80.33ms
+[2025-07-07 03:13:38] [Rank 0] step:6381/10000 train_time:512575ms step_avg:80.33ms
+[2025-07-07 03:13:40] [Rank 0] step:6401/10000 train_time:514749ms step_avg:80.42ms
+[2025-07-07 03:13:42] [Rank 0] step:6421/10000 train_time:516250ms step_avg:80.40ms
+[2025-07-07 03:13:43] [Rank 0] step:6441/10000 train_time:517751ms step_avg:80.38ms
+[2025-07-07 03:13:45] [Rank 0] step:6461/10000 train_time:519255ms step_avg:80.37ms
+[2025-07-07 03:13:47] [Rank 0] step:6481/10000 train_time:520755ms step_avg:80.35ms
+[2025-07-07 03:13:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:13:49] [Rank 0] PRINT: step:6500/10000 train_loss:1.2808 val_loss:1.2646 train_time:522493ms step_avg:80.38ms
+[2025-07-07 03:13:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:13:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:13:49] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:19:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:19:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:19:11] [Rank 0] Total Loss: 4.6787
+[2025-07-07 03:19:11] [Rank 0] Total FTA: 0.3946
+[2025-07-07 03:19:11] [Rank 0] Group 0 Loss: 4.9243
+[2025-07-07 03:19:11] [Rank 0] Group 1 Loss: 4.6465
+[2025-07-07 03:19:11] [Rank 0] Group 2 Loss: 4.4754
+[2025-07-07 03:19:11] [Rank 0] Group 3 Loss: 4.6582
+[2025-07-07 03:19:11] [Rank 0] Group 4 Loss: 4.6888
+[2025-07-07 03:19:11] [Rank 0] Group 5 Loss: 4.5555
+[2025-07-07 03:19:11] [Rank 0] Group 6 Loss: 4.5292
+[2025-07-07 03:19:11] [Rank 0] Group 7 Loss: 4.7063
+[2025-07-07 03:19:11] [Rank 0] Group 8 Loss: 4.6632
+[2025-07-07 03:19:11] [Rank 0] Group 9 Loss: 4.6360
+[2025-07-07 03:19:11] [Rank 0] Group 10 Loss: 4.6988
+[2025-07-07 03:19:11] [Rank 0] Group 11 Loss: 4.6849
+[2025-07-07 03:19:11] [Rank 0] Group 0 FTA: 0.5020
+[2025-07-07 03:19:11] [Rank 0] Group 1 FTA: 0.4427
+[2025-07-07 03:19:11] [Rank 0] Group 2 FTA: 0.4271
+[2025-07-07 03:19:11] [Rank 0] Group 3 FTA: 0.3359
+[2025-07-07 03:19:11] [Rank 0] Group 4 FTA: 0.3203
+[2025-07-07 03:19:11] [Rank 0] Group 5 FTA: 0.4062
+[2025-07-07 03:19:11] [Rank 0] Group 6 FTA: 0.3438
+[2025-07-07 03:19:11] [Rank 0] Group 7 FTA: 0.3854
+[2025-07-07 03:19:11] [Rank 0] Group 8 FTA: 0.3646
+[2025-07-07 03:19:11] [Rank 0] Group 9 FTA: 0.4336
+[2025-07-07 03:19:12] [Rank 0] Group 10 FTA: 0.3848
+[2025-07-07 03:19:12] [Rank 0] Group 11 FTA: 0.3584
+[2025-07-07 03:19:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 03:19:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 03:19:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 03:19:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 03:19:13] [Rank 0] step:6501/10000 train_time:522515ms step_avg:80.37ms
+[2025-07-07 03:19:14] [Rank 0] step:6521/10000 train_time:524013ms step_avg:80.36ms
+[2025-07-07 03:19:16] [Rank 0] step:6541/10000 train_time:525503ms step_avg:80.34ms
+[2025-07-07 03:19:17] [Rank 0] step:6561/10000 train_time:526995ms step_avg:80.32ms
+[2025-07-07 03:19:20] [Rank 0] step:6581/10000 train_time:529127ms step_avg:80.40ms
+[2025-07-07 03:19:21] [Rank 0] step:6601/10000 train_time:530619ms step_avg:80.38ms
+[2025-07-07 03:19:23] [Rank 0] step:6621/10000 train_time:532112ms step_avg:80.37ms
+[2025-07-07 03:19:24] [Rank 0] step:6641/10000 train_time:533606ms step_avg:80.35ms
+[2025-07-07 03:19:26] [Rank 0] step:6661/10000 train_time:535357ms step_avg:80.37ms
+[2025-07-07 03:19:28] [Rank 0] step:6681/10000 train_time:537238ms step_avg:80.41ms
+[2025-07-07 03:19:29] [Rank 0] step:6701/10000 train_time:538734ms step_avg:80.40ms
+[2025-07-07 03:19:31] [Rank 0] step:6721/10000 train_time:540229ms step_avg:80.38ms
+[2025-07-07 03:19:32] [Rank 0] step:6741/10000 train_time:541723ms step_avg:80.36ms
+[2025-07-07 03:19:34] [Rank 0] step:6761/10000 train_time:543453ms step_avg:80.38ms
+[2025-07-07 03:19:35] [Rank 0] step:6781/10000 train_time:544949ms step_avg:80.36ms
+[2025-07-07 03:19:37] [Rank 0] step:6801/10000 train_time:546444ms step_avg:80.35ms
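step_avg in the per-step lines is simply cumulative train_time divided by the step index, which is why it creeps up by hundredths of a millisecond whenever one of the occasional ~2 s steps lands in a 20-step window and drifts back down over the fast stretches. A quick check against the step:6500 and step:7000 lines in this log:

```python
def step_avg_ms(step: int, train_time_ms: int) -> float:
    """Cumulative average step time, as printed after each step count."""
    return train_time_ms / step

assert round(step_avg_ms(6500, 522493), 2) == 80.38  # matches the log
assert round(step_avg_ms(7000, 562711), 2) == 80.39  # matches the log
```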
+[2025-07-07 03:19:38] [Rank 0] step:6821/10000 train_time:547940ms step_avg:80.33ms
+[2025-07-07 03:19:41] [Rank 0] step:6841/10000 train_time:549487ms step_avg:80.32ms
+[2025-07-07 03:19:42] [Rank 0] step:6861/10000 train_time:551575ms step_avg:80.39ms
+[2025-07-07 03:19:44] [Rank 0] step:6881/10000 train_time:553073ms step_avg:80.38ms
+[2025-07-07 03:19:45] [Rank 0] step:6901/10000 train_time:554571ms step_avg:80.36ms
+[2025-07-07 03:19:47] [Rank 0] step:6921/10000 train_time:556071ms step_avg:80.35ms
+[2025-07-07 03:19:49] [Rank 0] step:6941/10000 train_time:558216ms step_avg:80.42ms
+[2025-07-07 03:19:50] [Rank 0] step:6961/10000 train_time:559714ms step_avg:80.41ms
+[2025-07-07 03:19:52] [Rank 0] step:6981/10000 train_time:561212ms step_avg:80.39ms
+[2025-07-07 03:19:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:19:54] [Rank 0] PRINT: step:7000/10000 train_loss:1.2526 val_loss:1.2394 train_time:562711ms step_avg:80.39ms
+[2025-07-07 03:19:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:19:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:19:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:25:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:25:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:25:17] [Rank 0] Total Loss: 4.7786
+[2025-07-07 03:25:17] [Rank 0] Total FTA: 0.3732
+[2025-07-07 03:25:17] [Rank 0] Group 0 Loss: 5.0531
+[2025-07-07 03:25:17] [Rank 0] Group 1 Loss: 4.8475
+[2025-07-07 03:25:17] [Rank 0] Group 2 Loss: 4.4780
+[2025-07-07 03:25:17] [Rank 0] Group 3 Loss: 4.7230
+[2025-07-07 03:25:17] [Rank 0] Group 4 Loss: 4.8296
+[2025-07-07 03:25:17] [Rank 0] Group 5 Loss: 4.7079
+[2025-07-07 03:25:17] [Rank 0] Group 6 Loss: 4.6569
+[2025-07-07 03:25:17] [Rank 0] Group 7 Loss: 4.7103
+[2025-07-07 03:25:17] [Rank 0] Group 8 Loss: 4.7860
+[2025-07-07 03:25:17] [Rank 0] Group 9 Loss: 4.7437
+[2025-07-07 03:25:17] [Rank 0] Group 10 Loss: 4.7907
+[2025-07-07 03:25:17] [Rank 0] Group 11 Loss: 4.7588
+[2025-07-07 03:25:17] [Rank 0] Group 0 FTA: 0.3238
+[2025-07-07 03:25:17] [Rank 0] Group 1 FTA: 0.4583
+[2025-07-07 03:25:17] [Rank 0] Group 2 FTA: 0.3958
+[2025-07-07 03:25:17] [Rank 0] Group 3 FTA: 0.2552
+[2025-07-07 03:25:17] [Rank 0] Group 4 FTA: 0.2630
+[2025-07-07 03:25:17] [Rank 0] Group 5 FTA: 0.4714
+[2025-07-07 03:25:17] [Rank 0] Group 6 FTA: 0.3594
+[2025-07-07 03:25:17] [Rank 0] Group 7 FTA: 0.3776
+[2025-07-07 03:25:17] [Rank 0] Group 8 FTA: 0.4167
+[2025-07-07 03:25:17] [Rank 0] Group 9 FTA: 0.3750
+[2025-07-07 03:25:17] [Rank 0] Group 10 FTA: 0.3633
+[2025-07-07 03:25:17] [Rank 0] Group 11 FTA: 0.4102
+[2025-07-07 03:25:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 03:25:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 03:25:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 03:25:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 03:25:19] [Rank 0] step:7001/10000 train_time:562731ms step_avg:80.38ms
+[2025-07-07 03:25:21] [Rank 0] step:7021/10000 train_time:564489ms step_avg:80.40ms
+[2025-07-07 03:25:22] [Rank 0] step:7041/10000 train_time:566367ms step_avg:80.44ms
+[2025-07-07 03:25:24] [Rank 0] step:7061/10000 train_time:567857ms step_avg:80.42ms
+[2025-07-07 03:25:25] [Rank 0] step:7081/10000 train_time:569350ms step_avg:80.41ms
+[2025-07-07 03:25:27] [Rank 0] step:7101/10000 train_time:570843ms step_avg:80.39ms
+[2025-07-07 03:25:29] [Rank 0] step:7121/10000 train_time:572980ms step_avg:80.46ms
+[2025-07-07 03:25:30] [Rank 0] step:7141/10000 train_time:574474ms step_avg:80.45ms
+[2025-07-07 03:25:32] [Rank 0] step:7161/10000 train_time:575970ms step_avg:80.43ms
+[2025-07-07 03:25:33] [Rank 0] step:7181/10000 train_time:577466ms step_avg:80.42ms
+[2025-07-07 03:25:36] [Rank 0] step:7201/10000 train_time:579216ms step_avg:80.44ms
+[2025-07-07 03:25:37] [Rank 0] step:7221/10000 train_time:581123ms step_avg:80.48ms
+[2025-07-07 03:25:39] [Rank 0] step:7241/10000 train_time:582617ms step_avg:80.46ms
+[2025-07-07 03:25:40] [Rank 0] step:7261/10000 train_time:584116ms step_avg:80.45ms
+[2025-07-07 03:25:42] [Rank 0] step:7281/10000 train_time:585613ms step_avg:80.43ms
+[2025-07-07 03:25:44] [Rank 0] step:7301/10000 train_time:587768ms step_avg:80.51ms
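FTA is never expanded in this hunk; in per-class QA probes of this kind it most plausibly stands for first-token accuracy, i.e. whether the model's argmax prediction at the first answer token matches the reference. The definition below is an illustration under that assumption, not the script's actual implementation:

```python
import torch

def first_token_accuracy(logits: torch.Tensor,
                         answer_start: torch.Tensor,
                         targets: torch.Tensor) -> float:
    """Fraction of samples whose greedy prediction at the first answer
    position equals the reference token (assumed meaning of "FTA").

    logits:       (batch, seq, vocab) model outputs
    answer_start: (batch,) index of each sample's first answer token
    targets:      (batch,) reference token id at that position
    """
    rows = torch.arange(logits.size(0))
    preds = logits[rows, answer_start].argmax(dim=-1)
    return (preds == targets).float().mean().item()
```

A first-token metric would also fit the trend here: Total FTA climbs from 0.36 to 0.44 across these checkpoints while the detailed loss stays near 4.6-4.9, and getting the first answer token right is an easier target than modeling the full answer distribution.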
+[2025-07-07 03:25:45] [Rank 0] step:7321/10000 train_time:589267ms step_avg:80.49ms
+[2025-07-07 03:25:47] [Rank 0] step:7341/10000 train_time:590764ms step_avg:80.47ms
+[2025-07-07 03:25:48] [Rank 0] step:7361/10000 train_time:592263ms step_avg:80.46ms
+[2025-07-07 03:25:50] [Rank 0] step:7381/10000 train_time:594433ms step_avg:80.54ms
+[2025-07-07 03:25:52] [Rank 0] step:7401/10000 train_time:595910ms step_avg:80.52ms
+[2025-07-07 03:25:53] [Rank 0] step:7421/10000 train_time:597409ms step_avg:80.50ms
+[2025-07-07 03:25:55] [Rank 0] step:7441/10000 train_time:598908ms step_avg:80.49ms
+[2025-07-07 03:25:56] [Rank 0] step:7461/10000 train_time:600407ms step_avg:80.47ms
+[2025-07-07 03:25:58] [Rank 0] step:7481/10000 train_time:602551ms step_avg:80.54ms
+[2025-07-07 03:26:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:26:01] [Rank 0] PRINT: step:7500/10000 train_loss:1.2290 val_loss:1.2189 train_time:604050ms step_avg:80.54ms
+[2025-07-07 03:26:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:26:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:26:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:31:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:31:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:31:22] [Rank 0] Total Loss: 4.7318
+[2025-07-07 03:31:22] [Rank 0] Total FTA: 0.3913
+[2025-07-07 03:31:22] [Rank 0] Group 0 Loss: 4.8458
+[2025-07-07 03:31:22] [Rank 0] Group 1 Loss: 4.7515
+[2025-07-07 03:31:22] [Rank 0] Group 2 Loss: 4.4584
+[2025-07-07 03:31:22] [Rank 0] Group 3 Loss: 4.8075
+[2025-07-07 03:31:22] [Rank 0] Group 4 Loss: 4.7378
+[2025-07-07 03:31:22] [Rank 0] Group 5 Loss: 4.5894
+[2025-07-07 03:31:22] [Rank 0] Group 6 Loss: 4.7150
+[2025-07-07 03:31:22] [Rank 0] Group 7 Loss: 4.7478
+[2025-07-07 03:31:22] [Rank 0] Group 8 Loss: 4.7492
+[2025-07-07 03:31:22] [Rank 0] Group 9 Loss: 4.7449
+[2025-07-07 03:31:22] [Rank 0] Group 10 Loss: 4.7463
+[2025-07-07 03:31:22] [Rank 0] Group 11 Loss: 4.7475
+[2025-07-07 03:31:22] [Rank 0] Group 0 FTA: 0.4941
+[2025-07-07 03:31:22] [Rank 0] Group 1 FTA: 0.2604
+[2025-07-07 03:31:22] [Rank 0] Group 2 FTA: 0.3464
+[2025-07-07 03:31:22] [Rank 0] Group 3 FTA: 0.3620
+[2025-07-07 03:31:22] [Rank 0] Group 4 FTA: 0.2943
+[2025-07-07 03:31:23] [Rank 0] Group 5 FTA: 0.4740
+[2025-07-07 03:31:23] [Rank 0] Group 6 FTA: 0.3880
+[2025-07-07 03:31:23] [Rank 0] Group 7 FTA: 0.3984
+[2025-07-07 03:31:23] [Rank 0] Group 8 FTA: 0.3750
+[2025-07-07 03:31:23] [Rank 0] Group 9 FTA: 0.4180
+[2025-07-07 03:31:23] [Rank 0] Group 10 FTA: 0.3730
+[2025-07-07 03:31:23] [Rank 0] Group 11 FTA: 0.4033
+[2025-07-07 03:31:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 03:31:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 03:31:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 03:31:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 03:31:24] [Rank 0] step:7501/10000 train_time:604072ms step_avg:80.53ms
+[2025-07-07 03:31:26] [Rank 0] step:7521/10000 train_time:605563ms step_avg:80.52ms
+[2025-07-07 03:31:27] [Rank 0] step:7541/10000 train_time:607051ms step_avg:80.50ms
+[2025-07-07 03:31:29] [Rank 0] step:7561/10000 train_time:608594ms step_avg:80.49ms
+[2025-07-07 03:31:31] [Rank 0] step:7581/10000 train_time:610683ms step_avg:80.55ms
+[2025-07-07 03:31:32] [Rank 0] step:7601/10000 train_time:612177ms step_avg:80.54ms
+[2025-07-07 03:31:34] [Rank 0] step:7621/10000 train_time:613767ms step_avg:80.54ms
+[2025-07-07 03:31:35] [Rank 0] step:7641/10000 train_time:615260ms step_avg:80.52ms
+[2025-07-07 03:31:37] [Rank 0] step:7661/10000 train_time:617423ms step_avg:80.59ms
+[2025-07-07 03:31:39] [Rank 0] step:7681/10000 train_time:618918ms step_avg:80.58ms
+[2025-07-07 03:31:40] [Rank 0] step:7701/10000 train_time:620414ms step_avg:80.56ms
+[2025-07-07 03:31:42] [Rank 0] step:7721/10000 train_time:621912ms step_avg:80.55ms
+[2025-07-07 03:31:44] [Rank 0] step:7741/10000 train_time:623662ms step_avg:80.57ms
+[2025-07-07 03:31:46] [Rank 0] step:7761/10000 train_time:625563ms step_avg:80.60ms
+[2025-07-07 03:31:47] [Rank 0] step:7781/10000 train_time:627058ms step_avg:80.59ms
+[2025-07-07 03:31:48] [Rank 0] step:7801/10000 train_time:628557ms step_avg:80.57ms
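The four "[✓] ... updated and saved" lines after every evaluation indicate the script redraws each figure from the accumulated history and overwrites the same four PNGs in the run directory. A minimal matplotlib sketch of that overwrite-in-place pattern (function and argument names invented for illustration):

```python
import matplotlib
matplotlib.use("Agg")  # headless backend, as on a training node
import matplotlib.pyplot as plt

def update_per_class_loss_curves(steps, loss_history, out_path):
    """Redraw the per-group loss curves and overwrite out_path.

    steps:        eval steps so far, e.g. [500, 1000, ..., 8500]
    loss_history: {group_id: [loss at each eval step]}
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    for group, losses in sorted(loss_history.items()):
        ax.plot(steps, losses, label=f"Group {group}")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval loss")
    ax.legend(ncol=3, fontsize=7)
    fig.savefig(out_path, dpi=150)
    plt.close(fig)  # avoid accumulating open figures across evals
```

Overwriting one file per metric keeps the run directory to four PNGs, at the cost of losing intermediate snapshots of the figures.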
+[2025-07-07 03:31:50] [Rank 0] step:7821/10000 train_time:630057ms step_avg:80.56ms
+[2025-07-07 03:31:52] [Rank 0] step:7841/10000 train_time:632205ms step_avg:80.63ms
+[2025-07-07 03:31:54] [Rank 0] step:7861/10000 train_time:633703ms step_avg:80.61ms
+[2025-07-07 03:31:55] [Rank 0] step:7881/10000 train_time:635205ms step_avg:80.60ms
+[2025-07-07 03:31:57] [Rank 0] step:7901/10000 train_time:636705ms step_avg:80.59ms
+[2025-07-07 03:31:59] [Rank 0] step:7921/10000 train_time:638871ms step_avg:80.66ms
+[2025-07-07 03:32:00] [Rank 0] step:7941/10000 train_time:640350ms step_avg:80.64ms
+[2025-07-07 03:32:02] [Rank 0] step:7961/10000 train_time:641850ms step_avg:80.62ms
+[2025-07-07 03:32:03] [Rank 0] step:7981/10000 train_time:643351ms step_avg:80.61ms
+[2025-07-07 03:32:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:32:06] [Rank 0] PRINT: step:8000/10000 train_loss:1.2089 val_loss:1.1990 train_time:644853ms step_avg:80.61ms
+[2025-07-07 03:32:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:32:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:32:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:37:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:37:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:37:27] [Rank 0] Total Loss: 4.8659
+[2025-07-07 03:37:27] [Rank 0] Total FTA: 0.4309
+[2025-07-07 03:37:27] [Rank 0] Group 0 Loss: 5.3149
+[2025-07-07 03:37:27] [Rank 0] Group 1 Loss: 4.6939
+[2025-07-07 03:37:27] [Rank 0] Group 2 Loss: 4.5107
+[2025-07-07 03:37:27] [Rank 0] Group 3 Loss: 4.8910
+[2025-07-07 03:37:27] [Rank 0] Group 4 Loss: 4.8848
+[2025-07-07 03:37:27] [Rank 0] Group 5 Loss: 4.7983
+[2025-07-07 03:37:27] [Rank 0] Group 6 Loss: 4.7747
+[2025-07-07 03:37:27] [Rank 0] Group 7 Loss: 4.8790
+[2025-07-07 03:37:27] [Rank 0] Group 8 Loss: 4.8633
+[2025-07-07 03:37:27] [Rank 0] Group 9 Loss: 4.8245
+[2025-07-07 03:37:27] [Rank 0] Group 10 Loss: 4.7900
+[2025-07-07 03:37:27] [Rank 0] Group 11 Loss: 4.8137
+[2025-07-07 03:37:27] [Rank 0] Group 0 FTA: 0.4824
+[2025-07-07 03:37:27] [Rank 0] Group 1 FTA: 0.5547
+[2025-07-07 03:37:27] [Rank 0] Group 2 FTA: 0.5078
+[2025-07-07 03:37:27] [Rank 0] Group 3 FTA: 0.3177
+[2025-07-07 03:37:27] [Rank 0] Group 4 FTA: 0.3464
+[2025-07-07 03:37:27] [Rank 0] Group 5 FTA: 0.4427
+[2025-07-07 03:37:27] [Rank 0] Group 6 FTA: 0.3802
+[2025-07-07 03:37:27] [Rank 0] Group 7 FTA: 0.4245
+[2025-07-07 03:37:27] [Rank 0] Group 8 FTA: 0.4167
+[2025-07-07 03:37:27] [Rank 0] Group 9 FTA: 0.4766
+[2025-07-07 03:37:27] [Rank 0] Group 10 FTA: 0.4141
+[2025-07-07 03:37:27] [Rank 0] Group 11 FTA: 0.4102
+[2025-07-07 03:37:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 03:37:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 03:37:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 03:37:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 03:37:29] [Rank 0] step:8001/10000 train_time:644876ms step_avg:80.60ms
+[2025-07-07 03:37:31] [Rank 0] step:8021/10000 train_time:647051ms step_avg:80.67ms
+[2025-07-07 03:37:33] [Rank 0] step:8041/10000 train_time:648543ms step_avg:80.65ms
+[2025-07-07 03:37:34] [Rank 0] step:8061/10000 train_time:650035ms step_avg:80.64ms
+[2025-07-07 03:37:36] [Rank 0] step:8081/10000 train_time:651528ms step_avg:80.62ms
+[2025-07-07 03:37:38] [Rank 0] step:8101/10000 train_time:653275ms step_avg:80.64ms
+[2025-07-07 03:37:39] [Rank 0] step:8121/10000 train_time:655162ms step_avg:80.67ms
+[2025-07-07 03:37:41] [Rank 0] step:8141/10000 train_time:656655ms step_avg:80.66ms
+[2025-07-07 03:37:42] [Rank 0] step:8161/10000 train_time:658151ms step_avg:80.65ms
+[2025-07-07 03:37:44] [Rank 0] step:8181/10000 train_time:659648ms step_avg:80.63ms
+[2025-07-07 03:37:46] [Rank 0] step:8201/10000 train_time:661803ms step_avg:80.70ms
+[2025-07-07 03:37:47] [Rank 0] step:8221/10000 train_time:663300ms step_avg:80.68ms
+[2025-07-07 03:37:49] [Rank 0] step:8241/10000 train_time:664798ms step_avg:80.67ms
+[2025-07-07 03:37:50] [Rank 0] step:8261/10000 train_time:666295ms step_avg:80.66ms
+[2025-07-07 03:37:53] [Rank 0] step:8281/10000 train_time:667796ms step_avg:80.64ms
+[2025-07-07 03:37:54] [Rank 0] step:8301/10000 train_time:669949ms step_avg:80.71ms
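Lines this regular are easy to recover into a table for offline analysis, and keying a dict on the step number keeps the parser robust to repeated or re-emitted lines. An illustrative parser, assuming only the per-step line format visible above (the PRINT eval lines deliberately do not match this pattern):

```python
import re

STEP_RE = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")

def parse_step_lines(path: str):
    """Return sorted (step, train_time_ms, step_avg_ms) tuples.

    Duplicate lines overwrite the same dict key, so each step
    comes out exactly once.
    """
    rows = {}
    with open(path) as f:
        for line in f:
            m = STEP_RE.search(line)
            if m:
                rows[int(m[1])] = (int(m[2]), float(m[3]))
    return [(step, *rows[step]) for step in sorted(rows)]
```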
0] step:8301/10000 train_time:669949ms step_avg:80.71ms +[2025-07-07 03:37:56] [Rank 0] step:8321/10000 train_time:671449ms step_avg:80.69ms +[2025-07-07 03:37:56] [Rank 0] step:8321/10000 train_time:671449ms step_avg:80.69ms +[2025-07-07 03:37:57] [Rank 0] step:8341/10000 train_time:672949ms step_avg:80.68ms +[2025-07-07 03:37:57] [Rank 0] step:8341/10000 train_time:672949ms step_avg:80.68ms +[2025-07-07 03:37:59] [Rank 0] step:8361/10000 train_time:674451ms step_avg:80.67ms +[2025-07-07 03:37:59] [Rank 0] step:8361/10000 train_time:674451ms step_avg:80.67ms +[2025-07-07 03:38:01] [Rank 0] step:8381/10000 train_time:676620ms step_avg:80.73ms +[2025-07-07 03:38:01] [Rank 0] step:8381/10000 train_time:676620ms step_avg:80.73ms +[2025-07-07 03:38:02] [Rank 0] step:8401/10000 train_time:678123ms step_avg:80.72ms +[2025-07-07 03:38:02] [Rank 0] step:8401/10000 train_time:678123ms step_avg:80.72ms +[2025-07-07 03:38:04] [Rank 0] step:8421/10000 train_time:679625ms step_avg:80.71ms +[2025-07-07 03:38:04] [Rank 0] step:8421/10000 train_time:679625ms step_avg:80.71ms +[2025-07-07 03:38:05] [Rank 0] step:8441/10000 train_time:681128ms step_avg:80.69ms +[2025-07-07 03:38:05] [Rank 0] step:8441/10000 train_time:681128ms step_avg:80.69ms +[2025-07-07 03:38:07] [Rank 0] step:8461/10000 train_time:682682ms step_avg:80.69ms +[2025-07-07 03:38:07] [Rank 0] step:8461/10000 train_time:682682ms step_avg:80.69ms +[2025-07-07 03:38:09] [Rank 0] step:8481/10000 train_time:684802ms step_avg:80.75ms +[2025-07-07 03:38:09] [Rank 0] step:8481/10000 train_time:684802ms step_avg:80.75ms +[2025-07-07 03:38:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 03:38:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 03:38:11] [Rank 0] PRINT: step:8500/10000 train_loss:1.1909 val_loss:1.1837 train_time:686305ms step_avg:80.74ms +[2025-07-07 03:38:11] [Rank 0] PRINT: step:8500/10000 train_loss:1.1909 val_loss:1.1837 train_time:686305ms step_avg:80.74ms +[2025-07-07 03:38:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 03:38:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 03:38:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 03:38:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 03:38:11] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 03:38:11] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 03:43:34] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 03:43:34] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 03:43:34] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 03:43:34] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 03:43:34] [Rank 0] Total Loss: 4.7962 +[2025-07-07 03:43:34] [Rank 0] Total Loss: 4.7962 +[2025-07-07 03:43:34] [Rank 0] Total FTA: 0.4440 +[2025-07-07 03:43:34] [Rank 0] Total FTA: 0.4440 +[2025-07-07 03:43:34] [Rank 0] Group 0 Loss: 4.9560 +[2025-07-07 03:43:34] [Rank 0] Group 0 Loss: 4.9560 +[2025-07-07 03:43:34] [Rank 0] Group 1 Loss: 4.6531 +[2025-07-07 03:43:34] [Rank 0] Group 1 Loss: 4.6531 +[2025-07-07 03:43:34] [Rank 0] Group 2 Loss: 4.4686 +[2025-07-07 03:43:34] [Rank 0] Group 2 Loss: 4.4686 +[2025-07-07 03:43:34] [Rank 0] Group 3 Loss: 4.8881 +[2025-07-07 03:43:34] [Rank 0] Group 3 Loss: 4.8881 +[2025-07-07 03:43:34] [Rank 0] Group 4 Loss: 4.8538 +[2025-07-07 03:43:34] [Rank 0] Group 4 Loss: 4.8538 +[2025-07-07 03:43:34] [Rank 0] Group 5 Loss: 4.7374 +[2025-07-07 03:43:34] [Rank 0] Group 5 Loss: 4.7374 +[2025-07-07 03:43:34] [Rank 0] Group 6 Loss: 4.7782 +[2025-07-07 03:43:34] [Rank 0] Group 6 Loss: 4.7782 +[2025-07-07 03:43:34] [Rank 0] Group 7 Loss: 4.7533 +[2025-07-07 03:43:34] [Rank 0] Group 7 Loss: 4.7533 +[2025-07-07 03:43:34] [Rank 0] Group 8 Loss: 4.8047 +[2025-07-07 03:43:34] [Rank 0] Group 8 Loss: 4.8047 +[2025-07-07 03:43:34] [Rank 0] Group 9 Loss: 4.7458 +[2025-07-07 03:43:34] [Rank 0] Group 9 Loss: 4.7458 +[2025-07-07 03:43:34] [Rank 0] Group 10 Loss: 4.7827 +[2025-07-07 03:43:34] [Rank 0] Group 10 Loss: 4.7827 +[2025-07-07 03:43:34] [Rank 0] Group 11 Loss: 4.8576 +[2025-07-07 03:43:34] [Rank 0] Group 11 Loss: 4.8576 +[2025-07-07 03:43:34] [Rank 0] Group 0 FTA: 0.4980 +[2025-07-07 03:43:34] [Rank 0] Group 0 FTA: 0.4980 +[2025-07-07 03:43:34] [Rank 0] Group 1 FTA: 0.5182 +[2025-07-07 03:43:34] [Rank 0] Group 1 FTA: 0.5182 +[2025-07-07 03:43:34] [Rank 0] Group 2 FTA: 0.4062 +[2025-07-07 03:43:34] [Rank 0] Group 2 FTA: 0.4062 +[2025-07-07 03:43:34] [Rank 0] Group 3 FTA: 0.4036 +[2025-07-07 03:43:34] [Rank 0] Group 3 FTA: 0.4036 +[2025-07-07 03:43:34] [Rank 0] Group 4 FTA: 0.4297 +[2025-07-07 03:43:34] [Rank 0] Group 4 FTA: 0.4297 +[2025-07-07 03:43:34] [Rank 0] Group 5 FTA: 0.4740 +[2025-07-07 03:43:34] [Rank 0] Group 5 FTA: 0.4740 +[2025-07-07 03:43:34] [Rank 0] Group 6 FTA: 0.4089 +[2025-07-07 03:43:34] [Rank 0] Group 6 FTA: 0.4089 +[2025-07-07 03:43:34] [Rank 0] Group 7 FTA: 0.4167 +[2025-07-07 03:43:34] [Rank 0] Group 7 FTA: 0.4167 +[2025-07-07 03:43:34] [Rank 0] Group 8 FTA: 0.4193 +[2025-07-07 03:43:34] [Rank 0] Group 8 FTA: 0.4193 +[2025-07-07 03:43:34] [Rank 0] Group 9 FTA: 0.3906 +[2025-07-07 03:43:34] [Rank 0] Group 9 FTA: 0.3906 +[2025-07-07 03:43:34] [Rank 0] Group 10 FTA: 0.4316 +[2025-07-07 03:43:34] [Rank 0] Group 10 FTA: 0.4316 +[2025-07-07 03:43:34] [Rank 0] Group 11 FTA: 0.4512 +[2025-07-07 03:43:34] [Rank 0] Group 11 FTA: 0.4512 +[2025-07-07 03:43:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 03:43:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 03:43:35] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 03:43:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 03:43:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 03:43:36] [Rank 0] step:8501/10000 train_time:686329ms step_avg:80.74ms +[2025-07-07 03:43:37] [Rank 0] step:8521/10000 train_time:687824ms step_avg:80.72ms +[2025-07-07 03:43:39] [Rank 0] step:8541/10000 train_time:689314ms step_avg:80.71ms +[2025-07-07 03:43:41] [Rank 0] step:8561/10000 train_time:691459ms step_avg:80.77ms +[2025-07-07 03:43:42] [Rank 0] step:8581/10000 train_time:692949ms step_avg:80.75ms +[2025-07-07 03:43:44] [Rank 0] step:8601/10000 train_time:694443ms step_avg:80.74ms +[2025-07-07 03:43:45] [Rank 0] step:8621/10000 train_time:695936ms step_avg:80.73ms +[2025-07-07 03:43:48] [Rank 0] step:8641/10000 train_time:697482ms step_avg:80.72ms +[2025-07-07 03:43:49] [Rank 0] step:8661/10000 train_time:699570ms step_avg:80.77ms +[2025-07-07 03:43:51] [Rank 0] step:8681/10000 train_time:701064ms step_avg:80.76ms +[2025-07-07 03:43:52] [Rank 0] step:8701/10000 train_time:702560ms step_avg:80.74ms +[2025-07-07 03:43:54] [Rank 0] step:8721/10000 train_time:704057ms step_avg:80.73ms +[2025-07-07 03:43:56] [Rank 0] step:8741/10000 train_time:706213ms step_avg:80.79ms +[2025-07-07 03:43:57] [Rank 0] step:8761/10000 train_time:707710ms step_avg:80.78ms +[2025-07-07 03:43:59] [Rank 0] step:8781/10000 train_time:709207ms step_avg:80.77ms +[2025-07-07 03:44:00] [Rank 0] step:8801/10000 train_time:710707ms step_avg:80.75ms
+[2025-07-07 03:44:02] [Rank 0] step:8821/10000 train_time:712464ms step_avg:80.77ms +[2025-07-07 03:44:04] [Rank 0] step:8841/10000 train_time:714358ms step_avg:80.80ms +[2025-07-07 03:44:05] [Rank 0] step:8861/10000 train_time:715858ms step_avg:80.79ms +[2025-07-07 03:44:07] [Rank 0] step:8881/10000 train_time:717358ms step_avg:80.77ms +[2025-07-07 03:44:08] [Rank 0] step:8901/10000 train_time:718859ms step_avg:80.76ms +[2025-07-07 03:44:11] [Rank 0] step:8921/10000 train_time:721031ms step_avg:80.82ms +[2025-07-07 03:44:12] [Rank 0] step:8941/10000 train_time:722532ms step_avg:80.81ms +[2025-07-07 03:44:14] [Rank 0] step:8961/10000 train_time:724086ms step_avg:80.80ms +[2025-07-07 03:44:15] [Rank 0] step:8981/10000 train_time:725588ms step_avg:80.79ms +[2025-07-07 03:44:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 03:44:17] [Rank 0] PRINT: step:9000/10000 train_loss:1.1774 val_loss:1.1719 train_time:727090ms step_avg:80.79ms +[2025-07-07 03:44:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 03:44:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
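The divisibility warning repeated above is plain integer arithmetic over the two values it reports; a minimal sketch, assuming the validation loader simply floors to whole batches (the warning text itself only says tokens "might be missed"):

val_tokens = 1966080
val_batch_size = 262144
full_batches = val_tokens // val_batch_size           # 7 full batches (ratio is 7.5)
missed = val_tokens - full_batches * val_batch_size   # 131072 tokens skipped per validation pass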
+[2025-07-07 03:44:17] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 03:49:42] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 03:49:42] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 03:49:42] [Rank 0] Total Loss: 4.8694 +[2025-07-07 03:49:42] [Rank 0] Total FTA: 0.4495 +[2025-07-07 03:49:42] [Rank 0] Group 0 Loss: 5.1137 +[2025-07-07 03:49:42] [Rank 0] Group 1 Loss: 4.8965 +[2025-07-07 03:49:42] [Rank 0] Group 2 Loss: 4.5905 +[2025-07-07 03:49:42] [Rank 0] Group 3 Loss: 4.9837 +[2025-07-07 03:49:42] [Rank 0] Group 4 Loss: 4.7050 +[2025-07-07 03:49:42] [Rank 0] Group 5 Loss: 4.8296 +[2025-07-07 03:49:42] [Rank 0] Group 6 Loss: 4.8274 +[2025-07-07 03:49:42] [Rank 0] Group 7 Loss: 4.8661 +[2025-07-07 03:49:42] [Rank 0] Group 8 Loss: 4.9175 +[2025-07-07 03:49:42] [Rank 0] Group 9 Loss: 4.7169 +[2025-07-07 03:49:42] [Rank 0] Group 10 Loss: 4.8867 +[2025-07-07 03:49:42] [Rank 0] Group 11 Loss: 4.8425 +[2025-07-07 03:49:42] [Rank 0] Group 0 FTA: 0.5020 +[2025-07-07 03:49:42] [Rank 0] Group 1 FTA: 0.5130 +[2025-07-07 03:49:42] [Rank 0] Group 2 FTA: 0.4896 +[2025-07-07 03:49:42] [Rank 0] Group 3 FTA: 0.3542 +[2025-07-07 03:49:42] [Rank 0] Group 4 FTA: 0.3880 +[2025-07-07 03:49:42] [Rank 0] Group 5 FTA: 0.4792 +[2025-07-07 03:49:42] [Rank 0] Group 6 FTA: 0.4505 +[2025-07-07 03:49:42] [Rank 0] Group 7 FTA: 0.4271 +[2025-07-07 03:49:42] [Rank 0] Group 8 FTA: 0.4219 +[2025-07-07 03:49:42] [Rank 0] Group 9 FTA: 0.4570 +[2025-07-07 03:49:42] [Rank 0] Group 10 FTA: 0.4531 +[2025-07-07 03:49:42] [Rank 0] Group 11 FTA: 0.4336 +[2025-07-07 03:49:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 03:49:43] [Rank 0] [✓] Per-Class FTA curve
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 03:49:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 03:49:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 03:49:45] [Rank 0] step:9001/10000 train_time:727221ms step_avg:80.79ms +[2025-07-07 03:49:46] [Rank 0] step:9021/10000 train_time:729314ms step_avg:80.85ms +[2025-07-07 03:49:48] [Rank 0] step:9041/10000 train_time:730806ms step_avg:80.83ms +[2025-07-07 03:49:49] [Rank 0] step:9061/10000 train_time:732300ms step_avg:80.82ms +[2025-07-07 03:49:51] [Rank 0] step:9081/10000 train_time:733793ms step_avg:80.81ms +[2025-07-07 03:49:52] [Rank 0] step:9101/10000 train_time:735519ms step_avg:80.82ms +[2025-07-07 03:49:54] [Rank 0] step:9121/10000 train_time:737015ms step_avg:80.80ms +[2025-07-07 03:49:55] [Rank 0] step:9141/10000 train_time:738508ms step_avg:80.79ms +[2025-07-07 03:49:57] [Rank 0] step:9161/10000 train_time:740001ms step_avg:80.78ms +[2025-07-07 03:49:59] [Rank 0] step:9181/10000 train_time:742177ms step_avg:80.84ms +[2025-07-07 03:50:00] [Rank 0] step:9201/10000 train_time:743652ms step_avg:80.82ms +[2025-07-07 03:50:02] [Rank 0] step:9221/10000 train_time:745146ms step_avg:80.81ms +[2025-07-07 03:50:03] [Rank 0] step:9241/10000 train_time:746642ms step_avg:80.80ms +[2025-07-07 03:50:05] [Rank 0] step:9261/10000 train_time:748138ms step_avg:80.78ms +[2025-07-07 03:50:07] [Rank 0] step:9281/10000 train_time:749871ms step_avg:80.80ms +[2025-07-07 03:50:08] [Rank 0] step:9301/10000 train_time:751368ms step_avg:80.78ms
+[2025-07-07 03:50:10] [Rank 0] step:9321/10000 train_time:752867ms step_avg:80.77ms +[2025-07-07 03:50:11] [Rank 0] step:9341/10000 train_time:754368ms step_avg:80.76ms +[2025-07-07 03:50:13] [Rank 0] step:9361/10000 train_time:756545ms step_avg:80.82ms +[2025-07-07 03:50:15] [Rank 0] step:9381/10000 train_time:758022ms step_avg:80.80ms +[2025-07-07 03:50:16] [Rank 0] step:9401/10000 train_time:759521ms step_avg:80.79ms +[2025-07-07 03:50:18] [Rank 0] step:9421/10000 train_time:761022ms step_avg:80.78ms +[2025-07-07 03:50:19] [Rank 0] step:9441/10000 train_time:762523ms step_avg:80.77ms +[2025-07-07 03:50:21] [Rank 0] step:9461/10000 train_time:764662ms step_avg:80.82ms +[2025-07-07 03:50:23] [Rank 0] step:9481/10000 train_time:766161ms step_avg:80.81ms +[2025-07-07 03:50:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 03:50:25] [Rank 0] PRINT: step:9500/10000 train_loss:1.1665 val_loss:1.1632 train_time:767661ms step_avg:80.81ms +[2025-07-07 03:50:25] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 03:50:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
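The per-group numbers in these evaluation blocks follow the power-law class layout built by generate_powerlaw_selection_counts in the script logged further below. A minimal sketch of that layout, assuming m = 11 (the value consistent with the 12 groups, 0 through 11, reported in this log):

for g in range(12):                                 # groups 0..11
    num_classes = 1 if g == 0 else 2 ** (g - 1)     # group 0: 1 class; group g: 2**(g-1) classes
    samples_per_class = 2 ** (11 - g)               # earlier groups are sampled far more often
    print(f"group {g}: {num_classes} classes x {samples_per_class} samples/class")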
+[2025-07-07 03:50:25] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 03:55:52] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 03:55:52] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 03:55:52] [Rank 0] Total Loss: 4.8822 +[2025-07-07 03:55:52] [Rank 0] Total FTA: 0.4546 +[2025-07-07 03:55:52] [Rank 0] Group 0 Loss: 5.1899 +[2025-07-07 03:55:52] [Rank 0] Group 1 Loss: 4.7143 +[2025-07-07 03:55:52] [Rank 0] Group 2 Loss: 4.5796 +[2025-07-07 03:55:52] [Rank 0] Group 3 Loss: 4.9506 +[2025-07-07 03:55:52] [Rank 0] Group 4 Loss: 4.7999 +[2025-07-07 03:55:52] [Rank 0] Group 5 Loss: 4.8655 +[2025-07-07 03:55:52] [Rank 0] Group 6 Loss: 4.8454 +[2025-07-07 03:55:52] [Rank 0] Group 7 Loss: 4.8799 +[2025-07-07 03:55:52] [Rank 0] Group 8 Loss: 4.8234 +[2025-07-07 03:55:52] [Rank 0] Group 9 Loss: 4.9342 +[2025-07-07 03:55:52] [Rank 0] Group 10 Loss: 4.9153 +[2025-07-07 03:55:52] [Rank 0] Group 11 Loss: 4.8463 +[2025-07-07 03:55:52] [Rank 0] Group 0 FTA: 0.5137 +[2025-07-07 03:55:52] [Rank 0] Group 1 FTA: 0.5208 +[2025-07-07 03:55:52] [Rank 0] Group 2 FTA: 0.5026 +[2025-07-07 03:55:52] [Rank 0] Group 3 FTA: 0.3828 +[2025-07-07 03:55:52] [Rank 0] Group 4 FTA: 0.4297 +[2025-07-07 03:55:52] [Rank 0] Group 5 FTA: 0.4115 +[2025-07-07 03:55:52] [Rank 0] Group 6 FTA: 0.4219 +[2025-07-07 03:55:52] [Rank 0] Group 7 FTA: 0.4323 +[2025-07-07 03:55:52] [Rank 0] Group 8 FTA: 0.4505 +[2025-07-07 03:55:52] [Rank 0] Group 9 FTA: 0.4180 +[2025-07-07 03:55:52] [Rank 0] Group 10 FTA: 0.4668 +[2025-07-07 03:55:52] [Rank 0] Group 11 FTA: 0.4453 +[2025-07-07 03:55:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 03:55:53] [Rank 0] [✓] Per-Class FTA curve
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 03:55:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 03:55:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 03:55:53] [Rank 0] step:9501/10000 train_time:767685ms step_avg:80.80ms +[2025-07-07 03:55:55] [Rank 0] step:9521/10000 train_time:769198ms step_avg:80.79ms +[2025-07-07 03:55:57] [Rank 0] step:9541/10000 train_time:770744ms step_avg:80.78ms +[2025-07-07 03:55:59] [Rank 0] step:9561/10000 train_time:772830ms step_avg:80.83ms +[2025-07-07 03:56:00] [Rank 0] step:9581/10000 train_time:774321ms step_avg:80.82ms +[2025-07-07 03:56:02] [Rank 0] step:9601/10000 train_time:775814ms step_avg:80.81ms +[2025-07-07 03:56:03] [Rank 0] step:9621/10000 train_time:777307ms step_avg:80.79ms +[2025-07-07 03:56:05] [Rank 0] step:9641/10000 train_time:779446ms step_avg:80.85ms +[2025-07-07 03:56:07] [Rank 0] step:9661/10000 train_time:780939ms step_avg:80.83ms +[2025-07-07 03:56:08] [Rank 0] step:9681/10000 train_time:782433ms step_avg:80.82ms +[2025-07-07 03:56:10] [Rank 0] step:9701/10000 train_time:783928ms step_avg:80.81ms +[2025-07-07 03:56:12] [Rank 0] step:9721/10000 train_time:785678ms step_avg:80.82ms +[2025-07-07 03:56:13] [Rank 0] step:9741/10000 train_time:787584ms step_avg:80.85ms +[2025-07-07 03:56:15] [Rank 0] step:9761/10000 train_time:789079ms step_avg:80.84ms +[2025-07-07 03:56:16] [Rank 0] step:9781/10000 train_time:790576ms step_avg:80.83ms +[2025-07-07 03:56:18] [Rank 0] step:9801/10000 train_time:792073ms step_avg:80.82ms
+[2025-07-07 03:56:20] [Rank 0] step:9821/10000 train_time:794228ms step_avg:80.87ms +[2025-07-07 03:56:22] [Rank 0] step:9841/10000 train_time:795725ms step_avg:80.86ms +[2025-07-07 03:56:23] [Rank 0] step:9861/10000 train_time:797222ms step_avg:80.85ms +[2025-07-07 03:56:25] [Rank 0] step:9881/10000 train_time:798719ms step_avg:80.83ms +[2025-07-07 03:56:26] [Rank 0] step:9901/10000 train_time:800267ms step_avg:80.83ms +[2025-07-07 03:56:28] [Rank 0] step:9921/10000 train_time:801954ms step_avg:80.83ms +[2025-07-07 03:56:29] [Rank 0] step:9941/10000 train_time:803453ms step_avg:80.82ms +[2025-07-07 03:56:31] [Rank 0] step:9961/10000 train_time:804954ms step_avg:80.81ms +[2025-07-07 03:56:32] [Rank 0] step:9981/10000 train_time:806455ms step_avg:80.80ms +[2025-07-07 03:56:34] [Rank 0] step:10000/10000 train_time:808523ms step_avg:80.85ms +[2025-07-07 03:56:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 03:56:35] [Rank 0] PRINT: step:10000/10000 train_loss:1.1584 val_loss:1.1566 train_time:808602ms step_avg:80.86ms +[2025-07-07 03:56:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 03:56:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
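A minimal sketch of the stratified sampling announced above, mirroring run_detailed_evaluation in the script logged further below; the max(1, ...) floor keeps every class represented, which is how the sampled set (5633) can land above the ~5000 target:

import random
from collections import defaultdict

def stratified_sample(items, num_samples):
    by_class = defaultdict(list)
    for item in items:
        by_class[item['class_id']].append(item)
    ratio = num_samples / len(items)
    sampled = []
    for group in by_class.values():
        k = max(1, int(len(group) * ratio))  # rare classes keep at least one sample
        sampled.extend(random.sample(group, min(len(group), k)))
    return sampled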
+[2025-07-07 03:56:35] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 04:02:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 04:02:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 04:02:02] [Rank 0] Total Loss: 4.9409 +[2025-07-07 04:02:02] [Rank 0] Total FTA: 0.4607 +[2025-07-07 04:02:02] [Rank 0] Group 0 Loss: 5.3886 +[2025-07-07 04:02:02] [Rank 0] Group 1 Loss: 4.7152 +[2025-07-07 04:02:02] [Rank 0] Group 2 Loss: 4.6003 +[2025-07-07 04:02:02] [Rank 0] Group 3 Loss: 4.9291 +[2025-07-07 04:02:02] [Rank 0] Group 4 Loss: 4.7994 +[2025-07-07 04:02:02] [Rank 0] Group 5 Loss: 4.8002 +[2025-07-07 04:02:02] [Rank 0] Group 6 Loss: 4.9057 +[2025-07-07 04:02:02] [Rank 0] Group 7 Loss: 4.9634 +[2025-07-07 04:02:02] [Rank 0] Group 8 Loss: 4.8341 +[2025-07-07 04:02:02] [Rank 0] Group 9 Loss: 4.9171 +[2025-07-07 04:02:02] [Rank 0] Group 10 Loss: 4.9884 +[2025-07-07 04:02:02] [Rank 0] Group 11 Loss: 4.9542 +[2025-07-07 04:02:02] [Rank 0] Group 0 FTA: 0.4811 +[2025-07-07 04:02:02] [Rank 0] Group 1 FTA: 0.3776 +[2025-07-07 04:02:02] [Rank 0] Group 2 FTA: 0.4609 +[2025-07-07 04:02:02] [Rank 0] Group 3 FTA: 0.4740 +[2025-07-07 04:02:02] [Rank 0] Group 4 FTA: 0.4062 +[2025-07-07 04:02:02] [Rank 0] Group 5 FTA: 0.5052 +[2025-07-07 04:02:02] [Rank 0] Group 6 FTA: 0.4401 +[2025-07-07 04:02:02] [Rank 0] Group 7 FTA: 0.4766 +[2025-07-07 04:02:02] [Rank 0] Group 8 FTA: 0.4375 +[2025-07-07 04:02:02] [Rank 0] Group 9 FTA: 0.4609 +[2025-07-07 04:02:02] [Rank 0] Group 10 FTA: 0.4766 +[2025-07-07 04:02:02] [Rank 0] Group 11 FTA: 0.4775 +[2025-07-07 04:02:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 04:02:03] [Rank 0] [✓] Per-Class FTA curve
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 04:02:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 04:02:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 04:02:04] [Rank 0] step:10001/10000 train_time:808624ms step_avg:80.85ms +[2025-07-07 04:02:04] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 04:02:04 2025 --- +[2025-07-07 04:02:04] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9956 MiB diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9e459adfa5242d9c9bf0d2dee11f08781be8b4d4 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "5ec7e230-cccc-4682-8a30-7649b5f17b6a", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..53f7088bd7f5dda2eadd2d3dd02591564cec2854 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:572bc928edb342c56e3744b44e809e56da45b5f0623db2af00b29439e8f67b3e +size 423343 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..f8dc9c8161f04bba55ae2b8b167b657595aef008 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:76429cec7ae5f7caef4fc2cd07b9a7c8889e0c780b60a6a08099ecbc9899d7da +size 305607 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..b7e6b6daa11ae3c72ba84549558ff016c960c036 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf7c3110a4ff5fbf675ea16722f23749ab50bc5e5252ad8a64d08cfa9dd478ec +size 96970 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..571938a66c2401e8f2e8d791cf2981617cf435de --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e197bb4c0eaa6702dc38fb53780a3212f3654d680d985b5d39914da3bcc1e88e +size 114662 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/training_log_5ec7e230-cccc-4682-8a30-7649b5f17b6a.txt b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/training_log_5ec7e230-cccc-4682-8a30-7649b5f17b6a.txt new file mode 100644 index 0000000000000000000000000000000000000000..c11e1abe6d951339a0fcd3587a4d80680c7432d7 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/training_log_5ec7e230-cccc-4682-8a30-7649b5f17b6a.txt @@ -0,0 +1,5132 @@ +[2025-07-07 02:04:49] [Rank 0] PRINT: --- Script Start: Mon Jul 7 02:04:49 2025 --- +[2025-07-07 02:04:49] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-07 02:04:49] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 02:04:49] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-07 02:04:49] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48 +[2025-07-07 02:04:49] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op,
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
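# With the CLI args recorded at the top of this log (optimizer_mode=0,
# model_parameterization='qkvo', adam_lr=0.0002, seed=48), the run-directory
# f-string above resolves to the folder used throughout this diff:
#   .../logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48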
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append each message to the log file exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """Internal evaluation on the original QA data for per-class loss only."""
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # Create the class-id -> group-id mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curves from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
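+# A quick illustration (uncalled helper; name and sample points are assumptions)
+# of the two schedules defined above: the LR factor stays at 1.0 for the first
+# 20% of training (cooldown_frac = 0.8), then decays linearly to 0.1, while the
+# attention window grows from 128 tokens toward 1792 (next multiple of the
+# 128-token block above 1728).
+def _sketch_schedules(num_points: int = 5):
+    rows = []
+    for i in range(num_points):
+        x = i / (num_points - 1)
+        step = int(x * args.num_iterations)
+        window = max(128, next_multiple_of_n(1728 * x, n=128))
+        rows.append((step, round(get_lr(step), 3), window))
+    return rows  # e.g. [(0, 1.0, 128), ..., (10000, 0.1, 1792)]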
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    # Muon momentum warmup: ramp from 0.85 to 0.95 over the first 300 steps
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_loss:{train_loss_per_token:.4f} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
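+# A minimal sketch of the manual gradient synchronization used in the training
+# loop above: each rank computes a local gradient, and an all-reduce with
+# ReduceOp.AVG leaves every rank holding the mean gradient, making the update
+# equivalent to a single step on the combined batch. Shown here with plain
+# tensors, no process group required (illustrative helper; never called).
+def _sketch_grad_averaging(local_grads):
+    """local_grads: one gradient tensor per rank, all of identical shape."""
+    avg = sum(local_grads) / len(local_grads)
+    return [avg.clone() for _ in local_grads]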
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
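+# Worked example of the LR and attention-window schedules defined above, using
+# this run's settings (num_iterations=10000, cooldown_frac=0.8 from config.json):
+#   get_lr holds the multiplier at 1.0 while x < 1 - cooldown_frac, i.e. for the
+#   first 2000 steps, then decays it linearly down to 0.1:
+#     get_lr(0)     == 1.0
+#     get_lr(6000)  == 0.55   # w = (1 - 0.6) / 0.8 = 0.5 -> 0.5 * 1.0 + 0.5 * 0.1
+#     get_lr(10000) == 0.1
+#   get_window_size_blocks grows the attention window from 128 tokens (1 block)
+#   at step 0 to next_multiple_of_n(1728, n=128) = 1792 tokens (14 blocks) at the
+#   final step, in 128-token increments.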
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 02:04:49] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 02:04:49] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 02:04:49] [Rank 0] PRINT: Constructing model... +[2025-07-07 02:04:49] [Rank 0] PRINT: Constructing model... +[2025-07-07 02:04:51] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 02:04:51] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 02:04:51] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 02:04:51] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 02:04:51] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 02:04:51] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 02:04:52] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 02:04:52] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 02:04:52] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 02:04:52] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 02:04:52] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 02:04:52] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 02:04:52] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 02:04:52] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 02:04:52] [Rank 0] PRINT: Model returns: +[2025-07-07 02:04:52] [Rank 0] PRINT: Model returns: +[2025-07-07 02:04:52] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 02:04:52] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 02:04:52] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-07 02:04:52] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-07 02:04:52] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-07 02:04:52] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-07 02:04:52] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-07 02:04:52] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-07 02:04:52] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-07 02:04:52] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-07 02:04:52] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 02:04:52] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 02:04:52] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 02:04:52] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 02:04:52] [Rank 0] PRINT: Starting warmup... +[2025-07-07 02:04:52] [Rank 0] PRINT: Starting warmup... +[2025-07-07 02:06:01] [Rank 0] PRINT: Warmup complete. +[2025-07-07 02:06:01] [Rank 0] PRINT: Warmup complete. +[2025-07-07 02:06:01] [Rank 0] PRINT: Starting training... +[2025-07-07 02:06:01] [Rank 0] PRINT: Starting training... +[2025-07-07 02:06:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 02:06:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 02:06:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 02:06:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 02:06:12] [Rank 0] step:21/10000 train_time:1549ms step_avg:73.76ms +[2025-07-07 02:06:12] [Rank 0] step:21/10000 train_time:1549ms step_avg:73.76ms +[2025-07-07 02:06:13] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.17ms +[2025-07-07 02:06:13] [Rank 0] step:41/10000 train_time:3000ms step_avg:73.17ms +[2025-07-07 02:06:15] [Rank 0] step:61/10000 train_time:4453ms step_avg:73.01ms +[2025-07-07 02:06:15] [Rank 0] step:61/10000 train_time:4453ms step_avg:73.01ms +[2025-07-07 02:06:16] [Rank 0] step:81/10000 train_time:5908ms step_avg:72.94ms +[2025-07-07 02:06:16] [Rank 0] step:81/10000 train_time:5908ms step_avg:72.94ms +[2025-07-07 02:06:18] [Rank 0] step:101/10000 train_time:8026ms step_avg:79.46ms +[2025-07-07 02:06:18] [Rank 0] step:101/10000 train_time:8026ms step_avg:79.46ms +[2025-07-07 02:06:20] [Rank 0] step:121/10000 train_time:9479ms step_avg:78.33ms +[2025-07-07 02:06:20] [Rank 0] step:121/10000 train_time:9479ms step_avg:78.33ms +[2025-07-07 02:06:21] [Rank 0] step:141/10000 train_time:10934ms step_avg:77.55ms +[2025-07-07 02:06:21] [Rank 0] step:141/10000 train_time:10934ms step_avg:77.55ms +[2025-07-07 02:06:23] [Rank 0] step:161/10000 train_time:12391ms step_avg:76.96ms +[2025-07-07 02:06:23] [Rank 0] step:161/10000 train_time:12391ms step_avg:76.96ms +[2025-07-07 02:06:24] [Rank 0] step:181/10000 train_time:13845ms step_avg:76.49ms +[2025-07-07 02:06:24] [Rank 0] step:181/10000 train_time:13845ms step_avg:76.49ms +[2025-07-07 02:06:26] [Rank 0] step:201/10000 train_time:15544ms step_avg:77.33ms +[2025-07-07 02:06:26] [Rank 0] step:201/10000 train_time:15544ms step_avg:77.33ms +[2025-07-07 02:06:27] [Rank 0] step:221/10000 train_time:17000ms step_avg:76.92ms +[2025-07-07 02:06:27] [Rank 0] step:221/10000 train_time:17000ms step_avg:76.92ms +[2025-07-07 02:06:29] [Rank 0] step:241/10000 train_time:18456ms step_avg:76.58ms +[2025-07-07 02:06:29] [Rank 0] step:241/10000 train_time:18456ms step_avg:76.58ms +[2025-07-07 02:06:30] [Rank 0] step:261/10000 train_time:19914ms step_avg:76.30ms +[2025-07-07 02:06:30] [Rank 0] step:261/10000 train_time:19914ms step_avg:76.30ms +[2025-07-07 02:06:32] [Rank 0] step:281/10000 train_time:22040ms step_avg:78.43ms +[2025-07-07 02:06:32] [Rank 0] step:281/10000 train_time:22040ms step_avg:78.43ms +[2025-07-07 02:06:34] [Rank 0] step:301/10000 train_time:23497ms step_avg:78.06ms +[2025-07-07 02:06:34] [Rank 0] step:301/10000 train_time:23497ms step_avg:78.06ms +[2025-07-07 02:06:35] [Rank 0] step:321/10000 train_time:24958ms step_avg:77.75ms +[2025-07-07 02:06:35] [Rank 0] step:321/10000 train_time:24958ms step_avg:77.75ms +[2025-07-07 02:06:37] [Rank 0] step:341/10000 train_time:26420ms step_avg:77.48ms +[2025-07-07 02:06:37] [Rank 0] step:341/10000 train_time:26420ms step_avg:77.48ms +[2025-07-07 02:06:39] [Rank 0] step:361/10000 train_time:28137ms step_avg:77.94ms +[2025-07-07 02:06:39] [Rank 0] step:361/10000 train_time:28137ms step_avg:77.94ms +[2025-07-07 02:06:40] [Rank 0] step:381/10000 train_time:30006ms step_avg:78.76ms +[2025-07-07 02:06:40] [Rank 0] step:381/10000 train_time:30006ms step_avg:78.76ms +[2025-07-07 02:06:42] [Rank 0] step:401/10000 train_time:31469ms step_avg:78.48ms +[2025-07-07 02:06:42] [Rank 0] step:401/10000 train_time:31469ms step_avg:78.48ms +[2025-07-07 02:06:43] [Rank 0] step:421/10000 train_time:32931ms step_avg:78.22ms 
+[2025-07-07 02:06:43] [Rank 0] step:421/10000 train_time:32931ms step_avg:78.22ms +[2025-07-07 02:06:45] [Rank 0] step:441/10000 train_time:34394ms step_avg:77.99ms +[2025-07-07 02:06:45] [Rank 0] step:441/10000 train_time:34394ms step_avg:77.99ms +[2025-07-07 02:06:46] [Rank 0] step:461/10000 train_time:36262ms step_avg:78.66ms +[2025-07-07 02:06:46] [Rank 0] step:461/10000 train_time:36262ms step_avg:78.66ms +[2025-07-07 02:06:48] [Rank 0] step:481/10000 train_time:37770ms step_avg:78.52ms +[2025-07-07 02:06:48] [Rank 0] step:481/10000 train_time:37770ms step_avg:78.52ms +[2025-07-07 02:06:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 02:06:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 02:06:50] [Rank 0] PRINT: step:500/10000 train_loss:8.7311 val_loss:7.1023 train_time:39356ms step_avg:78.71ms +[2025-07-07 02:06:50] [Rank 0] PRINT: step:500/10000 train_loss:8.7311 val_loss:7.1023 train_time:39356ms step_avg:78.71ms +[2025-07-07 02:06:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 02:06:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 02:06:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 02:06:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 02:06:51] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 02:06:51] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 02:12:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 02:12:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 02:12:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 02:12:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 02:12:12] [Rank 0] Total Loss: 7.6732 +[2025-07-07 02:12:12] [Rank 0] Total Loss: 7.6732 +[2025-07-07 02:12:12] [Rank 0] Total FTA: 0.0012 +[2025-07-07 02:12:12] [Rank 0] Total FTA: 0.0012 +[2025-07-07 02:12:12] [Rank 0] Group 0 Loss: 7.6886 +[2025-07-07 02:12:12] [Rank 0] Group 0 Loss: 7.6886 +[2025-07-07 02:12:12] [Rank 0] Group 1 Loss: 7.6249 +[2025-07-07 02:12:12] [Rank 0] Group 1 Loss: 7.6249 +[2025-07-07 02:12:12] [Rank 0] Group 2 Loss: 7.7784 +[2025-07-07 02:12:12] [Rank 0] Group 2 Loss: 7.7784 +[2025-07-07 02:12:12] [Rank 0] Group 3 Loss: 7.6473 +[2025-07-07 02:12:12] [Rank 0] Group 3 Loss: 7.6473 +[2025-07-07 02:12:12] [Rank 0] Group 4 Loss: 7.6873 +[2025-07-07 02:12:12] [Rank 0] Group 4 Loss: 7.6873 +[2025-07-07 02:12:12] [Rank 0] Group 5 Loss: 7.6380 +[2025-07-07 02:12:12] [Rank 0] Group 5 Loss: 7.6380 +[2025-07-07 02:12:12] [Rank 0] Group 6 Loss: 7.6822 +[2025-07-07 02:12:12] [Rank 0] Group 6 Loss: 7.6822 +[2025-07-07 02:12:12] [Rank 0] Group 7 Loss: 7.6688 +[2025-07-07 02:12:12] [Rank 0] Group 7 Loss: 7.6688 +[2025-07-07 02:12:12] [Rank 0] Group 8 Loss: 7.6520 +[2025-07-07 02:12:12] [Rank 0] Group 8 Loss: 7.6520 +[2025-07-07 02:12:12] [Rank 0] Group 9 Loss: 7.6868 +[2025-07-07 02:12:12] [Rank 0] Group 9 Loss: 7.6868 +[2025-07-07 02:12:12] [Rank 0] Group 10 Loss: 7.6616 +[2025-07-07 02:12:12] [Rank 0] Group 10 Loss: 7.6616 +[2025-07-07 02:12:12] [Rank 0] Group 11 Loss: 7.6668 +[2025-07-07 02:12:12] [Rank 0] Group 11 Loss: 7.6668 +[2025-07-07 02:12:12] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 
02:12:12] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 02:12:12] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 02:12:12] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 02:12:12] [Rank 0] Group 11 FTA: 0.0010 +[2025-07-07 02:12:12] [Rank 0] Group 11 FTA: 0.0010 +[2025-07-07 02:12:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png +[2025-07-07 02:12:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png +[2025-07-07 02:12:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png +[2025-07-07 02:12:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png +[2025-07-07 02:12:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png +[2025-07-07 02:12:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png +[2025-07-07 02:12:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png +[2025-07-07 02:12:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png +[2025-07-07 02:12:13] [Rank 0] step:501/10000 train_time:39377ms step_avg:78.60ms +[2025-07-07 02:12:13] [Rank 0] step:501/10000 train_time:39377ms step_avg:78.60ms +[2025-07-07 02:12:15] [Rank 0] step:521/10000 train_time:40826ms step_avg:78.36ms +[2025-07-07 02:12:15] [Rank 0] step:521/10000 train_time:40826ms step_avg:78.36ms +[2025-07-07 02:12:17] [Rank 0] step:541/10000 train_time:42943ms step_avg:79.38ms +[2025-07-07 02:12:17] [Rank 0] step:541/10000 train_time:42943ms step_avg:79.38ms +[2025-07-07 02:12:18] [Rank 0] step:561/10000 train_time:44379ms step_avg:79.11ms +[2025-07-07 02:12:18] [Rank 0] step:561/10000 train_time:44379ms step_avg:79.11ms +[2025-07-07 02:12:20] [Rank 0] step:581/10000 train_time:45834ms step_avg:78.89ms +[2025-07-07 02:12:20] [Rank 0] step:581/10000 train_time:45834ms step_avg:78.89ms +[2025-07-07 02:12:21] [Rank 0] step:601/10000 train_time:47292ms step_avg:78.69ms +[2025-07-07 02:12:21] [Rank 0] step:601/10000 train_time:47292ms step_avg:78.69ms +[2025-07-07 02:12:23] [Rank 0] step:621/10000 train_time:48753ms step_avg:78.51ms +[2025-07-07 02:12:23] [Rank 0] step:621/10000 train_time:48753ms 
step_avg:78.51ms +[2025-07-07 02:12:25] [Rank 0] step:641/10000 train_time:50883ms step_avg:79.38ms +[2025-07-07 02:12:25] [Rank 0] step:641/10000 train_time:50883ms step_avg:79.38ms +[2025-07-07 02:12:26] [Rank 0] step:661/10000 train_time:52343ms step_avg:79.19ms +[2025-07-07 02:12:26] [Rank 0] step:661/10000 train_time:52343ms step_avg:79.19ms +[2025-07-07 02:12:28] [Rank 0] step:681/10000 train_time:53805ms step_avg:79.01ms +[2025-07-07 02:12:28] [Rank 0] step:681/10000 train_time:53805ms step_avg:79.01ms +[2025-07-07 02:12:29] [Rank 0] step:701/10000 train_time:55270ms step_avg:78.84ms +[2025-07-07 02:12:29] [Rank 0] step:701/10000 train_time:55270ms step_avg:78.84ms +[2025-07-07 02:12:31] [Rank 0] step:721/10000 train_time:56783ms step_avg:78.76ms +[2025-07-07 02:12:31] [Rank 0] step:721/10000 train_time:56783ms step_avg:78.76ms +[2025-07-07 02:12:32] [Rank 0] step:741/10000 train_time:58436ms step_avg:78.86ms +[2025-07-07 02:12:32] [Rank 0] step:741/10000 train_time:58436ms step_avg:78.86ms +[2025-07-07 02:12:34] [Rank 0] step:761/10000 train_time:59910ms step_avg:78.73ms +[2025-07-07 02:12:34] [Rank 0] step:761/10000 train_time:59910ms step_avg:78.73ms +[2025-07-07 02:12:35] [Rank 0] step:781/10000 train_time:61388ms step_avg:78.60ms +[2025-07-07 02:12:35] [Rank 0] step:781/10000 train_time:61388ms step_avg:78.60ms +[2025-07-07 02:12:37] [Rank 0] step:801/10000 train_time:62861ms step_avg:78.48ms +[2025-07-07 02:12:37] [Rank 0] step:801/10000 train_time:62861ms step_avg:78.48ms +[2025-07-07 02:12:39] [Rank 0] step:821/10000 train_time:65002ms step_avg:79.17ms +[2025-07-07 02:12:39] [Rank 0] step:821/10000 train_time:65002ms step_avg:79.17ms +[2025-07-07 02:12:40] [Rank 0] step:841/10000 train_time:66475ms step_avg:79.04ms +[2025-07-07 02:12:40] [Rank 0] step:841/10000 train_time:66475ms step_avg:79.04ms +[2025-07-07 02:12:42] [Rank 0] step:861/10000 train_time:67951ms step_avg:78.92ms +[2025-07-07 02:12:42] [Rank 0] step:861/10000 train_time:67951ms step_avg:78.92ms +[2025-07-07 02:12:43] [Rank 0] step:881/10000 train_time:69426ms step_avg:78.80ms +[2025-07-07 02:12:43] [Rank 0] step:881/10000 train_time:69426ms step_avg:78.80ms +[2025-07-07 02:12:45] [Rank 0] step:901/10000 train_time:70955ms step_avg:78.75ms +[2025-07-07 02:12:45] [Rank 0] step:901/10000 train_time:70955ms step_avg:78.75ms +[2025-07-07 02:12:47] [Rank 0] step:921/10000 train_time:73039ms step_avg:79.30ms +[2025-07-07 02:12:47] [Rank 0] step:921/10000 train_time:73039ms step_avg:79.30ms +[2025-07-07 02:12:48] [Rank 0] step:941/10000 train_time:74516ms step_avg:79.19ms +[2025-07-07 02:12:48] [Rank 0] step:941/10000 train_time:74516ms step_avg:79.19ms +[2025-07-07 02:12:50] [Rank 0] step:961/10000 train_time:75990ms step_avg:79.07ms +[2025-07-07 02:12:50] [Rank 0] step:961/10000 train_time:75990ms step_avg:79.07ms +[2025-07-07 02:12:51] [Rank 0] step:981/10000 train_time:77465ms step_avg:78.97ms +[2025-07-07 02:12:51] [Rank 0] step:981/10000 train_time:77465ms step_avg:78.97ms +[2025-07-07 02:12:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 02:12:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 02:12:54] [Rank 0] PRINT: step:1000/10000 train_loss:6.0325 val_loss:5.1161 train_time:79594ms step_avg:79.59ms +[2025-07-07 02:12:54] [Rank 0] PRINT: step:1000/10000 train_loss:6.0325 val_loss:5.1161 train_time:79594ms step_avg:79.59ms +[2025-07-07 02:12:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 02:12:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 02:12:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 02:12:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 02:12:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 02:12:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 02:18:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 02:18:19] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 02:18:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 02:18:19] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 02:18:19] [Rank 0] Total Loss: 6.1230 +[2025-07-07 02:18:19] [Rank 0] Total Loss: 6.1230 +[2025-07-07 02:18:19] [Rank 0] Total FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Total FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 0 Loss: 6.1007 +[2025-07-07 02:18:19] [Rank 0] Group 0 Loss: 6.1007 +[2025-07-07 02:18:19] [Rank 0] Group 1 Loss: 6.2028 +[2025-07-07 02:18:19] [Rank 0] Group 1 Loss: 6.2028 +[2025-07-07 02:18:19] [Rank 0] Group 2 Loss: 6.1844 +[2025-07-07 02:18:19] [Rank 0] Group 2 Loss: 6.1844 +[2025-07-07 02:18:19] [Rank 0] Group 3 Loss: 6.0589 +[2025-07-07 02:18:19] [Rank 0] Group 3 Loss: 6.0589 +[2025-07-07 02:18:19] [Rank 0] Group 4 Loss: 6.1941 +[2025-07-07 02:18:19] [Rank 0] Group 4 Loss: 6.1941 +[2025-07-07 02:18:19] [Rank 0] Group 5 Loss: 6.0797 +[2025-07-07 02:18:19] [Rank 0] Group 5 Loss: 6.0797 +[2025-07-07 02:18:19] [Rank 0] Group 6 Loss: 6.1244 +[2025-07-07 02:18:19] [Rank 0] Group 6 Loss: 6.1244 +[2025-07-07 02:18:19] [Rank 0] Group 7 Loss: 6.1369 +[2025-07-07 02:18:19] [Rank 0] Group 7 Loss: 6.1369 +[2025-07-07 02:18:19] [Rank 0] Group 8 Loss: 6.0719 +[2025-07-07 02:18:19] [Rank 0] Group 8 Loss: 6.0719 +[2025-07-07 02:18:19] [Rank 0] Group 9 Loss: 6.1208 +[2025-07-07 02:18:19] [Rank 0] Group 9 Loss: 6.1208 +[2025-07-07 02:18:19] [Rank 0] Group 10 Loss: 6.1171 +[2025-07-07 02:18:19] [Rank 0] Group 10 Loss: 6.1171 +[2025-07-07 02:18:19] [Rank 0] Group 11 Loss: 6.1175 +[2025-07-07 02:18:19] [Rank 0] Group 11 Loss: 6.1175 +[2025-07-07 02:18:19] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 9 FTA: 0.0000 
+[2025-07-07 02:18:19] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-07 02:18:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png +[2025-07-07 02:18:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png +[2025-07-07 02:18:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png +[2025-07-07 02:18:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png +[2025-07-07 02:18:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png +[2025-07-07 02:18:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png +[2025-07-07 02:18:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png +[2025-07-07 02:18:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png +[2025-07-07 02:18:20] [Rank 0] step:1001/10000 train_time:79616ms step_avg:79.54ms +[2025-07-07 02:18:20] [Rank 0] step:1001/10000 train_time:79616ms step_avg:79.54ms +[2025-07-07 02:18:22] [Rank 0] step:1021/10000 train_time:81156ms step_avg:79.49ms +[2025-07-07 02:18:22] [Rank 0] step:1021/10000 train_time:81156ms step_avg:79.49ms +[2025-07-07 02:18:23] [Rank 0] step:1041/10000 train_time:82707ms step_avg:79.45ms +[2025-07-07 02:18:23] [Rank 0] step:1041/10000 train_time:82707ms step_avg:79.45ms +[2025-07-07 02:18:25] [Rank 0] step:1061/10000 train_time:84341ms step_avg:79.49ms +[2025-07-07 02:18:25] [Rank 0] step:1061/10000 train_time:84341ms step_avg:79.49ms +[2025-07-07 02:18:27] [Rank 0] step:1081/10000 train_time:85812ms step_avg:79.38ms +[2025-07-07 02:18:27] [Rank 0] step:1081/10000 train_time:85812ms step_avg:79.38ms +[2025-07-07 02:18:29] [Rank 0] step:1101/10000 train_time:87956ms step_avg:79.89ms +[2025-07-07 02:18:29] [Rank 0] step:1101/10000 train_time:87956ms step_avg:79.89ms +[2025-07-07 02:18:30] [Rank 0] step:1121/10000 train_time:89427ms step_avg:79.77ms +[2025-07-07 02:18:30] [Rank 0] step:1121/10000 train_time:89427ms step_avg:79.77ms +[2025-07-07 02:18:32] [Rank 0] step:1141/10000 train_time:90900ms step_avg:79.67ms +[2025-07-07 02:18:32] [Rank 0] step:1141/10000 train_time:90900ms step_avg:79.67ms +[2025-07-07 02:18:33] [Rank 0] step:1161/10000 train_time:92377ms step_avg:79.57ms +[2025-07-07 02:18:33] [Rank 0] step:1161/10000 train_time:92377ms step_avg:79.57ms +[2025-07-07 02:18:35] [Rank 0] step:1181/10000 train_time:94514ms step_avg:80.03ms +[2025-07-07 02:18:35] [Rank 0] step:1181/10000 train_time:94514ms step_avg:80.03ms +[2025-07-07 02:18:37] [Rank 0] step:1201/10000 train_time:95986ms step_avg:79.92ms +[2025-07-07 02:18:37] [Rank 0] step:1201/10000 train_time:95986ms step_avg:79.92ms +[2025-07-07 02:18:38] [Rank 0] step:1221/10000 train_time:97462ms step_avg:79.82ms +[2025-07-07 02:18:38] [Rank 0] step:1221/10000 train_time:97462ms step_avg:79.82ms 
+[2025-07-07 02:18:40] [Rank 0] step:1241/10000 train_time:98943ms step_avg:79.73ms +[2025-07-07 02:18:40] [Rank 0] step:1241/10000 train_time:98943ms step_avg:79.73ms +[2025-07-07 02:18:41] [Rank 0] step:1261/10000 train_time:100574ms step_avg:79.76ms +[2025-07-07 02:18:41] [Rank 0] step:1261/10000 train_time:100574ms step_avg:79.76ms +[2025-07-07 02:18:43] [Rank 0] step:1281/10000 train_time:102133ms step_avg:79.73ms +[2025-07-07 02:18:43] [Rank 0] step:1281/10000 train_time:102133ms step_avg:79.73ms +[2025-07-07 02:18:44] [Rank 0] step:1301/10000 train_time:103611ms step_avg:79.64ms +[2025-07-07 02:18:44] [Rank 0] step:1301/10000 train_time:103611ms step_avg:79.64ms +[2025-07-07 02:18:46] [Rank 0] step:1321/10000 train_time:105094ms step_avg:79.56ms +[2025-07-07 02:18:46] [Rank 0] step:1321/10000 train_time:105094ms step_avg:79.56ms +[2025-07-07 02:18:47] [Rank 0] step:1341/10000 train_time:106572ms step_avg:79.47ms +[2025-07-07 02:18:47] [Rank 0] step:1341/10000 train_time:106572ms step_avg:79.47ms +[2025-07-07 02:18:49] [Rank 0] step:1361/10000 train_time:108291ms step_avg:79.57ms +[2025-07-07 02:18:49] [Rank 0] step:1361/10000 train_time:108291ms step_avg:79.57ms +[2025-07-07 02:18:50] [Rank 0] step:1381/10000 train_time:109768ms step_avg:79.48ms +[2025-07-07 02:18:50] [Rank 0] step:1381/10000 train_time:109768ms step_avg:79.48ms +[2025-07-07 02:18:52] [Rank 0] step:1401/10000 train_time:111247ms step_avg:79.41ms +[2025-07-07 02:18:52] [Rank 0] step:1401/10000 train_time:111247ms step_avg:79.41ms +[2025-07-07 02:18:53] [Rank 0] step:1421/10000 train_time:112727ms step_avg:79.33ms +[2025-07-07 02:18:53] [Rank 0] step:1421/10000 train_time:112727ms step_avg:79.33ms +[2025-07-07 02:18:56] [Rank 0] step:1441/10000 train_time:114258ms step_avg:79.29ms +[2025-07-07 02:18:56] [Rank 0] step:1441/10000 train_time:114258ms step_avg:79.29ms +[2025-07-07 02:18:57] [Rank 0] step:1461/10000 train_time:116346ms step_avg:79.63ms +[2025-07-07 02:18:57] [Rank 0] step:1461/10000 train_time:116346ms step_avg:79.63ms +[2025-07-07 02:18:58] [Rank 0] step:1481/10000 train_time:117825ms step_avg:79.56ms +[2025-07-07 02:18:58] [Rank 0] step:1481/10000 train_time:117825ms step_avg:79.56ms +[2025-07-07 02:19:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 02:19:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 02:19:01] [Rank 0] PRINT: step:1500/10000 train_loss:4.4001 val_loss:3.7067 train_time:119300ms step_avg:79.53ms +[2025-07-07 02:19:01] [Rank 0] PRINT: step:1500/10000 train_loss:4.4001 val_loss:3.7067 train_time:119300ms step_avg:79.53ms +[2025-07-07 02:19:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 02:19:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 02:19:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 02:19:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 02:19:01] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 02:19:01] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 02:24:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 02:24:24] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 02:24:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 02:24:24] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 02:24:24] [Rank 0] Total Loss: 5.1754 +[2025-07-07 02:24:24] [Rank 0] Total Loss: 5.1754 +[2025-07-07 02:24:24] [Rank 0] Total FTA: 0.0820 +[2025-07-07 02:24:24] [Rank 0] Total FTA: 0.0820 +[2025-07-07 02:24:24] [Rank 0] Group 0 Loss: 5.1791 +[2025-07-07 02:24:24] [Rank 0] Group 0 Loss: 5.1791 +[2025-07-07 02:24:24] [Rank 0] Group 1 Loss: 5.3129 +[2025-07-07 02:24:24] [Rank 0] Group 1 Loss: 5.3129 +[2025-07-07 02:24:24] [Rank 0] Group 2 Loss: 5.1769 +[2025-07-07 02:24:24] [Rank 0] Group 2 Loss: 5.1769 +[2025-07-07 02:24:24] [Rank 0] Group 3 Loss: 5.1265 +[2025-07-07 02:24:24] [Rank 0] Group 3 Loss: 5.1265 +[2025-07-07 02:24:24] [Rank 0] Group 4 Loss: 5.1831 +[2025-07-07 02:24:24] [Rank 0] Group 4 Loss: 5.1831 +[2025-07-07 02:24:24] [Rank 0] Group 5 Loss: 5.1412 +[2025-07-07 02:24:24] [Rank 0] Group 5 Loss: 5.1412 +[2025-07-07 02:24:24] [Rank 0] Group 6 Loss: 5.1517 +[2025-07-07 02:24:24] [Rank 0] Group 6 Loss: 5.1517 +[2025-07-07 02:24:24] [Rank 0] Group 7 Loss: 5.1841 +[2025-07-07 02:24:24] [Rank 0] Group 7 Loss: 5.1841 +[2025-07-07 02:24:24] [Rank 0] Group 8 Loss: 5.1676 +[2025-07-07 02:24:24] [Rank 0] Group 8 Loss: 5.1676 +[2025-07-07 02:24:24] [Rank 0] Group 9 Loss: 5.1900 +[2025-07-07 02:24:24] [Rank 0] Group 9 Loss: 5.1900 +[2025-07-07 02:24:24] [Rank 0] Group 10 Loss: 5.1685 +[2025-07-07 02:24:24] [Rank 0] Group 10 Loss: 5.1685 +[2025-07-07 02:24:24] [Rank 0] Group 11 Loss: 5.1574 +[2025-07-07 02:24:24] [Rank 0] Group 11 Loss: 5.1574 +[2025-07-07 02:24:24] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 02:24:24] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 02:24:24] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 02:24:24] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 02:24:24] [Rank 0] Group 2 FTA: 0.0911 +[2025-07-07 02:24:24] [Rank 0] Group 2 FTA: 0.0911 +[2025-07-07 02:24:24] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 02:24:24] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 02:24:24] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 02:24:24] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 02:24:24] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 02:24:24] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 02:24:24] [Rank 0] Group 6 FTA: 0.0833 +[2025-07-07 02:24:24] [Rank 0] Group 6 FTA: 0.0833 +[2025-07-07 02:24:24] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-07 02:24:24] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-07 02:24:24] [Rank 0] Group 8 FTA: 0.0729 +[2025-07-07 02:24:24] [Rank 0] Group 8 FTA: 0.0729 +[2025-07-07 02:24:24] [Rank 0] Group 9 FTA: 0.0703 +[2025-07-07 02:24:24] [Rank 0] Group 9 FTA: 0.0703 +[2025-07-07 02:24:24] [Rank 0] Group 10 FTA: 0.0879 +[2025-07-07 02:24:24] [Rank 0] Group 10 FTA: 0.0879 +[2025-07-07 02:24:24] [Rank 0] Group 11 FTA: 0.0889 +[2025-07-07 02:24:24] [Rank 0] Group 11 FTA: 0.0889 +[2025-07-07 02:24:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png +[2025-07-07 02:24:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png +[2025-07-07 02:24:25] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png +[2025-07-07 02:24:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png +[2025-07-07 02:24:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png +[2025-07-07 02:24:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png +[2025-07-07 02:24:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png +[2025-07-07 02:24:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png +[2025-07-07 02:24:26] [Rank 0] step:1501/10000 train_time:119322ms step_avg:79.49ms +[2025-07-07 02:24:26] [Rank 0] step:1501/10000 train_time:119322ms step_avg:79.49ms +[2025-07-07 02:24:27] [Rank 0] step:1521/10000 train_time:120798ms step_avg:79.42ms +[2025-07-07 02:24:27] [Rank 0] step:1521/10000 train_time:120798ms step_avg:79.42ms +[2025-07-07 02:24:29] [Rank 0] step:1541/10000 train_time:122904ms step_avg:79.76ms +[2025-07-07 02:24:29] [Rank 0] step:1541/10000 train_time:122904ms step_avg:79.76ms +[2025-07-07 02:24:31] [Rank 0] step:1561/10000 train_time:124377ms step_avg:79.68ms +[2025-07-07 02:24:31] [Rank 0] step:1561/10000 train_time:124377ms step_avg:79.68ms +[2025-07-07 02:24:32] [Rank 0] step:1581/10000 train_time:125847ms step_avg:79.60ms +[2025-07-07 02:24:32] [Rank 0] step:1581/10000 train_time:125847ms step_avg:79.60ms +[2025-07-07 02:24:34] [Rank 0] step:1601/10000 train_time:127318ms step_avg:79.52ms +[2025-07-07 02:24:34] [Rank 0] step:1601/10000 train_time:127318ms step_avg:79.52ms +[2025-07-07 02:24:36] [Rank 0] step:1621/10000 train_time:128842ms step_avg:79.48ms +[2025-07-07 02:24:36] [Rank 0] step:1621/10000 train_time:128842ms step_avg:79.48ms +[2025-07-07 02:24:37] [Rank 0] step:1641/10000 train_time:130914ms step_avg:79.78ms +[2025-07-07 02:24:37] [Rank 0] step:1641/10000 train_time:130914ms step_avg:79.78ms +[2025-07-07 02:24:39] [Rank 0] step:1661/10000 train_time:132386ms step_avg:79.70ms +[2025-07-07 02:24:39] [Rank 0] step:1661/10000 train_time:132386ms step_avg:79.70ms +[2025-07-07 02:24:40] [Rank 0] step:1681/10000 train_time:133861ms step_avg:79.63ms +[2025-07-07 02:24:40] [Rank 0] step:1681/10000 train_time:133861ms step_avg:79.63ms +[2025-07-07 02:24:42] [Rank 0] step:1701/10000 train_time:135493ms step_avg:79.66ms +[2025-07-07 02:24:42] [Rank 0] step:1701/10000 train_time:135493ms step_avg:79.66ms +[2025-07-07 02:24:44] [Rank 0] step:1721/10000 train_time:137727ms step_avg:80.03ms +[2025-07-07 02:24:44] [Rank 0] step:1721/10000 train_time:137727ms step_avg:80.03ms +[2025-07-07 02:24:46] [Rank 0] step:1741/10000 train_time:139205ms step_avg:79.96ms +[2025-07-07 02:24:46] [Rank 0] step:1741/10000 train_time:139205ms step_avg:79.96ms +[2025-07-07 02:24:47] [Rank 0] step:1761/10000 train_time:140683ms step_avg:79.89ms +[2025-07-07 02:24:47] [Rank 0] step:1761/10000 train_time:140683ms step_avg:79.89ms +[2025-07-07 02:24:48] [Rank 0] step:1781/10000 train_time:142158ms step_avg:79.82ms +[2025-07-07 02:24:48] [Rank 0] step:1781/10000 train_time:142158ms step_avg:79.82ms +[2025-07-07 02:24:51] [Rank 0] step:1801/10000 train_time:143637ms step_avg:79.75ms +[2025-07-07 02:24:51] [Rank 
0] step:1801/10000 train_time:143637ms step_avg:79.75ms +[2025-07-07 02:24:52] [Rank 0] step:1821/10000 train_time:145764ms step_avg:80.05ms +[2025-07-07 02:24:52] [Rank 0] step:1821/10000 train_time:145764ms step_avg:80.05ms +[2025-07-07 02:24:54] [Rank 0] step:1841/10000 train_time:147244ms step_avg:79.98ms +[2025-07-07 02:24:54] [Rank 0] step:1841/10000 train_time:147244ms step_avg:79.98ms +[2025-07-07 02:24:55] [Rank 0] step:1861/10000 train_time:148721ms step_avg:79.91ms +[2025-07-07 02:24:55] [Rank 0] step:1861/10000 train_time:148721ms step_avg:79.91ms +[2025-07-07 02:24:57] [Rank 0] step:1881/10000 train_time:150201ms step_avg:79.85ms +[2025-07-07 02:24:57] [Rank 0] step:1881/10000 train_time:150201ms step_avg:79.85ms +[2025-07-07 02:24:59] [Rank 0] step:1901/10000 train_time:152351ms step_avg:80.14ms +[2025-07-07 02:24:59] [Rank 0] step:1901/10000 train_time:152351ms step_avg:80.14ms +[2025-07-07 02:25:00] [Rank 0] step:1921/10000 train_time:153829ms step_avg:80.08ms +[2025-07-07 02:25:00] [Rank 0] step:1921/10000 train_time:153829ms step_avg:80.08ms +[2025-07-07 02:25:02] [Rank 0] step:1941/10000 train_time:155307ms step_avg:80.01ms +[2025-07-07 02:25:02] [Rank 0] step:1941/10000 train_time:155307ms step_avg:80.01ms +[2025-07-07 02:25:03] [Rank 0] step:1961/10000 train_time:156787ms step_avg:79.95ms +[2025-07-07 02:25:03] [Rank 0] step:1961/10000 train_time:156787ms step_avg:79.95ms +[2025-07-07 02:25:05] [Rank 0] step:1981/10000 train_time:158522ms step_avg:80.02ms +[2025-07-07 02:25:05] [Rank 0] step:1981/10000 train_time:158522ms step_avg:80.02ms +[2025-07-07 02:25:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 02:25:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 02:25:07] [Rank 0] PRINT: step:2000/10000 train_loss:3.1184 val_loss:2.6075 train_time:160390ms step_avg:80.19ms +[2025-07-07 02:25:07] [Rank 0] PRINT: step:2000/10000 train_loss:3.1184 val_loss:2.6075 train_time:160390ms step_avg:80.19ms +[2025-07-07 02:25:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 02:25:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 02:25:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 02:25:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 02:25:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:30:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:30:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:30:31] [Rank 0] Total Loss: 4.4274
+[2025-07-07 02:30:31] [Rank 0] Total FTA: 0.0921
+[2025-07-07 02:30:31] [Rank 0] Group 0 Loss: 4.4859
+[2025-07-07 02:30:31] [Rank 0] Group 1 Loss: 4.5573
+[2025-07-07 02:30:31] [Rank 0] Group 2 Loss: 4.3451
+[2025-07-07 02:30:31] [Rank 0] Group 3 Loss: 4.4551
+[2025-07-07 02:30:31] [Rank 0] Group 4 Loss: 4.3784
+[2025-07-07 02:30:31] [Rank 0] Group 5 Loss: 4.3939
+[2025-07-07 02:30:31] [Rank 0] Group 6 Loss: 4.3986
+[2025-07-07 02:30:31] [Rank 0] Group 7 Loss: 4.4778
+[2025-07-07 02:30:31] [Rank 0] Group 8 Loss: 4.3951
+[2025-07-07 02:30:31] [Rank 0] Group 9 Loss: 4.3535
+[2025-07-07 02:30:31] [Rank 0] Group 10 Loss: 4.4016
+[2025-07-07 02:30:31] [Rank 0] Group 11 Loss: 4.4213
+[2025-07-07 02:30:31] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 02:30:31] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 02:30:31] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 02:30:31] [Rank 0] Group 3 FTA: 0.0625
+[2025-07-07 02:30:31] [Rank 0] Group 4 FTA: 0.0495
+[2025-07-07 02:30:31] [Rank 0] Group 5 FTA: 0.0625
+[2025-07-07 02:30:31] [Rank 0] Group 6 FTA: 0.0990
+[2025-07-07 02:30:31] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-07 02:30:31] [Rank 0] Group 8 FTA: 0.1224
+[2025-07-07 02:30:31] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 02:30:31] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-07 02:30:31] [Rank 0] Group 11 FTA: 0.0879
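The log never expands "FTA"; given the total_acc_curve/per_class_acc_curves filenames it is presumably a first-token accuracy, i.e. whether the greedy (argmax) prediction for the first answer token matches the reference. A sketch under that assumption (hypothetical helper, not code from the script):

    # Assumption: FTA = accuracy of the greedy prediction for the first
    # answer token. logits: (B, T, V); targets: (B, T); answer_start: (B,).
    import torch

    def first_token_accuracy(logits, targets, answer_start):
        batch = torch.arange(logits.size(0))
        # logits at position i predict token i+1, so index answer_start - 1.
        pred = logits[batch, answer_start - 1].argmax(dim=-1)
        return (pred == targets[batch, answer_start]).float().mean().item()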
+[2025-07-07 02:30:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 02:30:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 02:30:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 02:30:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 02:30:33] [Rank 0] step:2001/10000 train_time:160412ms step_avg:80.17ms
+[2025-07-07 02:30:34] [Rank 0] step:2021/10000 train_time:161896ms step_avg:80.11ms
+[2025-07-07 02:30:36] [Rank 0] step:2041/10000 train_time:163368ms step_avg:80.04ms
+[2025-07-07 02:30:37] [Rank 0] step:2061/10000 train_time:164836ms step_avg:79.98ms
+[2025-07-07 02:30:39] [Rank 0] step:2081/10000 train_time:166966ms step_avg:80.23ms
+[2025-07-07 02:30:41] [Rank 0] step:2101/10000 train_time:168434ms step_avg:80.17ms
+[2025-07-07 02:30:42] [Rank 0] step:2121/10000 train_time:169905ms step_avg:80.11ms
+[2025-07-07 02:30:44] [Rank 0] step:2141/10000 train_time:171384ms step_avg:80.05ms
+[2025-07-07 02:30:46] [Rank 0] step:2161/10000 train_time:172913ms step_avg:80.02ms
+[2025-07-07 02:30:47] [Rank 0] step:2181/10000 train_time:175000ms step_avg:80.24ms
+[2025-07-07 02:30:49] [Rank 0] step:2201/10000 train_time:176478ms step_avg:80.18ms
+[2025-07-07 02:30:50] [Rank 0] step:2221/10000 train_time:177950ms step_avg:80.12ms
+[2025-07-07 02:30:52] [Rank 0] step:2241/10000 train_time:179446ms step_avg:80.07ms
+[2025-07-07 02:30:54] [Rank 0] step:2261/10000 train_time:181177ms step_avg:80.13ms
+[2025-07-07 02:30:55] [Rank 0] step:2281/10000 train_time:182676ms step_avg:80.09ms
+[2025-07-07 02:30:57] [Rank 0] step:2301/10000 train_time:184176ms step_avg:80.04ms
+[2025-07-07 02:30:58] [Rank 0] step:2321/10000 train_time:185678ms step_avg:80.00ms
+[2025-07-07 02:31:00] [Rank 0] step:2341/10000 train_time:187232ms step_avg:79.98ms
+[2025-07-07 02:31:01] [Rank 0] step:2361/10000 train_time:188933ms step_avg:80.02ms
+[2025-07-07 02:31:03] [Rank 0] step:2381/10000 train_time:190469ms step_avg:80.00ms
+[2025-07-07 02:31:05] [Rank 0] step:2401/10000 train_time:192126ms step_avg:80.02ms
+[2025-07-07 02:31:06] [Rank 0] step:2421/10000 train_time:193627ms step_avg:79.98ms
+[2025-07-07 02:31:08] [Rank 0] step:2441/10000 train_time:195467ms step_avg:80.08ms
+[2025-07-07 02:31:09] [Rank 0] step:2461/10000 train_time:196967ms step_avg:80.04ms
+[2025-07-07 02:31:11] [Rank 0] step:2481/10000 train_time:198468ms step_avg:80.00ms
+[2025-07-07 02:31:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
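The arithmetic behind this recurring warning: 1966080 / 262144 = 7.5, so an evaluation loop that takes only full batches sees 7 × 262144 = 1835008 tokens and skips 131072 (that the loop floors rather than pads is an assumption about the script):

    # 7 full batches fit; the remaining half batch (131072 tokens) is dropped.
    full_batches, missed = divmod(1966080, 262144)
    assert (full_batches, missed) == (7, 131072)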
+[2025-07-07 02:31:13] [Rank 0] PRINT: step:2500/10000 train_loss:2.2672 val_loss:2.0013 train_time:199969ms step_avg:79.99ms
+[2025-07-07 02:31:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:31:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:31:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:36:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:36:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:36:38] [Rank 0] Total Loss: 4.0838
+[2025-07-07 02:36:38] [Rank 0] Total FTA: 0.1150
+[2025-07-07 02:36:38] [Rank 0] Group 0 Loss: 4.3035
+[2025-07-07 02:36:38] [Rank 0] Group 1 Loss: 4.0166
+[2025-07-07 02:36:38] [Rank 0] Group 2 Loss: 3.9008
+[2025-07-07 02:36:38] [Rank 0] Group 3 Loss: 4.1605
+[2025-07-07 02:36:38] [Rank 0] Group 4 Loss: 4.0725
+[2025-07-07 02:36:38] [Rank 0] Group 5 Loss: 4.0505
+[2025-07-07 02:36:38] [Rank 0] Group 6 Loss: 3.9902
+[2025-07-07 02:36:38] [Rank 0] Group 7 Loss: 4.0507
+[2025-07-07 02:36:38] [Rank 0] Group 8 Loss: 4.0403
+[2025-07-07 02:36:38] [Rank 0] Group 9 Loss: 4.0358
+[2025-07-07 02:36:38] [Rank 0] Group 10 Loss: 4.0735
+[2025-07-07 02:36:38] [Rank 0] Group 11 Loss: 4.0816
+[2025-07-07 02:36:38] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 02:36:38] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 02:36:38] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 02:36:38] [Rank 0] Group 3 FTA: 0.1328
+[2025-07-07 02:36:38] [Rank 0] Group 4 FTA: 0.0807
+[2025-07-07 02:36:38] [Rank 0] Group 5 FTA: 0.1198
+[2025-07-07 02:36:38] [Rank 0] Group 6 FTA: 0.1406
+[2025-07-07 02:36:38] [Rank 0] Group 7 FTA: 0.1536
+[2025-07-07 02:36:38] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-07 02:36:38] [Rank 0] Group 9 FTA: 0.1055
+[2025-07-07 02:36:38] [Rank 0] Group 10 FTA: 0.1309
+[2025-07-07 02:36:38] [Rank 0] Group 11 FTA: 0.1221
+[2025-07-07 02:36:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 02:36:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 02:36:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 02:36:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
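Each detailed evaluation rewrites the same four PNGs in place. A minimal matplotlib sketch of that append-then-overwrite pattern (file name from the log; the history structure and function are illustrative, not the script's code):

    # Sketch: keep a running history and redraw/overwrite the curve each eval.
    import matplotlib.pyplot as plt

    history = {"step": [], "total_fta": []}

    def update_total_acc_curve(step, total_fta, path="total_acc_curve.png"):
        history["step"].append(step)
        history["total_fta"].append(total_fta)
        fig, ax = plt.subplots()
        ax.plot(history["step"], history["total_fta"], marker="o")
        ax.set_xlabel("step")
        ax.set_ylabel("Total FTA")
        fig.savefig(path)
        plt.close(fig)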
+[2025-07-07 02:36:39] [Rank 0] step:2501/10000 train_time:199991ms step_avg:79.96ms
+[2025-07-07 02:36:41] [Rank 0] step:2521/10000 train_time:202178ms step_avg:80.20ms
+[2025-07-07 02:36:43] [Rank 0] step:2541/10000 train_time:203651ms step_avg:80.15ms
+[2025-07-07 02:36:44] [Rank 0] step:2561/10000 train_time:205143ms step_avg:80.10ms
+[2025-07-07 02:36:46] [Rank 0] step:2581/10000 train_time:206637ms step_avg:80.06ms
+[2025-07-07 02:36:47] [Rank 0] step:2601/10000 train_time:208131ms step_avg:80.02ms
+[2025-07-07 02:36:49] [Rank 0] step:2621/10000 train_time:209863ms step_avg:80.07ms
+[2025-07-07 02:36:51] [Rank 0] step:2641/10000 train_time:211357ms step_avg:80.03ms
+[2025-07-07 02:36:52] [Rank 0] step:2661/10000 train_time:212854ms step_avg:79.99ms
+[2025-07-07 02:36:53] [Rank 0] step:2681/10000 train_time:214350ms step_avg:79.95ms
+[2025-07-07 02:36:56] [Rank 0] step:2701/10000 train_time:215897ms step_avg:79.93ms
+[2025-07-07 02:36:57] [Rank 0] step:2721/10000 train_time:217993ms step_avg:80.11ms
+[2025-07-07 02:36:59] [Rank 0] step:2741/10000 train_time:219493ms step_avg:80.08ms
+[2025-07-07 02:37:00] [Rank 0] step:2761/10000 train_time:220994ms step_avg:80.04ms
+[2025-07-07 02:37:02] [Rank 0] step:2781/10000 train_time:222495ms step_avg:80.01ms
+[2025-07-07 02:37:04] [Rank 0] step:2801/10000 train_time:224668ms step_avg:80.21ms
+[2025-07-07 02:37:05] [Rank 0] step:2821/10000 train_time:226168ms step_avg:80.17ms
+[2025-07-07 02:37:07] [Rank 0] step:2841/10000 train_time:227672ms step_avg:80.14ms
+[2025-07-07 02:37:08] [Rank 0] step:2861/10000 train_time:229176ms step_avg:80.10ms
+[2025-07-07 02:37:10] [Rank 0] step:2881/10000 train_time:230933ms step_avg:80.16ms
+[2025-07-07 02:37:12] [Rank 0] step:2901/10000 train_time:232840ms step_avg:80.26ms
+[2025-07-07 02:37:13] [Rank 0] step:2921/10000 train_time:234343ms step_avg:80.23ms
+[2025-07-07 02:37:15] [Rank 0] step:2941/10000 train_time:235843ms step_avg:80.19ms
+[2025-07-07 02:37:16] [Rank 0] step:2961/10000 train_time:237344ms step_avg:80.16ms
+[2025-07-07 02:37:19] [Rank 0] step:2981/10000 train_time:239514ms step_avg:80.35ms
+[2025-07-07 02:37:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:37:21] [Rank 0] PRINT: step:3000/10000 train_loss:1.8436 val_loss:1.7148 train_time:241072ms step_avg:80.36ms
+[2025-07-07 02:37:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:37:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:37:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:42:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:42:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:42:45] [Rank 0] Total Loss: 4.1066
+[2025-07-07 02:42:45] [Rank 0] Total FTA: 0.1477
+[2025-07-07 02:42:45] [Rank 0] Group 0 Loss: 4.3462
+[2025-07-07 02:42:45] [Rank 0] Group 1 Loss: 4.1613
+[2025-07-07 02:42:45] [Rank 0] Group 2 Loss: 3.9925
+[2025-07-07 02:42:45] [Rank 0] Group 3 Loss: 4.1045
+[2025-07-07 02:42:45] [Rank 0] Group 4 Loss: 4.0576
+[2025-07-07 02:42:45] [Rank 0] Group 5 Loss: 4.0293
+[2025-07-07 02:42:45] [Rank 0] Group 6 Loss: 3.9825
+[2025-07-07 02:42:45] [Rank 0] Group 7 Loss: 4.0880
+[2025-07-07 02:42:45] [Rank 0] Group 8 Loss: 4.0646
+[2025-07-07 02:42:45] [Rank 0] Group 9 Loss: 4.0539
+[2025-07-07 02:42:45] [Rank 0] Group 10 Loss: 4.0541
+[2025-07-07 02:42:45] [Rank 0] Group 11 Loss: 4.1057
+[2025-07-07 02:42:45] [Rank 0] Group 0 FTA: 0.3329
+[2025-07-07 02:42:45] [Rank 0] Group 1 FTA: 0.1406
+[2025-07-07 02:42:45] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 02:42:45] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-07 02:42:45] [Rank 0] Group 4 FTA: 0.0755
+[2025-07-07 02:42:45] [Rank 0] Group 5 FTA: 0.1250
+[2025-07-07 02:42:45] [Rank 0] Group 6 FTA: 0.1198
+[2025-07-07 02:42:45] [Rank 0] Group 7 FTA: 0.1484
+[2025-07-07 02:42:45] [Rank 0] Group 8 FTA: 0.1380
+[2025-07-07 02:42:45] [Rank 0] Group 9 FTA: 0.1328
+[2025-07-07 02:42:45] [Rank 0] Group 10 FTA: 0.1602
+[2025-07-07 02:42:45] [Rank 0] Group 11 FTA: 0.1572
+[2025-07-07 02:42:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 02:42:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 02:42:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 02:42:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 02:42:47] [Rank 0] step:3001/10000 train_time:241094ms step_avg:80.34ms
+[2025-07-07 02:42:48] [Rank 0] step:3021/10000 train_time:242607ms step_avg:80.31ms
+[2025-07-07 02:42:50] [Rank 0] step:3041/10000 train_time:244102ms step_avg:80.27ms
+[2025-07-07 02:42:52] [Rank 0] step:3061/10000 train_time:246286ms step_avg:80.46ms
+[2025-07-07 02:42:53] [Rank 0] step:3081/10000 train_time:247763ms step_avg:80.42ms
+[2025-07-07 02:42:55] [Rank 0] step:3101/10000 train_time:249257ms step_avg:80.38ms
+[2025-07-07 02:42:56] [Rank 0] step:3121/10000 train_time:250752ms step_avg:80.34ms
+[2025-07-07 02:42:58] [Rank 0] step:3141/10000 train_time:252247ms step_avg:80.31ms
+[2025-07-07 02:43:00] [Rank 0] step:3161/10000 train_time:254405ms step_avg:80.48ms
+[2025-07-07 02:43:02] [Rank 0] step:3181/10000 train_time:255897ms step_avg:80.45ms
+[2025-07-07 02:43:03] [Rank 0] step:3201/10000 train_time:257397ms step_avg:80.41ms
+[2025-07-07 02:43:05] [Rank 0] step:3221/10000 train_time:258896ms step_avg:80.38ms
+[2025-07-07 02:43:07] [Rank 0] step:3241/10000 train_time:260656ms step_avg:80.42ms
+[2025-07-07 02:43:08] [Rank 0] step:3261/10000 train_time:262549ms step_avg:80.51ms
+[2025-07-07 02:43:10] [Rank 0] step:3281/10000 train_time:264051ms step_avg:80.48ms
+[2025-07-07 02:43:11] [Rank 0] step:3301/10000 train_time:265555ms step_avg:80.45ms
+[2025-07-07 02:43:13] [Rank 0] step:3321/10000 train_time:267058ms step_avg:80.41ms
+[2025-07-07 02:43:14] [Rank 0] step:3341/10000 train_time:268794ms step_avg:80.45ms
+[2025-07-07 02:43:16] [Rank 0] step:3361/10000 train_time:270295ms step_avg:80.42ms
+[2025-07-07 02:43:17] [Rank 0] step:3381/10000 train_time:271800ms step_avg:80.39ms
+[2025-07-07 02:43:19] [Rank 0] step:3401/10000 train_time:273305ms step_avg:80.36ms
+[2025-07-07 02:43:21] [Rank 0] step:3421/10000 train_time:275067ms step_avg:80.41ms
+[2025-07-07 02:43:23] [Rank 0] step:3441/10000 train_time:276956ms step_avg:80.49ms
+[2025-07-07 02:43:24] [Rank 0] step:3461/10000 train_time:278461ms step_avg:80.46ms
+[2025-07-07 02:43:26] [Rank 0] step:3481/10000 train_time:279965ms step_avg:80.43ms
+[2025-07-07 02:43:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:43:28] [Rank 0] PRINT: step:3500/10000 train_loss:1.6354 val_loss:1.5649 train_time:281469ms step_avg:80.42ms
+[2025-07-07 02:43:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:43:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:43:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:48:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:48:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:48:52] [Rank 0] Total Loss: 4.2115
+[2025-07-07 02:48:52] [Rank 0] Total FTA: 0.1931
+[2025-07-07 02:48:52] [Rank 0] Group 0 Loss: 4.5239
+[2025-07-07 02:48:52] [Rank 0] Group 1 Loss: 4.1381
+[2025-07-07 02:48:52] [Rank 0] Group 2 Loss: 4.0951
+[2025-07-07 02:48:52] [Rank 0] Group 3 Loss: 4.1499
+[2025-07-07 02:48:52] [Rank 0] Group 4 Loss: 4.1954
+[2025-07-07 02:48:52] [Rank 0] Group 5 Loss: 4.1609
+[2025-07-07 02:48:52] [Rank 0] Group 6 Loss: 4.1112
+[2025-07-07 02:48:52] [Rank 0] Group 7 Loss: 4.1998
+[2025-07-07 02:48:52] [Rank 0] Group 8 Loss: 4.1849
+[2025-07-07 02:48:52] [Rank 0] Group 9 Loss: 4.1556
+[2025-07-07 02:48:52] [Rank 0] Group 10 Loss: 4.1797
+[2025-07-07 02:48:52] [Rank 0] Group 11 Loss: 4.1779
+[2025-07-07 02:48:52] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 02:48:52] [Rank 0] Group 1 FTA: 0.1849
+[2025-07-07 02:48:52] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 02:48:52] [Rank 0] Group 3 FTA: 0.1771
+[2025-07-07 02:48:52] [Rank 0] Group 4 FTA: 0.1250
+[2025-07-07 02:48:52] [Rank 0] Group 5 FTA: 0.1875
+[2025-07-07 02:48:52] [Rank 0] Group 6 FTA: 0.1823
+[2025-07-07 02:48:52] [Rank 0] Group 7 FTA: 0.2526
+[2025-07-07 02:48:52] [Rank 0] Group 8 FTA: 0.2083
+[2025-07-07 02:48:52] [Rank 0] Group 9 FTA: 0.2148
+[2025-07-07 02:48:52] [Rank 0] Group 10 FTA: 0.2402
+[2025-07-07 02:48:52] [Rank 0] Group 11 FTA: 0.2080
+[2025-07-07 02:48:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 02:48:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 02:48:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 02:48:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 02:48:53] [Rank 0] step:3501/10000 train_time:281490ms step_avg:80.40ms
+[2025-07-07 02:48:55] [Rank 0] step:3521/10000 train_time:283752ms step_avg:80.59ms
+[2025-07-07 02:48:57] [Rank 0] step:3541/10000 train_time:285414ms step_avg:80.60ms
+[2025-07-07 02:48:59] [Rank 0] step:3561/10000 train_time:286974ms step_avg:80.59ms
+[2025-07-07 02:49:00] [Rank 0] step:3581/10000 train_time:288469ms step_avg:80.56ms
+[2025-07-07 02:49:02] [Rank 0] step:3601/10000 train_time:290015ms step_avg:80.54ms
+[2025-07-07 02:49:04] [Rank 0] step:3621/10000 train_time:292108ms step_avg:80.67ms
+[2025-07-07 02:49:05] [Rank 0] step:3641/10000 train_time:293603ms step_avg:80.64ms
+[2025-07-07 02:49:07] [Rank 0] step:3661/10000 train_time:295102ms step_avg:80.61ms
+[2025-07-07 02:49:08] [Rank 0] step:3681/10000 train_time:296601ms step_avg:80.58ms
+[2025-07-07 02:49:10] [Rank 0] step:3701/10000 train_time:298750ms step_avg:80.72ms
+[2025-07-07 02:49:12] [Rank 0] step:3721/10000 train_time:300248ms step_avg:80.69ms
+[2025-07-07 02:49:13] [Rank 0] step:3741/10000 train_time:301748ms step_avg:80.66ms
+[2025-07-07 02:49:15] [Rank 0] step:3761/10000 train_time:303251ms step_avg:80.63ms
+[2025-07-07 02:49:17] [Rank 0] step:3781/10000 train_time:304906ms step_avg:80.64ms
+[2025-07-07 02:49:18] [Rank 0] step:3801/10000 train_time:306387ms step_avg:80.61ms
+[2025-07-07 02:49:20] [Rank 0] step:3821/10000 train_time:307890ms step_avg:80.58ms
+[2025-07-07 02:49:21] [Rank 0] step:3841/10000 train_time:309394ms step_avg:80.55ms
+[2025-07-07 02:49:23] [Rank 0] step:3861/10000 train_time:310898ms step_avg:80.52ms
+[2025-07-07 02:49:24] [Rank 0] step:3881/10000 train_time:312635ms step_avg:80.56ms
+[2025-07-07 02:49:26] [Rank 0] step:3901/10000 train_time:314139ms step_avg:80.53ms
+[2025-07-07 02:49:27] [Rank 0] step:3921/10000 train_time:315645ms step_avg:80.50ms
+[2025-07-07 02:49:29] [Rank 0] step:3941/10000 train_time:317151ms step_avg:80.47ms
+[2025-07-07 02:49:31] [Rank 0] step:3961/10000 train_time:318655ms step_avg:80.45ms
+[2025-07-07 02:49:32] [Rank 0] step:3981/10000 train_time:320398ms step_avg:80.48ms
+[2025-07-07 02:49:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:49:34] [Rank 0] PRINT: step:4000/10000 train_loss:1.5178 val_loss:1.4720 train_time:321904ms step_avg:80.48ms
+[2025-07-07 02:49:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:49:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:49:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:54:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:54:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:54:58] [Rank 0] Total Loss: 4.3221
+[2025-07-07 02:54:58] [Rank 0] Total FTA: 0.2856
+[2025-07-07 02:54:58] [Rank 0] Group 0 Loss: 4.9076
+[2025-07-07 02:54:58] [Rank 0] Group 1 Loss: 4.2470
+[2025-07-07 02:54:58] [Rank 0] Group 2 Loss: 4.1241
+[2025-07-07 02:54:58] [Rank 0] Group 3 Loss: 4.2843
+[2025-07-07 02:54:58] [Rank 0] Group 4 Loss: 4.2964
+[2025-07-07 02:54:58] [Rank 0] Group 5 Loss: 4.1767
+[2025-07-07 02:54:58] [Rank 0] Group 6 Loss: 4.1536
+[2025-07-07 02:54:58] [Rank 0] Group 7 Loss: 4.2817
+[2025-07-07 02:54:58] [Rank 0] Group 8 Loss: 4.2182
+[2025-07-07 02:54:58] [Rank 0] Group 9 Loss: 4.2254
+[2025-07-07 02:54:58] [Rank 0] Group 10 Loss: 4.2557
+[2025-07-07 02:54:58] [Rank 0] Group 11 Loss: 4.2378
+[2025-07-07 02:54:58] [Rank 0] Group 0 FTA: 0.4954
+[2025-07-07 02:54:58] [Rank 0] Group 1 FTA: 0.3542
+[2025-07-07 02:54:58] [Rank 0] Group 2 FTA: 0.2422
+[2025-07-07 02:54:58] [Rank 0] Group 3 FTA: 0.2240
+[2025-07-07 02:54:58] [Rank 0] Group 4 FTA: 0.1667
+[2025-07-07 02:54:58] [Rank 0] Group 5 FTA: 0.2240
+[2025-07-07 02:54:58] [Rank 0] Group 6 FTA: 0.2500
+[2025-07-07 02:54:58] [Rank 0] Group 7 FTA: 0.2448
+[2025-07-07 02:54:58] [Rank 0] Group 8 FTA: 0.2370
+[2025-07-07 02:54:58] [Rank 0] Group 9 FTA: 0.2461
+[2025-07-07 02:54:58] [Rank 0] Group 10 FTA: 0.2617
+[2025-07-07 02:54:58] [Rank 0] Group 11 FTA: 0.2783
+[2025-07-07 02:54:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 02:54:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 02:54:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 02:55:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 02:55:00] [Rank 0] step:4001/10000 train_time:321927ms step_avg:80.46ms
+[2025-07-07 02:55:01] [Rank 0] step:4021/10000 train_time:323418ms step_avg:80.43ms
+[2025-07-07 02:55:03] [Rank 0] step:4041/10000 train_time:324910ms step_avg:80.40ms
+[2025-07-07 02:55:05] [Rank 0] step:4061/10000 train_time:327057ms step_avg:80.54ms
+[2025-07-07 02:55:06] [Rank 0] step:4081/10000 train_time:328550ms step_avg:80.51ms
+[2025-07-07 02:55:08] [Rank 0] step:4101/10000 train_time:330042ms step_avg:80.48ms
+[2025-07-07 02:55:09] [Rank 0] step:4121/10000 train_time:331538ms step_avg:80.45ms
+[2025-07-07 02:55:12] [Rank 0] step:4141/10000 train_time:333034ms step_avg:80.42ms
+[2025-07-07 02:55:13] [Rank 0] step:4161/10000 train_time:335200ms step_avg:80.56ms
+[2025-07-07 02:55:15] [Rank 0] step:4181/10000 train_time:336697ms step_avg:80.53ms
+[2025-07-07 02:55:16] [Rank 0] step:4201/10000 train_time:338439ms step_avg:80.56ms
+[2025-07-07 02:55:18] [Rank 0] step:4221/10000 train_time:340029ms step_avg:80.56ms
+[2025-07-07 02:55:20] [Rank 0] step:4241/10000 train_time:342223ms step_avg:80.69ms
+[2025-07-07 02:55:22] [Rank 0] step:4261/10000 train_time:343720ms step_avg:80.67ms
+[2025-07-07 02:55:23] [Rank 0] step:4281/10000 train_time:345222ms step_avg:80.64ms
+[2025-07-07 02:55:25] [Rank 0] step:4301/10000 train_time:346719ms step_avg:80.61ms
+[2025-07-07 02:55:27] [Rank 0] step:4321/10000 train_time:348217ms step_avg:80.59ms
+[2025-07-07 02:55:28] [Rank 0] step:4341/10000 train_time:350370ms step_avg:80.71ms
+[2025-07-07 02:55:30] [Rank 0] step:4361/10000 train_time:351870ms step_avg:80.69ms
+[2025-07-07 02:55:31] [Rank 0] step:4381/10000 train_time:353369ms step_avg:80.66ms
+[2025-07-07 02:55:33] [Rank 0] step:4401/10000 train_time:354868ms step_avg:80.63ms
+[2025-07-07 02:55:35] [Rank 0] step:4421/10000 train_time:357012ms step_avg:80.75ms
+[2025-07-07 02:55:36] [Rank 0] step:4441/10000 train_time:358512ms step_avg:80.73ms
+[2025-07-07 02:55:38] [Rank 0] step:4461/10000 train_time:360014ms step_avg:80.70ms
+[2025-07-07 02:55:39] [Rank 0] step:4481/10000 train_time:361515ms step_avg:80.68ms
+[2025-07-07 02:55:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 02:55:42] [Rank 0] PRINT: step:4500/10000 train_loss:1.4414 val_loss:1.4092 train_time:363016ms step_avg:80.67ms
+[2025-07-07 02:55:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 02:55:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 02:55:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:01:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:01:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:01:08] [Rank 0] Total Loss: 4.2309
+[2025-07-07 03:01:08] [Rank 0] Total FTA: 0.3062
+[2025-07-07 03:01:08] [Rank 0] Group 0 Loss: 4.5209
+[2025-07-07 03:01:08] [Rank 0] Group 1 Loss: 4.1369
+[2025-07-07 03:01:08] [Rank 0] Group 2 Loss: 4.0270
+[2025-07-07 03:01:08] [Rank 0] Group 3 Loss: 4.2549
+[2025-07-07 03:01:08] [Rank 0] Group 4 Loss: 4.1922
+[2025-07-07 03:01:08] [Rank 0] Group 5 Loss: 4.1844
+[2025-07-07 03:01:08] [Rank 0] Group 6 Loss: 4.1887
+[2025-07-07 03:01:08] [Rank 0] Group 7 Loss: 4.1794
+[2025-07-07 03:01:08] [Rank 0] Group 8 Loss: 4.2232
+[2025-07-07 03:01:08] [Rank 0] Group 9 Loss: 4.2125
+[2025-07-07 03:01:08] [Rank 0] Group 10 Loss: 4.2040
+[2025-07-07 03:01:08] [Rank 0] Group 11 Loss: 4.2039
+[2025-07-07 03:01:08] [Rank 0] Group 0 FTA: 0.5020
+[2025-07-07 03:01:08] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-07 03:01:08] [Rank 0] Group 2 FTA: 0.4531
+[2025-07-07 03:01:08] [Rank 0] Group 3 FTA: 0.2214
+[2025-07-07 03:01:08] [Rank 0] Group 4 FTA: 0.1068
+[2025-07-07 03:01:08] [Rank 0] Group 5 FTA: 0.3125
+[2025-07-07 03:01:08] [Rank 0] Group 6 FTA: 0.2708
+[2025-07-07 03:01:08] [Rank 0] Group 7 FTA: 0.2891
+[2025-07-07 03:01:08] [Rank 0] Group 8 FTA: 0.2578
+[2025-07-07 03:01:08] [Rank 0] Group 9 FTA: 0.3125
+[2025-07-07 03:01:08] [Rank 0] Group 10 FTA: 0.2773
+[2025-07-07 03:01:08] [Rank 0] Group 11 FTA: 0.3154
+[2025-07-07 03:01:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:01:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:01:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:01:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:01:10] [Rank 0] step:4501/10000 train_time:363045ms step_avg:80.66ms
+[2025-07-07 03:01:12] [Rank 0] step:4521/10000 train_time:365244ms step_avg:80.79ms
+[2025-07-07 03:01:13] [Rank 0] step:4541/10000 train_time:366738ms step_avg:80.76ms
+[2025-07-07 03:01:15] [Rank 0] step:4561/10000 train_time:368231ms step_avg:80.73ms
+[2025-07-07 03:01:16] [Rank 0] step:4581/10000 train_time:369726ms step_avg:80.71ms
+[2025-07-07 03:01:18] [Rank 0] step:4601/10000 train_time:371887ms step_avg:80.83ms
+[2025-07-07 03:01:20] [Rank 0] step:4621/10000 train_time:373381ms step_avg:80.80ms
+[2025-07-07 03:01:21] [Rank 0] step:4641/10000 train_time:374877ms step_avg:80.78ms
+[2025-07-07 03:01:23] [Rank 0] step:4661/10000 train_time:376376ms step_avg:80.75ms
+[2025-07-07 03:01:25] [Rank 0] step:4681/10000 train_time:377876ms step_avg:80.73ms
+[2025-07-07 03:01:26] [Rank 0] step:4701/10000 train_time:380035ms step_avg:80.84ms
+[2025-07-07 03:01:28] [Rank 0] step:4721/10000 train_time:381535ms step_avg:80.82ms
+[2025-07-07 03:01:29] [Rank 0] step:4741/10000 train_time:383036ms step_avg:80.79ms
+[2025-07-07 03:01:31] [Rank 0] step:4761/10000 train_time:384538ms step_avg:80.77ms
+[2025-07-07 03:01:33] [Rank 0] step:4781/10000 train_time:386706ms step_avg:80.88ms
+[2025-07-07 03:01:35] [Rank 0] step:4801/10000 train_time:388272ms step_avg:80.87ms
+[2025-07-07 03:01:36] [Rank 0] step:4821/10000 train_time:389870ms step_avg:80.87ms
+[2025-07-07 03:01:38] [Rank 0] step:4841/10000 train_time:391500ms step_avg:80.87ms
+[2025-07-07 03:01:40] [Rank 0] step:4861/10000 train_time:393053ms step_avg:80.86ms
+[2025-07-07 03:01:42] [Rank 0] step:4881/10000 train_time:395173ms step_avg:80.96ms
+[2025-07-07 03:01:43] [Rank 0] step:4901/10000 train_time:396672ms step_avg:80.94ms
+[2025-07-07 03:01:45] [Rank 0] step:4921/10000 train_time:398174ms step_avg:80.91ms
+[2025-07-07 03:01:46] [Rank 0] step:4941/10000 train_time:399676ms step_avg:80.89ms
+[2025-07-07 03:01:48] [Rank 0] step:4961/10000 train_time:401839ms step_avg:81.00ms
+[2025-07-07 03:01:50] [Rank 0] step:4981/10000 train_time:403338ms step_avg:80.98ms
+[2025-07-07 03:01:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:01:52] [Rank 0] PRINT: step:5000/10000 train_loss:1.3870 val_loss:1.3619 train_time:404841ms step_avg:80.97ms
+[2025-07-07 03:01:52] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:01:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:01:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:07:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:07:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:07:17] [Rank 0] Total Loss: 4.3508
+[2025-07-07 03:07:17] [Rank 0] Total FTA: 0.3304
+[2025-07-07 03:07:17] [Rank 0] Group 0 Loss: 4.5397
+[2025-07-07 03:07:17] [Rank 0] Group 1 Loss: 4.1793
+[2025-07-07 03:07:17] [Rank 0] Group 2 Loss: 4.1192
+[2025-07-07 03:07:17] [Rank 0] Group 3 Loss: 4.2541
+[2025-07-07 03:07:17] [Rank 0] Group 4 Loss: 4.4273
+[2025-07-07 03:07:17] [Rank 0] Group 5 Loss: 4.3634
+[2025-07-07 03:07:17] [Rank 0] Group 6 Loss: 4.3105
+[2025-07-07 03:07:17] [Rank 0] Group 7 Loss: 4.4005
+[2025-07-07 03:07:17] [Rank 0] Group 8 Loss: 4.3239
+[2025-07-07 03:07:17] [Rank 0] Group 9 Loss: 4.3386
+[2025-07-07 03:07:17] [Rank 0] Group 10 Loss: 4.3603
+[2025-07-07 03:07:17] [Rank 0] Group 11 Loss: 4.3680
+[2025-07-07 03:07:17] [Rank 0] Group 0 FTA: 0.5202
+[2025-07-07 03:07:17] [Rank 0] Group 1 FTA: 0.4974
+[2025-07-07 03:07:17] [Rank 0] Group 2 FTA: 0.3776
+[2025-07-07 03:07:17] [Rank 0] Group 3 FTA: 0.1589
+[2025-07-07 03:07:17] [Rank 0] Group 4 FTA: 0.1589
+[2025-07-07 03:07:17] [Rank 0] Group 5 FTA: 0.3177
+[2025-07-07 03:07:17] [Rank 0] Group 6 FTA: 0.2891
+[2025-07-07 03:07:17] [Rank 0] Group 7 FTA: 0.2995
+[2025-07-07 03:07:17] [Rank 0] Group 8 FTA: 0.2708
+[2025-07-07 03:07:17] [Rank 0] Group 9 FTA: 0.3086
+[2025-07-07 03:07:17] [Rank 0] Group 10 FTA: 0.3633
+[2025-07-07 03:07:17] [Rank 0] Group 11 FTA: 0.2793
+[2025-07-07 03:07:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:07:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:07:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:07:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:07:19] [Rank 0] step:5001/10000 train_time:404862ms step_avg:80.96ms
+[2025-07-07 03:07:20] [Rank 0] step:5021/10000 train_time:406378ms step_avg:80.94ms
+[2025-07-07 03:07:22] [Rank 0] step:5041/10000 train_time:408128ms step_avg:80.96ms
+[2025-07-07 03:07:24] [Rank 0] step:5061/10000 train_time:410029ms step_avg:81.02ms
+[2025-07-07 03:07:25] [Rank 0] step:5081/10000 train_time:411522ms step_avg:80.99ms
+[2025-07-07 03:07:27] [Rank 0] step:5101/10000 train_time:413017ms step_avg:80.97ms
+[2025-07-07 03:07:28] [Rank 0] step:5121/10000 train_time:414513ms step_avg:80.94ms
+[2025-07-07 03:07:30] [Rank 0] step:5141/10000 train_time:416671ms step_avg:81.05ms
+[2025-07-07 03:07:32] [Rank 0] step:5161/10000 train_time:418165ms step_avg:81.02ms
+[2025-07-07 03:07:33] [Rank 0] step:5181/10000 train_time:419664ms step_avg:81.00ms
+[2025-07-07 03:07:35] [Rank 0] step:5201/10000 train_time:421165ms step_avg:80.98ms
+[2025-07-07 03:07:37] [Rank 0] step:5221/10000 train_time:422717ms step_avg:80.96ms
+[2025-07-07 03:07:39] [Rank 0] step:5241/10000 train_time:424816ms step_avg:81.06ms
+[2025-07-07 03:07:40] [Rank 0] step:5261/10000 train_time:426319ms step_avg:81.03ms
+[2025-07-07 03:07:42] [Rank 0] step:5281/10000 train_time:427822ms step_avg:81.01ms
+[2025-07-07 03:07:43] [Rank 0] step:5301/10000 train_time:429328ms step_avg:80.99ms
+[2025-07-07 03:07:45] [Rank 0] step:5321/10000 train_time:431070ms step_avg:81.01ms
+[2025-07-07 03:07:46] [Rank 0] step:5341/10000 train_time:432575ms step_avg:80.99ms
+[2025-07-07 03:07:48] [Rank 0] step:5361/10000 train_time:434082ms step_avg:80.97ms
+[2025-07-07 03:07:49] [Rank 0] step:5381/10000 train_time:435588ms step_avg:80.95ms
+[2025-07-07 03:07:52] [Rank 0] step:5401/10000 train_time:437768ms step_avg:81.05ms
+[2025-07-07 03:07:53] [Rank 0] step:5421/10000 train_time:439256ms step_avg:81.03ms
+[2025-07-07 03:07:55] [Rank 0] step:5441/10000 train_time:440959ms step_avg:81.04ms
+[2025-07-07 03:07:56] [Rank 0] step:5461/10000 train_time:442521ms step_avg:81.03ms
+[2025-07-07 03:07:58] [Rank 0] step:5481/10000 train_time:444059ms step_avg:81.02ms
+[2025-07-07 03:08:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:08:01] [Rank 0] PRINT: step:5500/10000 train_loss:1.3441 val_loss:1.3231 train_time:446207ms step_avg:81.13ms
+[2025-07-07 03:08:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:08:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
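The repeated divisibility warning is straightforward arithmetic: 1966080 / 262144 = 7.5, so a validation loop that only consumes full batches runs 7 of them and never touches the last 131072 tokens. A sketch of that check, assuming the loop simply floors the batch count, which is what the warning's wording implies:

```python
val_tokens = 1_966_080
val_batch_size = 262_144  # 2**18

# 7 full batches, 131072 tokens left over and skipped
full_batches, remainder = divmod(val_tokens, val_batch_size)
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
```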
+[2025-07-07 03:08:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:13:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:13:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:13:25] [Rank 0] Total Loss: 4.3554
+[2025-07-07 03:13:25] [Rank 0] Total FTA: 0.3515
+[2025-07-07 03:13:25] [Rank 0] Group 0 Loss: 4.5790
+[2025-07-07 03:13:25] [Rank 0] Group 1 Loss: 4.0670
+[2025-07-07 03:13:25] [Rank 0] Group 2 Loss: 4.1089
+[2025-07-07 03:13:25] [Rank 0] Group 3 Loss: 4.5431
+[2025-07-07 03:13:25] [Rank 0] Group 4 Loss: 4.3410
+[2025-07-07 03:13:26] [Rank 0] Group 5 Loss: 4.3030
+[2025-07-07 03:13:26] [Rank 0] Group 6 Loss: 4.2412
+[2025-07-07 03:13:26] [Rank 0] Group 7 Loss: 4.4319
+[2025-07-07 03:13:26] [Rank 0] Group 8 Loss: 4.3713
+[2025-07-07 03:13:26] [Rank 0] Group 9 Loss: 4.2999
+[2025-07-07 03:13:26] [Rank 0] Group 10 Loss: 4.3225
+[2025-07-07 03:13:26] [Rank 0] Group 11 Loss: 4.3810
+[2025-07-07 03:13:26] [Rank 0] Group 0 FTA: 0.4837
+[2025-07-07 03:13:26] [Rank 0] Group 1 FTA: 0.2969
+[2025-07-07 03:13:26] [Rank 0] Group 2 FTA: 0.5078
+[2025-07-07 03:13:26] [Rank 0] Group 3 FTA: 0.2604
+[2025-07-07 03:13:26] [Rank 0] Group 4 FTA: 0.2448
+[2025-07-07 03:13:26] [Rank 0] Group 5 FTA: 0.3854
+[2025-07-07 03:13:26] [Rank 0] Group 6 FTA: 0.3099
+[2025-07-07 03:13:26] [Rank 0] Group 7 FTA: 0.3047
+[2025-07-07 03:13:26] [Rank 0] Group 8 FTA: 0.3125
+[2025-07-07 03:13:26] [Rank 0] Group 9 FTA: 0.3320
+[2025-07-07 03:13:26] [Rank 0] Group 10 FTA: 0.3438
+[2025-07-07 03:13:26] [Rank 0] Group 11 FTA: 0.3320
+[2025-07-07 03:13:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:13:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:13:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:13:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:13:27] [Rank 0] step:5501/10000 train_time:446229ms step_avg:81.12ms
+[2025-07-07 03:13:29] [Rank 0] step:5521/10000 train_time:447720ms step_avg:81.09ms
+[2025-07-07 03:13:30] [Rank 0] step:5541/10000 train_time:449213ms step_avg:81.07ms
+[2025-07-07 03:13:32] [Rank 0] step:5561/10000 train_time:450707ms step_avg:81.05ms
+[2025-07-07 03:13:34] [Rank 0] step:5581/10000 train_time:452892ms step_avg:81.15ms
+[2025-07-07 03:13:35] [Rank 0] step:5601/10000 train_time:454367ms step_avg:81.12ms
+[2025-07-07 03:13:37] [Rank 0] step:5621/10000 train_time:455861ms step_avg:81.10ms
+[2025-07-07 03:13:38] [Rank 0] step:5641/10000 train_time:457358ms step_avg:81.08ms
+[2025-07-07 03:13:40] [Rank 0] step:5661/10000 train_time:458856ms step_avg:81.06ms
+[2025-07-07 03:13:42] [Rank 0] step:5681/10000 train_time:461012ms step_avg:81.15ms
+[2025-07-07 03:13:43] [Rank 0] step:5701/10000 train_time:462509ms step_avg:81.13ms
+[2025-07-07 03:13:45] [Rank 0] step:5721/10000 train_time:464009ms step_avg:81.11ms
+[2025-07-07 03:13:46] [Rank 0] step:5741/10000 train_time:465511ms step_avg:81.09ms
+[2025-07-07 03:13:48] [Rank 0] step:5761/10000 train_time:467267ms step_avg:81.11ms
+[2025-07-07 03:13:50] [Rank 0] step:5781/10000 train_time:468748ms step_avg:81.08ms
+[2025-07-07 03:13:51] [Rank 0] step:5801/10000 train_time:470250ms step_avg:81.06ms
+[2025-07-07 03:13:53] [Rank 0] step:5821/10000 train_time:471755ms step_avg:81.04ms
+[2025-07-07 03:13:54] [Rank 0] step:5841/10000 train_time:473260ms step_avg:81.02ms
+[2025-07-07 03:13:56] [Rank 0] step:5861/10000 train_time:474999ms step_avg:81.04ms
+[2025-07-07 03:13:57] [Rank 0] step:5881/10000 train_time:476504ms step_avg:81.02ms
+[2025-07-07 03:13:59] [Rank 0] step:5901/10000 train_time:478008ms step_avg:81.00ms
+[2025-07-07 03:14:00] [Rank 0] step:5921/10000 train_time:479514ms step_avg:80.99ms
+[2025-07-07 03:14:02] [Rank 0] step:5941/10000 train_time:481021ms step_avg:80.97ms
+[2025-07-07 03:14:04] [Rank 0] step:5961/10000 train_time:482761ms step_avg:80.99ms
+[2025-07-07 03:14:05] [Rank 0] step:5981/10000 train_time:484266ms step_avg:80.97ms
+[2025-07-07 03:14:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:14:07] [Rank 0] PRINT: step:6000/10000 train_loss:1.3073 val_loss:1.2884 train_time:485770ms step_avg:80.96ms
+[2025-07-07 03:14:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:14:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:14:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:19:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:19:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:19:31] [Rank 0] Total Loss: 4.4142
+[2025-07-07 03:19:31] [Rank 0] Total FTA: 0.3634
+[2025-07-07 03:19:31] [Rank 0] Group 0 Loss: 4.5673
+[2025-07-07 03:19:31] [Rank 0] Group 1 Loss: 4.2305
+[2025-07-07 03:19:31] [Rank 0] Group 2 Loss: 4.2039
+[2025-07-07 03:19:31] [Rank 0] Group 3 Loss: 4.4965
+[2025-07-07 03:19:31] [Rank 0] Group 4 Loss: 4.4763
+[2025-07-07 03:19:31] [Rank 0] Group 5 Loss: 4.4071
+[2025-07-07 03:19:31] [Rank 0] Group 6 Loss: 4.2821
+[2025-07-07 03:19:31] [Rank 0] Group 7 Loss: 4.4470
+[2025-07-07 03:19:31] [Rank 0] Group 8 Loss: 4.3671
+[2025-07-07 03:19:31] [Rank 0] Group 9 Loss: 4.3567
+[2025-07-07 03:19:31] [Rank 0] Group 10 Loss: 4.4651
+[2025-07-07 03:19:31] [Rank 0] Group 11 Loss: 4.4392
+[2025-07-07 03:19:31] [Rank 0] Group 0 FTA: 0.5111
+[2025-07-07 03:19:31] [Rank 0] Group 1 FTA: 0.5521
+[2025-07-07 03:19:31] [Rank 0] Group 2 FTA: 0.4036
+[2025-07-07 03:19:31] [Rank 0] Group 3 FTA: 0.2943
+[2025-07-07 03:19:31] [Rank 0] Group 4 FTA: 0.1250
+[2025-07-07 03:19:31] [Rank 0] Group 5 FTA: 0.3750
+[2025-07-07 03:19:31] [Rank 0] Group 6 FTA: 0.3151
+[2025-07-07 03:19:31] [Rank 0] Group 7 FTA: 0.3411
+[2025-07-07 03:19:31] [Rank 0] Group 8 FTA: 0.3333
+[2025-07-07 03:19:31] [Rank 0] Group 9 FTA: 0.3242
+[2025-07-07 03:19:31] [Rank 0] Group 10 FTA: 0.3770
+[2025-07-07 03:19:31] [Rank 0] Group 11 FTA: 0.3184
+[2025-07-07 03:19:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:19:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:19:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:19:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:19:33] [Rank 0] step:6001/10000 train_time:485791ms step_avg:80.95ms
+[2025-07-07 03:19:34] [Rank 0] step:6021/10000 train_time:487286ms step_avg:80.93ms
+[2025-07-07 03:19:36] [Rank 0] step:6041/10000 train_time:489429ms step_avg:81.02ms
+[2025-07-07 03:19:38] [Rank 0] step:6061/10000 train_time:490920ms step_avg:81.00ms
+[2025-07-07 03:19:39] [Rank 0] step:6081/10000 train_time:492413ms step_avg:80.98ms
+[2025-07-07 03:19:41] [Rank 0] step:6101/10000 train_time:493908ms step_avg:80.96ms
+[2025-07-07 03:19:42] [Rank 0] step:6121/10000 train_time:495402ms step_avg:80.93ms
+[2025-07-07 03:19:44] [Rank 0] step:6141/10000 train_time:497133ms step_avg:80.95ms
+[2025-07-07 03:19:45] [Rank 0] step:6161/10000 train_time:498629ms step_avg:80.93ms
+[2025-07-07 03:19:47] [Rank 0] step:6181/10000 train_time:500125ms step_avg:80.91ms
+[2025-07-07 03:19:48] [Rank 0] step:6201/10000 train_time:501622ms step_avg:80.89ms
+[2025-07-07 03:19:51] [Rank 0] step:6221/10000 train_time:503766ms step_avg:80.98ms
+[2025-07-07 03:19:52] [Rank 0] step:6241/10000 train_time:505261ms step_avg:80.96ms
+[2025-07-07 03:19:54] [Rank 0] step:6261/10000 train_time:506760ms step_avg:80.94ms
+[2025-07-07 03:19:55] [Rank 0] step:6281/10000 train_time:508258ms step_avg:80.92ms
+[2025-07-07 03:19:57] [Rank 0] step:6301/10000 train_time:509758ms step_avg:80.90ms
+[2025-07-07 03:19:58] [Rank 0] step:6321/10000 train_time:511492ms step_avg:80.92ms
+[2025-07-07 03:20:00] [Rank 0] step:6341/10000 train_time:512988ms step_avg:80.90ms
+[2025-07-07 03:20:01] [Rank 0] step:6361/10000 train_time:514490ms step_avg:80.88ms
+[2025-07-07 03:20:03] [Rank 0] step:6381/10000 train_time:515996ms step_avg:80.86ms
+[2025-07-07 03:20:05] [Rank 0] step:6401/10000 train_time:517736ms step_avg:80.88ms
+[2025-07-07 03:20:06] [Rank 0] step:6421/10000 train_time:519239ms step_avg:80.87ms
+[2025-07-07 03:20:08] [Rank 0] step:6441/10000 train_time:520744ms step_avg:80.85ms
+[2025-07-07 03:20:09] [Rank 0] step:6461/10000 train_time:522247ms step_avg:80.83ms
+[2025-07-07 03:20:11] [Rank 0] step:6481/10000 train_time:523750ms step_avg:80.81ms
+[2025-07-07 03:20:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:20:13] [Rank 0] PRINT: step:6500/10000 train_loss:1.2745 val_loss:1.2587 train_time:525485ms step_avg:80.84ms
+[2025-07-07 03:20:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:20:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
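The step_avg field is cumulative wall-clock training time divided by steps completed; plain division reproduces every printed value in this log, e.g. 525485 ms / 6500 ≈ 80.84 ms on the step-6500 line above. A trivial check:

```python
def step_avg_ms(train_time_ms: int, step: int) -> float:
    """Cumulative training time divided by steps completed."""
    return train_time_ms / step

# Reproduces "step:6500/10000 ... train_time:525485ms step_avg:80.84ms"
print(f"{step_avg_ms(525_485, 6_500):.2f}ms")  # -> 80.84ms
```

Whether the script divides by step or by something like step minus warmup is not visible here, but plain division matches the log, so that is the reading assumed above.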
+[2025-07-07 03:20:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:25:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:25:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:25:40] [Rank 0] Total Loss: 4.5125
+[2025-07-07 03:25:40] [Rank 0] Total FTA: 0.3691
+[2025-07-07 03:25:40] [Rank 0] Group 0 Loss: 4.6856
+[2025-07-07 03:25:40] [Rank 0] Group 1 Loss: 4.2878
+[2025-07-07 03:25:40] [Rank 0] Group 2 Loss: 4.4267
+[2025-07-07 03:25:40] [Rank 0] Group 3 Loss: 4.4843
+[2025-07-07 03:25:40] [Rank 0] Group 4 Loss: 4.5388
+[2025-07-07 03:25:40] [Rank 0] Group 5 Loss: 4.5080
+[2025-07-07 03:25:40] [Rank 0] Group 6 Loss: 4.4381
+[2025-07-07 03:25:40] [Rank 0] Group 7 Loss: 4.5823
+[2025-07-07 03:25:40] [Rank 0] Group 8 Loss: 4.5329
+[2025-07-07 03:25:40] [Rank 0] Group 9 Loss: 4.4989
+[2025-07-07 03:25:40] [Rank 0] Group 10 Loss: 4.5402
+[2025-07-07 03:25:40] [Rank 0] Group 11 Loss: 4.4851
+[2025-07-07 03:25:40] [Rank 0] Group 0 FTA: 0.4993
+[2025-07-07 03:25:40] [Rank 0] Group 1 FTA: 0.4948
+[2025-07-07 03:25:40] [Rank 0] Group 2 FTA: 0.3255
+[2025-07-07 03:25:40] [Rank 0] Group 3 FTA: 0.2526
+[2025-07-07 03:25:40] [Rank 0] Group 4 FTA: 0.2318
+[2025-07-07 03:25:40] [Rank 0] Group 5 FTA: 0.3490
+[2025-07-07 03:25:40] [Rank 0] Group 6 FTA: 0.3151
+[2025-07-07 03:25:40] [Rank 0] Group 7 FTA: 0.3594
+[2025-07-07 03:25:40] [Rank 0] Group 8 FTA: 0.3828
+[2025-07-07 03:25:40] [Rank 0] Group 9 FTA: 0.3789
+[2025-07-07 03:25:40] [Rank 0] Group 10 FTA: 0.3672
+[2025-07-07 03:25:40] [Rank 0] Group 11 FTA: 0.3604
+[2025-07-07 03:25:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:25:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:25:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:25:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:25:42] [Rank 0] step:6501/10000 train_time:525506ms step_avg:80.83ms
+[2025-07-07 03:25:43] [Rank 0] step:6521/10000 train_time:527013ms step_avg:80.82ms
+[2025-07-07 03:25:45] [Rank 0] step:6541/10000 train_time:528506ms step_avg:80.80ms
+[2025-07-07 03:25:46] [Rank 0] step:6561/10000 train_time:530000ms step_avg:80.78ms
+[2025-07-07 03:25:48] [Rank 0] step:6581/10000 train_time:532153ms step_avg:80.86ms
+[2025-07-07 03:25:50] [Rank 0] step:6601/10000 train_time:533838ms step_avg:80.87ms
+[2025-07-07 03:25:51] [Rank 0] step:6621/10000 train_time:535395ms step_avg:80.86ms
+[2025-07-07 03:25:53] [Rank 0] step:6641/10000 train_time:536890ms step_avg:80.84ms
+[2025-07-07 03:25:55] [Rank 0] step:6661/10000 train_time:538384ms step_avg:80.83ms
+[2025-07-07 03:25:57] [Rank 0] step:6681/10000 train_time:540521ms step_avg:80.90ms
+[2025-07-07 03:25:58] [Rank 0] step:6701/10000 train_time:542018ms step_avg:80.89ms
+[2025-07-07 03:26:00] [Rank 0] step:6721/10000 train_time:543518ms step_avg:80.87ms
+[2025-07-07 03:26:01] [Rank 0] step:6741/10000 train_time:545018ms step_avg:80.85ms
+[2025-07-07 03:26:03] [Rank 0] step:6761/10000 train_time:547163ms step_avg:80.93ms
+[2025-07-07 03:26:05] [Rank 0] step:6781/10000 train_time:548663ms step_avg:80.91ms
+[2025-07-07 03:26:06] [Rank 0] step:6801/10000 train_time:550164ms step_avg:80.89ms
+[2025-07-07 03:26:08] [Rank 0] step:6821/10000 train_time:551665ms step_avg:80.88ms
+[2025-07-07 03:26:09] [Rank 0] step:6841/10000 train_time:553424ms step_avg:80.90ms
+[2025-07-07 03:26:11] [Rank 0] step:6861/10000 train_time:554906ms step_avg:80.88ms
+[2025-07-07 03:26:12] [Rank 0] step:6881/10000 train_time:556407ms step_avg:80.86ms
+[2025-07-07 03:26:14] [Rank 0] step:6901/10000 train_time:557908ms step_avg:80.84ms
+[2025-07-07 03:26:15] [Rank 0] step:6921/10000 train_time:559410ms step_avg:80.83ms
+[2025-07-07 03:26:18] [Rank 0] step:6941/10000 train_time:561566ms step_avg:80.91ms
+[2025-07-07 03:26:19] [Rank 0] step:6961/10000 train_time:563065ms step_avg:80.89ms
+[2025-07-07 03:26:21] [Rank 0] step:6981/10000 train_time:564564ms step_avg:80.87ms
+[2025-07-07 03:26:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:26:23] [Rank 0] PRINT: step:7000/10000 train_loss:1.2466 val_loss:1.2338 train_time:566064ms step_avg:80.87ms
+[2025-07-07 03:26:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:26:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:26:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:31:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:31:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:31:48] [Rank 0] Total Loss: 4.5835
+[2025-07-07 03:31:48] [Rank 0] Total FTA: 0.3790
+[2025-07-07 03:31:48] [Rank 0] Group 0 Loss: 4.6592
+[2025-07-07 03:31:48] [Rank 0] Group 1 Loss: 4.2476
+[2025-07-07 03:31:48] [Rank 0] Group 2 Loss: 4.3466
+[2025-07-07 03:31:48] [Rank 0] Group 3 Loss: 4.7771
+[2025-07-07 03:31:48] [Rank 0] Group 4 Loss: 4.6393
+[2025-07-07 03:31:48] [Rank 0] Group 5 Loss: 4.5216
+[2025-07-07 03:31:48] [Rank 0] Group 6 Loss: 4.5831
+[2025-07-07 03:31:48] [Rank 0] Group 7 Loss: 4.6053
+[2025-07-07 03:31:48] [Rank 0] Group 8 Loss: 4.6044
+[2025-07-07 03:31:48] [Rank 0] Group 9 Loss: 4.5971
+[2025-07-07 03:31:48] [Rank 0] Group 10 Loss: 4.6047
+[2025-07-07 03:31:48] [Rank 0] Group 11 Loss: 4.6415
+[2025-07-07 03:31:48] [Rank 0] Group 0 FTA: 0.4967
+[2025-07-07 03:31:48] [Rank 0] Group 1 FTA: 0.5000
+[2025-07-07 03:31:48] [Rank 0] Group 2 FTA: 0.4844
+[2025-07-07 03:31:48] [Rank 0] Group 3 FTA: 0.2604
+[2025-07-07 03:31:48] [Rank 0] Group 4 FTA: 0.1797
+[2025-07-07 03:31:48] [Rank 0] Group 5 FTA: 0.4271
+[2025-07-07 03:31:48] [Rank 0] Group 6 FTA: 0.3594
+[2025-07-07 03:31:48] [Rank 0] Group 7 FTA: 0.3464
+[2025-07-07 03:31:48] [Rank 0] Group 8 FTA: 0.3698
+[2025-07-07 03:31:48] [Rank 0] Group 9 FTA: 0.3281
+[2025-07-07 03:31:48] [Rank 0] Group 10 FTA: 0.3086
+[2025-07-07 03:31:48] [Rank 0] Group 11 FTA: 0.3779
+[2025-07-07 03:31:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:31:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:31:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:31:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:31:50] [Rank 0] step:7001/10000 train_time:566084ms step_avg:80.86ms
+[2025-07-07 03:31:51] [Rank 0] step:7021/10000 train_time:567854ms step_avg:80.88ms
+[2025-07-07 03:31:53] [Rank 0] step:7041/10000 train_time:569328ms step_avg:80.86ms
+[2025-07-07 03:31:54] [Rank 0] step:7061/10000 train_time:570823ms step_avg:80.84ms
+[2025-07-07 03:31:56] [Rank 0] step:7081/10000 train_time:572316ms step_avg:80.82ms
+[2025-07-07 03:31:57] [Rank 0] step:7101/10000 train_time:573811ms step_avg:80.81ms
+[2025-07-07 03:31:59] [Rank 0] step:7121/10000 train_time:575967ms step_avg:80.88ms
+[2025-07-07 03:32:01] [Rank 0] step:7141/10000 train_time:577461ms step_avg:80.87ms
+[2025-07-07 03:32:02] [Rank 0] step:7161/10000 train_time:578957ms step_avg:80.85ms
+[2025-07-07 03:32:04] [Rank 0] step:7181/10000 train_time:580455ms step_avg:80.83ms
+[2025-07-07 03:32:06] [Rank 0] step:7201/10000 train_time:582007ms step_avg:80.82ms
+[2025-07-07 03:32:08] [Rank 0] step:7221/10000 train_time:584111ms step_avg:80.89ms
+[2025-07-07 03:32:09] [Rank 0] step:7241/10000 train_time:585736ms step_avg:80.89ms
+[2025-07-07 03:32:11] [Rank 0] step:7261/10000 train_time:587330ms step_avg:80.89ms
+[2025-07-07 03:32:12] [Rank 0] step:7281/10000 train_time:588897ms step_avg:80.88ms
+[2025-07-07 03:32:14] [Rank 0] step:7301/10000 train_time:591041ms step_avg:80.95ms
+[2025-07-07 03:32:16] [Rank 0] step:7321/10000 train_time:592540ms step_avg:80.94ms
+[2025-07-07 03:32:17] [Rank 0] step:7341/10000 train_time:594043ms step_avg:80.92ms
+[2025-07-07 03:32:19] [Rank 0] step:7361/10000 train_time:595543ms step_avg:80.91ms
+[2025-07-07 03:32:21] [Rank 0] step:7381/10000 train_time:597046ms step_avg:80.89ms
+[2025-07-07 03:32:23] [Rank 0] step:7401/10000 train_time:599189ms step_avg:80.96ms
+[2025-07-07 03:32:24] [Rank 0] step:7421/10000 train_time:600691ms step_avg:80.94ms
+[2025-07-07 03:32:26] [Rank 0] step:7441/10000 train_time:602194ms step_avg:80.93ms
+[2025-07-07 03:32:27] [Rank 0] step:7461/10000 train_time:603698ms step_avg:80.91ms
+[2025-07-07 03:32:29] [Rank 0] step:7481/10000 train_time:605858ms step_avg:80.99ms
+[2025-07-07 03:32:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:32:32] [Rank 0] PRINT: step:7500/10000 train_loss:1.2237 val_loss:1.2131 train_time:607357ms step_avg:80.98ms
+[2025-07-07 03:32:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:32:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
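FTA is never expanded in this log; reading it as first-token accuracy (the model's top-1 prediction matches the first answer token) is an assumption, but it fits values bounded in [0, 1] reported both in total and per group. A hypothetical tally under that reading, with the 12 group ids matching the classes reported above and everything else illustrative:

```python
import torch

@torch.no_grad()
def first_token_accuracy(logits, first_answer_tokens, group_ids, num_groups=12):
    """Hypothetical FTA: top-1 hit rate on the first answer token, per group.

    logits: (N, vocab) outputs at each sample's first answer position
    first_answer_tokens: (N,) gold token ids
    group_ids: (N,) class id in [0, num_groups)
    """
    hits = (logits.argmax(dim=-1) == first_answer_tokens).float()
    total_fta = hits.mean().item()
    group_fta = []
    for g in range(num_groups):
        mask = group_ids == g
        group_fta.append(hits[mask].mean().item() if mask.any() else float("nan"))
    return total_fta, group_fta
```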
+[2025-07-07 03:32:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:37:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:37:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:37:56] [Rank 0] Total Loss: 4.5814
+[2025-07-07 03:37:56] [Rank 0] Total FTA: 0.3964
+[2025-07-07 03:37:56] [Rank 0] Group 0 Loss: 4.8037
+[2025-07-07 03:37:56] [Rank 0] Group 1 Loss: 4.1433
+[2025-07-07 03:37:56] [Rank 0] Group 2 Loss: 4.4200
+[2025-07-07 03:37:56] [Rank 0] Group 3 Loss: 4.5626
+[2025-07-07 03:37:56] [Rank 0] Group 4 Loss: 4.5552
+[2025-07-07 03:37:56] [Rank 0] Group 5 Loss: 4.5544
+[2025-07-07 03:37:56] [Rank 0] Group 6 Loss: 4.4796
+[2025-07-07 03:37:56] [Rank 0] Group 7 Loss: 4.6627
+[2025-07-07 03:37:56] [Rank 0] Group 8 Loss: 4.6209
+[2025-07-07 03:37:56] [Rank 0] Group 9 Loss: 4.6198
+[2025-07-07 03:37:56] [Rank 0] Group 10 Loss: 4.6230
+[2025-07-07 03:37:56] [Rank 0] Group 11 Loss: 4.6289
+[2025-07-07 03:37:56] [Rank 0] Group 0 FTA: 0.5163
+[2025-07-07 03:37:56] [Rank 0] Group 1 FTA: 0.5260
+[2025-07-07 03:37:56] [Rank 0] Group 2 FTA: 0.4297
+[2025-07-07 03:37:56] [Rank 0] Group 3 FTA: 0.2760
+[2025-07-07 03:37:56] [Rank 0] Group 4 FTA: 0.2266
+[2025-07-07 03:37:56] [Rank 0] Group 5 FTA: 0.4167
+[2025-07-07 03:37:56] [Rank 0] Group 6 FTA: 0.3568
+[2025-07-07 03:37:56] [Rank 0] Group 7 FTA: 0.3568
+[2025-07-07 03:37:56] [Rank 0] Group 8 FTA: 0.3333
+[2025-07-07 03:37:56] [Rank 0] Group 9 FTA: 0.3672
+[2025-07-07 03:37:56] [Rank 0] Group 10 FTA: 0.4043
+[2025-07-07 03:37:56] [Rank 0] Group 11 FTA: 0.4033
+[2025-07-07 03:37:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:37:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:37:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:37:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:37:57] [Rank 0] step:7501/10000 train_time:607378ms step_avg:80.97ms
+[2025-07-07 03:37:59] [Rank 0] step:7521/10000 train_time:608886ms step_avg:80.96ms
+[2025-07-07 03:38:00] [Rank 0] step:7541/10000 train_time:610380ms step_avg:80.94ms
+[2025-07-07 03:38:03] [Rank 0] step:7561/10000 train_time:611928ms step_avg:80.93ms
+[2025-07-07 03:38:04] [Rank 0] step:7581/10000 train_time:614016ms step_avg:80.99ms
+[2025-07-07 03:38:06] [Rank 0] step:7601/10000 train_time:615510ms step_avg:80.98ms
+[2025-07-07 03:38:07] [Rank 0] step:7621/10000 train_time:617006ms step_avg:80.96ms
+[2025-07-07 03:38:09] [Rank 0] step:7641/10000 train_time:618504ms step_avg:80.95ms
+[2025-07-07 03:38:11] [Rank 0] step:7661/10000 train_time:620657ms step_avg:81.02ms
+[2025-07-07 03:38:12] [Rank 0] step:7681/10000 train_time:622153ms step_avg:81.00ms
+[2025-07-07 03:38:14] [Rank 0] step:7701/10000 train_time:623651ms step_avg:80.98ms
+[2025-07-07 03:38:15] [Rank 0] step:7721/10000 train_time:625150ms step_avg:80.97ms
+[2025-07-07 03:38:17] [Rank 0] step:7741/10000 train_time:626803ms step_avg:80.97ms
+[2025-07-07 03:38:18] [Rank 0] step:7761/10000 train_time:628283ms step_avg:80.95ms
+[2025-07-07 03:38:20] [Rank 0] step:7781/10000 train_time:629783ms step_avg:80.94ms
+[2025-07-07 03:38:21] [Rank 0] step:7801/10000 train_time:631288ms step_avg:80.92ms
+[2025-07-07 03:38:23] [Rank 0] step:7821/10000 train_time:632793ms step_avg:80.91ms
+[2025-07-07 03:38:25] [Rank 0] step:7841/10000 train_time:634534ms step_avg:80.93ms
+[2025-07-07 03:38:26] [Rank 0] step:7861/10000 train_time:636038ms step_avg:80.91ms
+[2025-07-07 03:38:28] [Rank 0] step:7881/10000 train_time:637544ms step_avg:80.90ms
+[2025-07-07 03:38:29] [Rank 0] step:7901/10000 train_time:639293ms step_avg:80.91ms
+[2025-07-07 03:38:32] [Rank 0] step:7921/10000 train_time:641565ms step_avg:81.00ms
+[2025-07-07 03:38:33] [Rank 0] step:7941/10000 train_time:643049ms step_avg:80.98ms
+[2025-07-07 03:38:35] [Rank 0] step:7961/10000 train_time:644555ms step_avg:80.96ms
+[2025-07-07 03:38:36] [Rank 0] step:7981/10000 train_time:646063ms step_avg:80.95ms
+[2025-07-07 03:38:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:38:38] [Rank 0] PRINT: step:8000/10000 train_loss:1.2046 val_loss:1.1960 train_time:647568ms step_avg:80.95ms
+[2025-07-07 03:38:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:38:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
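Each "[✓] ... curve updated and saved" block rewrites the same four PNGs, so the plots grow by one point per evaluation. A minimal matplotlib sketch of that overwrite-in-place pattern; the function name and history lists are illustrative, with the Total FTA values taken from the evals above:

```python
import matplotlib
matplotlib.use("Agg")  # headless rendering on a training node
import matplotlib.pyplot as plt

def update_total_curve(steps, values, ylabel, out_path):
    """Redraw the full history and overwrite the PNG, as the log's
    '[✓] ... curve updated and saved' lines suggest."""
    fig, ax = plt.subplots()
    ax.plot(steps, values, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    fig.savefig(out_path, dpi=150)
    plt.close(fig)

# e.g. after the step-7500 eval above:
update_total_curve([5000, 5500, 6000, 6500, 7000, 7500],
                   [0.3304, 0.3515, 0.3634, 0.3691, 0.3790, 0.3964],
                   "Total FTA", "total_acc_curve.png")
```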
+[2025-07-07 03:38:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:44:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:44:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:44:05] [Rank 0] Total Loss: 4.6629
+[2025-07-07 03:44:05] [Rank 0] Total FTA: 0.3870
+[2025-07-07 03:44:05] [Rank 0] Group 0 Loss: 4.6856
+[2025-07-07 03:44:05] [Rank 0] Group 1 Loss: 4.2958
+[2025-07-07 03:44:05] [Rank 0] Group 2 Loss: 4.7815
+[2025-07-07 03:44:05] [Rank 0] Group 3 Loss: 4.8385
+[2025-07-07 03:44:05] [Rank 0] Group 4 Loss: 4.6282
+[2025-07-07 03:44:05] [Rank 0] Group 5 Loss: 4.5967
+[2025-07-07 03:44:05] [Rank 0] Group 6 Loss: 4.4958
+[2025-07-07 03:44:05] [Rank 0] Group 7 Loss: 4.6826
+[2025-07-07 03:44:05] [Rank 0] Group 8 Loss: 4.7334
+[2025-07-07 03:44:05] [Rank 0] Group 9 Loss: 4.7136
+[2025-07-07 03:44:05] [Rank 0] Group 10 Loss: 4.6652
+[2025-07-07 03:44:05] [Rank 0] Group 11 Loss: 4.7263
+[2025-07-07 03:44:05] [Rank 0] Group 0 FTA: 0.5059
+[2025-07-07 03:44:05] [Rank 0] Group 1 FTA: 0.5130
+[2025-07-07 03:44:05] [Rank 0] Group 2 FTA: 0.3281
+[2025-07-07 03:44:05] [Rank 0] Group 3 FTA: 0.2396
+[2025-07-07 03:44:05] [Rank 0] Group 4 FTA: 0.3047
+[2025-07-07 03:44:05] [Rank 0] Group 5 FTA: 0.3802
+[2025-07-07 03:44:05] [Rank 0] Group 6 FTA: 0.3724
+[2025-07-07 03:44:05] [Rank 0] Group 7 FTA: 0.3932
+[2025-07-07 03:44:05] [Rank 0] Group 8 FTA: 0.3828
+[2025-07-07 03:44:05] [Rank 0] Group 9 FTA: 0.3750
+[2025-07-07 03:44:05] [Rank 0] Group 10 FTA: 0.3770
+[2025-07-07 03:44:05] [Rank 0] Group 11 FTA: 0.3740
+[2025-07-07 03:44:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:44:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:44:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:44:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:44:06] [Rank 0] step:8001/10000 train_time:647589ms step_avg:80.94ms
+[2025-07-07 03:44:09] [Rank 0] step:8021/10000 train_time:649745ms step_avg:81.01ms
+[2025-07-07 03:44:10] [Rank 0] step:8041/10000 train_time:651233ms step_avg:80.99ms
+[2025-07-07 03:44:12] [Rank 0] step:8061/10000 train_time:652725ms step_avg:80.97ms
+[2025-07-07 03:44:13] [Rank 0] step:8081/10000 train_time:654218ms step_avg:80.96ms
+[2025-07-07 03:44:15] [Rank 0] step:8101/10000 train_time:656380ms step_avg:81.02ms
+[2025-07-07 03:44:17] [Rank 0] step:8121/10000 train_time:657855ms step_avg:81.01ms
+[2025-07-07 03:44:18] [Rank 0] step:8141/10000 train_time:659349ms step_avg:80.99ms
+[2025-07-07 03:44:20] [Rank 0] step:8161/10000 train_time:660844ms step_avg:80.98ms
+[2025-07-07 03:44:21] [Rank 0] step:8181/10000 train_time:662341ms step_avg:80.96ms
+[2025-07-07 03:44:23] [Rank 0] step:8201/10000 train_time:664498ms step_avg:81.03ms
+[2025-07-07 03:44:25] [Rank 0] step:8221/10000 train_time:665991ms step_avg:81.01ms
+[2025-07-07 03:44:26] [Rank 0] step:8241/10000 train_time:667484ms step_avg:81.00ms
+[2025-07-07 03:44:28] [Rank 0] step:8261/10000 train_time:668979ms step_avg:80.98ms
+[2025-07-07 03:44:30] [Rank 0] step:8281/10000 train_time:670474ms step_avg:80.97ms
+[2025-07-07 03:44:31] [Rank 0] step:8301/10000 train_time:672203ms step_avg:80.98ms
+[2025-07-07 03:44:33] [Rank 0] step:8321/10000 train_time:673697ms step_avg:80.96ms
+[2025-07-07 03:44:34] [Rank 0] step:8341/10000 train_time:675195ms step_avg:80.95ms
+[2025-07-07 03:44:36] [Rank 0] step:8361/10000 train_time:676693ms step_avg:80.93ms
+[2025-07-07 03:44:38] [Rank 0] step:8381/10000 train_time:678837ms step_avg:81.00ms
+[2025-07-07 03:44:39] [Rank 0] step:8401/10000 train_time:680334ms step_avg:80.98ms
+[2025-07-07 03:44:41] [Rank 0] step:8421/10000 train_time:681834ms step_avg:80.97ms
+[2025-07-07 03:44:42] [Rank 0] step:8441/10000 train_time:683333ms step_avg:80.95ms
+[2025-07-07 03:44:44] [Rank 0] step:8461/10000 train_time:684885ms step_avg:80.95ms
+[2025-07-07 03:44:45] [Rank 0] step:8481/10000 train_time:686571ms step_avg:80.95ms
+[2025-07-07 03:44:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:44:48] [Rank 0] PRINT: step:8500/10000 train_loss:1.1889 val_loss:1.1820 train_time:688071ms step_avg:80.95ms
+[2025-07-07 03:44:48] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:44:48] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:44:48] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:50:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:50:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:50:13] [Rank 0] Total Loss: 4.7498
+[2025-07-07 03:50:13] [Rank 0] Total FTA: 0.3815
+[2025-07-07 03:50:13] [Rank 0] Group 0 Loss: 4.9205
+[2025-07-07 03:50:13] [Rank 0] Group 1 Loss: 4.4886
+[2025-07-07 03:50:13] [Rank 0] Group 2 Loss: 4.7100
+[2025-07-07 03:50:13] [Rank 0] Group 3 Loss: 4.8282
+[2025-07-07 03:50:13] [Rank 0] Group 4 Loss: 4.7260
+[2025-07-07 03:50:14] [Rank 0] Group 5 Loss: 4.6739
+[2025-07-07 03:50:14] [Rank 0] Group 6 Loss: 4.6481
+[2025-07-07 03:50:14] [Rank 0] Group 7 Loss: 4.7725
+[2025-07-07 03:50:14] [Rank 0] Group 8 Loss: 4.8067
+[2025-07-07 03:50:14] [Rank 0] Group 9 Loss: 4.8329
+[2025-07-07 03:50:14] [Rank 0] Group 10 Loss: 4.7502
+[2025-07-07 03:50:14] [Rank 0] Group 11 Loss: 4.7297
+[2025-07-07 03:50:14] [Rank 0] Group 0 FTA: 0.5085
+[2025-07-07 03:50:14] [Rank 0] Group 1 FTA: 0.4661
+[2025-07-07 03:50:14] [Rank 0] Group 2 FTA: 0.4167
+[2025-07-07 03:50:14] [Rank 0] Group 3 FTA: 0.2214
+[2025-07-07 03:50:14] [Rank 0] Group 4 FTA: 0.3125
+[2025-07-07 03:50:14] [Rank 0] Group 5 FTA: 0.3151
+[2025-07-07 03:50:14] [Rank 0] Group 6 FTA: 0.4141
+[2025-07-07 03:50:14] [Rank 0] Group 7 FTA: 0.3307
+[2025-07-07 03:50:14] [Rank 0] Group 8 FTA: 0.3646
+[2025-07-07 03:50:14] [Rank 0] Group 9 FTA: 0.3594
+[2025-07-07 03:50:14] [Rank 0] Group 10 FTA: 0.3574
+[2025-07-07 03:50:14] [Rank 0] Group 11 FTA: 0.3828
+[2025-07-07 03:50:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:50:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:50:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:50:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:50:15] [Rank 0] step:8501/10000 train_time:688092ms step_avg:80.94ms
+[2025-07-07 03:50:17] [Rank 0] step:8521/10000 train_time:689612ms step_avg:80.93ms
+[2025-07-07 03:50:18] [Rank 0] step:8541/10000 train_time:691104ms step_avg:80.92ms
+[2025-07-07 03:50:20] [Rank 0] step:8561/10000 train_time:693253ms step_avg:80.98ms
+[2025-07-07 03:50:22] [Rank 0] step:8581/10000 train_time:694745ms step_avg:80.96ms
+[2025-07-07 03:50:23] [Rank 0] step:8601/10000 train_time:696239ms step_avg:80.95ms
+[2025-07-07 03:50:25] [Rank 0] step:8621/10000 train_time:697734ms step_avg:80.93ms
+[2025-07-07 03:50:26] [Rank 0] step:8641/10000 train_time:699231ms step_avg:80.92ms
+[2025-07-07 03:50:28] [Rank 0] step:8661/10000 train_time:700962ms step_avg:80.93ms
+[2025-07-07 03:50:29] [Rank 0] step:8681/10000 train_time:702455ms step_avg:80.92ms
+[2025-07-07 03:50:31] [Rank 0] step:8701/10000 train_time:703952ms step_avg:80.90ms
+[2025-07-07 03:50:32] [Rank 0] step:8721/10000 train_time:705451ms step_avg:80.89ms
+[2025-07-07 03:50:35] [Rank 0] step:8741/10000 train_time:707618ms step_avg:80.95ms
+[2025-07-07 03:50:36] [Rank 0] step:8761/10000 train_time:709116ms step_avg:80.94ms
+[2025-07-07 03:50:38] [Rank 0] step:8781/10000 train_time:710617ms step_avg:80.93ms
+[2025-07-07 03:50:39] [Rank 0] step:8801/10000 train_time:712113ms step_avg:80.91ms
+[2025-07-07 03:50:41] [Rank 0] step:8821/10000 train_time:713611ms step_avg:80.90ms
+[2025-07-07 03:50:43] [Rank 0] step:8841/10000 train_time:715761ms step_avg:80.96ms
+[2025-07-07 03:50:44] [Rank 0] step:8861/10000 train_time:717258ms step_avg:80.95ms
+[2025-07-07 03:50:46] [Rank 0] step:8881/10000 train_time:718756ms step_avg:80.93ms
+[2025-07-07 03:50:47] [Rank 0] step:8901/10000 train_time:720254ms step_avg:80.92ms
+[2025-07-07 03:50:49] [Rank 0] step:8921/10000 train_time:721987ms step_avg:80.93ms
+[2025-07-07 03:50:50] [Rank 0] step:8941/10000 train_time:723486ms step_avg:80.92ms
+[2025-07-07 03:50:52] [Rank 0] step:8961/10000 train_time:724986ms step_avg:80.90ms
+[2025-07-07 03:50:53] [Rank 0] step:8981/10000 train_time:726486ms step_avg:80.89ms
+[2025-07-07 03:50:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:50:56] [Rank 0] PRINT: step:9000/10000 train_loss:1.1755 val_loss:1.1696 train_time:727987ms step_avg:80.89ms
+[2025-07-07 03:50:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:50:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 03:50:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 03:56:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 03:56:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 03:56:19] [Rank 0] Total Loss: 4.7360
+[2025-07-07 03:56:19] [Rank 0] Total FTA: 0.4158
+[2025-07-07 03:56:19] [Rank 0] Group 0 Loss: 4.7162
+[2025-07-07 03:56:19] [Rank 0] Group 1 Loss: 4.5374
+[2025-07-07 03:56:19] [Rank 0] Group 2 Loss: 4.6915
+[2025-07-07 03:56:19] [Rank 0] Group 3 Loss: 4.8362
+[2025-07-07 03:56:19] [Rank 0] Group 4 Loss: 4.7700
+[2025-07-07 03:56:19] [Rank 0] Group 5 Loss: 4.7071
+[2025-07-07 03:56:19] [Rank 0] Group 6 Loss: 4.6692
+[2025-07-07 03:56:19] [Rank 0] Group 7 Loss: 4.8132
+[2025-07-07 03:56:19] [Rank 0] Group 8 Loss: 4.7848
+[2025-07-07 03:56:19] [Rank 0] Group 9 Loss: 4.7229
+[2025-07-07 03:56:19] [Rank 0] Group 10 Loss: 4.7567
+[2025-07-07 03:56:19] [Rank 0] Group 11 Loss: 4.7731
+[2025-07-07 03:56:19] [Rank 0] Group 0 FTA: 0.5020
+[2025-07-07 03:56:19] [Rank 0] Group 1 FTA: 0.5417
+[2025-07-07 03:56:19] [Rank 0] Group 2 FTA: 0.5417
+[2025-07-07 03:56:19] [Rank 0] Group 3 FTA: 0.2578
+[2025-07-07 03:56:19] [Rank 0] Group 4 FTA: 0.2604
+[2025-07-07 03:56:19] [Rank 0] Group 5 FTA: 0.3411
+[2025-07-07 03:56:19] [Rank 0] Group 6 FTA: 0.3958
+[2025-07-07 03:56:19] [Rank 0] Group 7 FTA: 0.4036
+[2025-07-07 03:56:19] [Rank 0] Group 8 FTA: 0.4193
+[2025-07-07 03:56:19] [Rank 0] Group 9 FTA: 0.3984
+[2025-07-07 03:56:19] [Rank 0] Group 10 FTA: 0.4199
+[2025-07-07 03:56:19] [Rank 0] Group 11 FTA: 0.4150
+[2025-07-07 03:56:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 03:56:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 03:56:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 03:56:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 03:56:23] [Rank 0] step:9001/10000 train_time:728117ms step_avg:80.89ms
+[2025-07-07 03:56:24] [Rank 0] step:9021/10000 train_time:730048ms step_avg:80.93ms
+[2025-07-07 03:56:26] [Rank 0] step:9041/10000 train_time:731604ms step_avg:80.92ms
+[2025-07-07 03:56:27] [Rank 0] step:9061/10000 train_time:733096ms step_avg:80.91ms
+[2025-07-07 03:56:29] [Rank 0] step:9081/10000 train_time:734591ms step_avg:80.89ms
+[2025-07-07 03:56:31] [Rank 0] step:9101/10000 train_time:736736ms step_avg:80.95ms
+[2025-07-07 03:56:32] [Rank 0] step:9121/10000 train_time:738229ms step_avg:80.94ms
+[2025-07-07 03:56:34] [Rank 0] step:9141/10000 train_time:739725ms step_avg:80.92ms
+[2025-07-07 03:56:35] [Rank 0] step:9161/10000 train_time:741221ms step_avg:80.91ms
+[2025-07-07 03:56:37] [Rank 0] step:9181/10000 train_time:742770ms step_avg:80.90ms
+[2025-07-07 03:56:39] [Rank 0] step:9201/10000 train_time:744450ms step_avg:80.91ms
+[2025-07-07 03:56:40] [Rank 0] step:9221/10000 train_time:745948ms step_avg:80.90ms
+[2025-07-07 03:56:42] [Rank 0] step:9241/10000 train_time:747448ms step_avg:80.88ms
+[2025-07-07 03:56:43] [Rank 0] step:9261/10000 train_time:748948ms step_avg:80.87ms
+[2025-07-07 03:56:45] [Rank 0] step:9281/10000 train_time:751102ms step_avg:80.93ms
+[2025-07-07 03:56:47] [Rank 0] step:9301/10000 train_time:752600ms step_avg:80.92ms
+[2025-07-07 03:56:48] [Rank 0] step:9321/10000 train_time:754102ms step_avg:80.90ms
+[2025-07-07 03:56:50] [Rank 0] step:9341/10000 train_time:755604ms step_avg:80.89ms
+[2025-07-07 03:56:52] [Rank 0] step:9361/10000 train_time:757366ms step_avg:80.91ms
+[2025-07-07 03:56:53] [Rank 0] step:9381/10000 train_time:759250ms step_avg:80.93ms
+[2025-07-07 03:56:55] [Rank 0] step:9401/10000 train_time:760750ms step_avg:80.92ms
+[2025-07-07 03:56:56] [Rank 0] step:9421/10000 train_time:762252ms step_avg:80.91ms
+[2025-07-07 03:56:58] [Rank 0] step:9441/10000 train_time:763753ms step_avg:80.90ms
+[2025-07-07 03:57:00] [Rank 0] step:9461/10000 train_time:765919ms step_avg:80.96ms
+[2025-07-07 03:57:02] [Rank 0] step:9481/10000 train_time:767423ms step_avg:80.94ms
+[2025-07-07 03:57:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 03:57:04] [Rank 0] PRINT: step:9500/10000 train_loss:1.1641 val_loss:1.1608 train_time:768926ms step_avg:80.94ms
+[2025-07-07 03:57:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 03:57:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
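The divisibility warning that precedes each evaluation above is plain integer arithmetic. A quick check with the logged values, assuming the validation loop consumes whole batches only:

val_tokens = 1966080
val_batch_size = 262144
full_batches = val_tokens // val_batch_size   # 7, since 1966080 / 262144 = 7.5
evaluated = full_batches * val_batch_size     # 1835008 tokens actually validated
missed = val_tokens - evaluated               # 131072 tokens skipped per validation pass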
+[2025-07-07 03:57:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:02:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:02:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:02:25] [Rank 0] Total Loss: 4.7802
+[2025-07-07 04:02:25] [Rank 0] Total FTA: 0.4078
+[2025-07-07 04:02:25] [Rank 0] Group 0 Loss: 4.7465
+[2025-07-07 04:02:25] [Rank 0] Group 1 Loss: 4.4747
+[2025-07-07 04:02:25] [Rank 0] Group 2 Loss: 4.5349
+[2025-07-07 04:02:25] [Rank 0] Group 3 Loss: 5.0212
+[2025-07-07 04:02:25] [Rank 0] Group 4 Loss: 4.7430
+[2025-07-07 04:02:25] [Rank 0] Group 5 Loss: 4.7522
+[2025-07-07 04:02:25] [Rank 0] Group 6 Loss: 4.7001
+[2025-07-07 04:02:25] [Rank 0] Group 7 Loss: 4.9186
+[2025-07-07 04:02:25] [Rank 0] Group 8 Loss: 4.8306
+[2025-07-07 04:02:25] [Rank 0] Group 9 Loss: 4.8622
+[2025-07-07 04:02:25] [Rank 0] Group 10 Loss: 4.8500
+[2025-07-07 04:02:25] [Rank 0] Group 11 Loss: 4.8497
+[2025-07-07 04:02:25] [Rank 0] Group 0 FTA: 0.4954
+[2025-07-07 04:02:25] [Rank 0] Group 1 FTA: 0.4531
+[2025-07-07 04:02:25] [Rank 0] Group 2 FTA: 0.4089
+[2025-07-07 04:02:25] [Rank 0] Group 3 FTA: 0.2448
+[2025-07-07 04:02:25] [Rank 0] Group 4 FTA: 0.2682
+[2025-07-07 04:02:25] [Rank 0] Group 5 FTA: 0.3932
+[2025-07-07 04:02:25] [Rank 0] Group 6 FTA: 0.4427
+[2025-07-07 04:02:25] [Rank 0] Group 7 FTA: 0.4349
+[2025-07-07 04:02:25] [Rank 0] Group 8 FTA: 0.4297
+[2025-07-07 04:02:25] [Rank 0] Group 9 FTA: 0.4805
+[2025-07-07 04:02:25] [Rank 0] Group 10 FTA: 0.4238
+[2025-07-07 04:02:25] [Rank 0] Group 11 FTA: 0.3857
+[2025-07-07 04:02:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 04:02:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 04:02:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 04:02:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 04:02:26] [Rank 0] step:9501/10000 train_time:768948ms step_avg:80.93ms
+[2025-07-07 04:02:28] [Rank 0] step:9521/10000 train_time:770444ms step_avg:80.92ms
+[2025-07-07 04:02:30] [Rank 0] step:9541/10000 train_time:772611ms step_avg:80.98ms
+[2025-07-07 04:02:31] [Rank 0] step:9561/10000 train_time:774085ms step_avg:80.96ms
+[2025-07-07 04:02:33] [Rank 0] step:9581/10000 train_time:775577ms step_avg:80.95ms
+[2025-07-07 04:02:34] [Rank 0] step:9601/10000 train_time:777076ms step_avg:80.94ms
+[2025-07-07 04:02:36] [Rank 0] step:9621/10000 train_time:778569ms step_avg:80.92ms
+[2025-07-07 04:02:38] [Rank 0] step:9641/10000 train_time:780728ms step_avg:80.98ms
+[2025-07-07 04:02:40] [Rank 0] step:9661/10000 train_time:782224ms step_avg:80.97ms
+[2025-07-07 04:02:41] [Rank 0] step:9681/10000 train_time:783721ms step_avg:80.95ms
+[2025-07-07 04:02:43] [Rank 0] step:9701/10000 train_time:785374ms step_avg:80.96ms
+[2025-07-07 04:02:45] [Rank 0] step:9721/10000 train_time:787588ms step_avg:81.02ms
+[2025-07-07 04:02:46] [Rank 0] step:9741/10000 train_time:789063ms step_avg:81.00ms
+[2025-07-07 04:02:48] [Rank 0] step:9761/10000 train_time:790562ms step_avg:80.99ms
+[2025-07-07 04:02:49] [Rank 0] step:9781/10000 train_time:792061ms step_avg:80.98ms
+[2025-07-07 04:02:51] [Rank 0] step:9801/10000 train_time:793562ms step_avg:80.97ms
+[2025-07-07 04:02:53] [Rank 0] step:9821/10000 train_time:795707ms step_avg:81.02ms
+[2025-07-07 04:02:55] [Rank 0] step:9841/10000 train_time:797205ms step_avg:81.01ms
+[2025-07-07 04:02:56] [Rank 0] step:9861/10000 train_time:798707ms step_avg:81.00ms
+[2025-07-07 04:02:58] [Rank 0] step:9881/10000 train_time:800211ms step_avg:80.98ms
+[2025-07-07 04:03:00] [Rank 0] step:9901/10000 train_time:801716ms step_avg:80.97ms
+[2025-07-07 04:03:01] [Rank 0] step:9921/10000 train_time:803886ms step_avg:81.03ms
+[2025-07-07 04:03:03] [Rank 0] step:9941/10000 train_time:805389ms step_avg:81.02ms
+[2025-07-07 04:03:04] [Rank 0] step:9961/10000 train_time:806894ms step_avg:81.01ms
+[2025-07-07 04:03:06] [Rank 0] step:9981/10000 train_time:808397ms step_avg:80.99ms
+[2025-07-07 04:03:08] [Rank 0] step:10000/10000 train_time:810481ms step_avg:81.05ms
+[2025-07-07 04:03:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:03:09] [Rank 0] PRINT: step:10000/10000 train_loss:1.1564 val_loss:1.1546 train_time:810560ms step_avg:81.06ms
+[2025-07-07 04:03:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:03:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:03:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:08:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:08:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:08:31] [Rank 0] Total Loss: 4.7932
+[2025-07-07 04:08:31] [Rank 0] Total FTA: 0.4182
+[2025-07-07 04:08:31] [Rank 0] Group 0 Loss: 4.7955
+[2025-07-07 04:08:31] [Rank 0] Group 1 Loss: 4.4599
+[2025-07-07 04:08:31] [Rank 0] Group 2 Loss: 4.6128
+[2025-07-07 04:08:31] [Rank 0] Group 3 Loss: 4.9915
+[2025-07-07 04:08:31] [Rank 0] Group 4 Loss: 4.8297
+[2025-07-07 04:08:31] [Rank 0] Group 5 Loss: 4.7768
+[2025-07-07 04:08:31] [Rank 0] Group 6 Loss: 4.7276
+[2025-07-07 04:08:31] [Rank 0] Group 7 Loss: 4.8316
+[2025-07-07 04:08:31] [Rank 0] Group 8 Loss: 4.8074
+[2025-07-07 04:08:31] [Rank 0] Group 9 Loss: 4.8243
+[2025-07-07 04:08:31] [Rank 0] Group 10 Loss: 4.8476
+[2025-07-07 04:08:31] [Rank 0] Group 11 Loss: 4.8722
+[2025-07-07 04:08:31] [Rank 0] Group 0 FTA: 0.4850
+[2025-07-07 04:08:31] [Rank 0] Group 1 FTA: 0.5469
+[2025-07-07 04:08:31] [Rank 0] Group 2 FTA: 0.4531
+[2025-07-07 04:08:31] [Rank 0] Group 3 FTA: 0.3099
+[2025-07-07 04:08:31] [Rank 0] Group 4 FTA: 0.2526
+[2025-07-07 04:08:31] [Rank 0] Group 5 FTA: 0.4062
+[2025-07-07 04:08:31] [Rank 0] Group 6 FTA: 0.4531
+[2025-07-07 04:08:31] [Rank 0] Group 7 FTA: 0.4036
+[2025-07-07 04:08:31] [Rank 0] Group 8 FTA: 0.3542
+[2025-07-07 04:08:31] [Rank 0] Group 9 FTA: 0.3477
+[2025-07-07 04:08:31] [Rank 0] Group 10 FTA: 0.4160
+[2025-07-07 04:08:31] [Rank 0] Group 11 FTA: 0.4492
+[2025-07-07 04:08:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 04:08:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 04:08:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 04:08:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 04:08:32] [Rank 0] step:10001/10000 train_time:810582ms step_avg:81.05ms
+[2025-07-07 04:08:32] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 04:08:32 2025 ---
+[2025-07-07 04:08:32] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1e10194972f2f6a57ed7ee8834bb50790c8a72cd
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "8a187f04-7a9c-4299-96b5-969bbb1b8453",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..7d3b957e31db3efb2f3e42ff20546de63af6e16c
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5f632bd3831f4ff08440904afe05ab108c41c0916ba72e31bab5fddfcbc887e
+size 481306
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..54af000b0b0cbdf25bc2b2af9f04c9d62fdeab79
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7ccb268a1420e3863bab1cfa05a3bf711832622bdd47838cb50d188743061ff
+size 395819
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..071c0b36ee1f53ebaf4d8165b3173f03a94020b2
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a40d3903baf6d2e859d70b0407b4899f152f69935f67559ac208f818966aadbc
+size 112011
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..2b20bb94913cbc22b65db296cb4253c9487c773a
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9cd61a21fa57db68f033668f8b6ea5161196d1a6dee45f0123a02a5fc4b4b30
+size 132025
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/training_log_8a187f04-7a9c-4299-96b5-969bbb1b8453.txt b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/training_log_8a187f04-7a9c-4299-96b5-969bbb1b8453.txt
new file mode 100644
index 0000000000000000000000000000000000000000..a0e38564ceb5a7f5778cf6cd26d5d0b48d5b0457
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/training_log_8a187f04-7a9c-4299-96b5-969bbb1b8453.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 10:48:09] [Rank 0] PRINT: --- Script Start: Sun Jul 6 10:48:09 2025 ---
+[2025-07-06 10:48:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005)
+[2025-07-06 10:48:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 10:48:09] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-06 10:48:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42
+[2025-07-06 10:48:09] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle repeats the shard list indefinitely, so multi-epoch training just keeps streaming
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
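# The Hyperparameters logged above (num_iterations = 10000, cooldown_frac = 0.8) imply a
# constant-then-cooldown learning-rate shape: full LR for the first 20% of steps, then a
# linear decay over the final 80%. The helper below is an illustrative sketch of that
# shape only; it is an assumption added for clarity, not a function from this script.
def lr_scale_sketch(step, num_iterations=10000, cooldown_frac=0.8):
    x = step / num_iterations
    if x < 1 - cooldown_frac:
        return 1.0                      # constant phase (steps 0..1999 here)
    return (1 - x) / cooldown_frac      # linear cooldown, reaching 0 at the final step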
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    # Not used below; retained for reference:
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # cap at num_samples entries from the cached set
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f"  Original: {sample['original']}")
+            # print(f"  Cleaned : {sample['cleaned']}")
+            print(f"  Prompt : '{sample['prompt']}'")
+            print(f"  Answer : '{sample['answer']}'")
+            print(
+                f"  Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f"  Sample {idx} error: {e}")
+                    continue
+        model.train()
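+        # Worked illustration of the padding above (added note, values assumed):
+        # with BLOCK_SIZE = 128, a 10-token prompt is right-padded with 118 EOS
+        # tokens (actual_len = 10) and spans ceil(128 / 128) = 1 sliding-window
+        # block; a 200-token prompt is truncated to its first 128 tokens
+        # (actual_len = 128).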
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f"  Prompt : '{result['prompt']}'")
+            print(f"  Expected answer: '{result['answer']}'")
+            print(
+                f"  Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f"  Expected  : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f"  First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f"  Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f"  Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
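+# A minimal sanity sketch (added for exposition; not part of the original run):
+# for m = 3 the helper above produces one head class with 2**3 = 8 samples,
+# then groups of 1, 2, and 4 classes with 4, 2, and 1 samples per class.
+def _demo_powerlaw_counts():
+    counts, groups = generate_powerlaw_selection_counts(3)
+    assert counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+    assert groups == [0, 1, 2, 2, 3, 3, 3, 3]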
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation pass, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[-1]  # (loss, logits) tuple: keep the logits, as in compute_first_token_accuracy
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)  # history is keyed by training step
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
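+# Schedule recap before entering the training loop (added note): with
+# num_iterations=10000 and cooldown_frac=0.8, get_lr holds the multiplier at 1.0
+# for the first 2000 steps, then decays linearly to 0.1 by the final step, e.g.
+# get_lr(0) == 1.0, get_lr(2000) == 1.0, get_lr(6000) == 0.55, get_lr(10000) == 0.1.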
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
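+# Example invocation (added for illustration; "train_qa.py" is a placeholder for
+# this script's filename, the flags are the ones defined above):
+#   torchrun --nproc_per_node=8 train_qa.py --optimizer_mode 0 \
+#       --model_parameterization qkvo --adam_lr 1e-4 --seed 42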
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation pass, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # number of samples contributing to loss
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # number of samples contributing to FTA
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']:
+                continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None:
+                continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2:
+                continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match:
+                continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer:
+                continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple):
+                logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# Learning rate schedule: constant for the first (1 - cooldown_frac) of training,
+# then linear decay down to 10% of the initial LR.
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # The original `assert 0 <= x < 1` would fail on the final step (step == num_iterations),
+    # so clamp x into [0, 1] instead.
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # max(...) guards against division by zero when cooldown_frac == 0
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# Attention window size schedule: the window grows linearly with training progress.
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # clamp, as in get_lr
+
+    # Ensure the window size is at least 128 tokens (one block)
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
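+
+# Worked example (added note, not in the original script): with num_iterations = 10000
+# and cooldown_frac = 0.8, get_lr(step) returns 1.0 for the first 2000 steps; at
+# step 6000, w = (1 - 0.6) / 0.8 = 0.5, so the multiplier is 0.5 * 1.0 + 0.5 * 0.1 = 0.55;
+# at step 10000 it bottoms out at 0.1. Likewise get_window_size_blocks ramps the
+# attention window from 128 tokens (1 block) at step 0 to 1792 tokens (14 blocks)
+# at the final step, since next_multiple_of_n(1728, n=128) = 1792.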
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the original 'model'; the compiled handle is stored separately as 'model_compiled'.
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+# Snapshot model and optimizer state so the warmup steps leave training unaffected.
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 10:48:10] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 10:48:10] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-06 10:48:10] [Rank 0] PRINT: Constructing model... +[2025-07-06 10:48:10] [Rank 0] PRINT: Constructing model... +[2025-07-06 10:48:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 10:48:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 10:48:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 10:48:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 10:48:12] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 10:48:12] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 10:48:13] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 10:48:13] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 10:48:13] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 10:48:13] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 10:48:13] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 10:48:13] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 10:48:13] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 10:48:13] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 10:48:13] [Rank 0] PRINT: Model returns: +[2025-07-06 10:48:13] [Rank 0] PRINT: Model returns: +[2025-07-06 10:48:13] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 10:48:13] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 10:48:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 10:48:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 10:48:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 10:48:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 10:48:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 10:48:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 10:48:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 10:48:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 10:48:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 10:48:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 10:48:13] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 10:48:13] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 10:48:13] [Rank 0] PRINT: Starting warmup... +[2025-07-06 10:48:13] [Rank 0] PRINT: Starting warmup... +[2025-07-06 10:49:25] [Rank 0] PRINT: Warmup complete. +[2025-07-06 10:49:25] [Rank 0] PRINT: Warmup complete. +[2025-07-06 10:49:25] [Rank 0] PRINT: Starting training... +[2025-07-06 10:49:25] [Rank 0] PRINT: Starting training... +[2025-07-06 10:49:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 10:49:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 10:49:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 10:49:32] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 10:49:34] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.43ms +[2025-07-06 10:49:34] [Rank 0] step:21/10000 train_time:1752ms step_avg:83.43ms +[2025-07-06 10:49:35] [Rank 0] step:41/10000 train_time:3308ms step_avg:80.67ms +[2025-07-06 10:49:35] [Rank 0] step:41/10000 train_time:3308ms step_avg:80.67ms +[2025-07-06 10:49:37] [Rank 0] step:61/10000 train_time:4759ms step_avg:78.02ms +[2025-07-06 10:49:37] [Rank 0] step:61/10000 train_time:4759ms step_avg:78.02ms +[2025-07-06 10:49:38] [Rank 0] step:81/10000 train_time:6212ms step_avg:76.70ms +[2025-07-06 10:49:38] [Rank 0] step:81/10000 train_time:6212ms step_avg:76.70ms +[2025-07-06 10:49:40] [Rank 0] step:101/10000 train_time:8007ms step_avg:79.28ms +[2025-07-06 10:49:40] [Rank 0] step:101/10000 train_time:8007ms step_avg:79.28ms +[2025-07-06 10:49:42] [Rank 0] step:121/10000 train_time:9462ms step_avg:78.20ms +[2025-07-06 10:49:42] [Rank 0] step:121/10000 train_time:9462ms step_avg:78.20ms +[2025-07-06 10:49:43] [Rank 0] step:141/10000 train_time:11018ms step_avg:78.14ms +[2025-07-06 10:49:43] [Rank 0] step:141/10000 train_time:11018ms step_avg:78.14ms +[2025-07-06 10:49:45] [Rank 0] step:161/10000 train_time:12574ms step_avg:78.10ms +[2025-07-06 10:49:45] [Rank 0] step:161/10000 train_time:12574ms step_avg:78.10ms +[2025-07-06 10:49:47] [Rank 0] step:181/10000 train_time:14385ms step_avg:79.48ms +[2025-07-06 10:49:47] [Rank 0] step:181/10000 train_time:14385ms step_avg:79.48ms +[2025-07-06 10:49:48] [Rank 0] step:201/10000 train_time:16334ms step_avg:81.27ms +[2025-07-06 10:49:48] [Rank 0] step:201/10000 train_time:16334ms step_avg:81.27ms +[2025-07-06 10:49:50] [Rank 0] step:221/10000 train_time:17892ms step_avg:80.96ms +[2025-07-06 10:49:50] [Rank 0] step:221/10000 train_time:17892ms step_avg:80.96ms +[2025-07-06 10:49:51] [Rank 0] step:241/10000 train_time:19353ms step_avg:80.30ms +[2025-07-06 10:49:51] [Rank 0] step:241/10000 train_time:19353ms step_avg:80.30ms +[2025-07-06 10:49:53] [Rank 0] step:261/10000 train_time:20914ms step_avg:80.13ms +[2025-07-06 10:49:53] [Rank 0] step:261/10000 train_time:20914ms step_avg:80.13ms +[2025-07-06 10:49:55] [Rank 0] step:281/10000 train_time:23025ms step_avg:81.94ms +[2025-07-06 10:49:55] [Rank 0] step:281/10000 train_time:23025ms step_avg:81.94ms +[2025-07-06 10:49:57] [Rank 0] step:301/10000 train_time:24588ms step_avg:81.69ms +[2025-07-06 10:49:57] [Rank 0] step:301/10000 train_time:24588ms step_avg:81.69ms +[2025-07-06 10:49:58] [Rank 0] step:321/10000 train_time:26052ms step_avg:81.16ms +[2025-07-06 10:49:58] [Rank 0] step:321/10000 train_time:26052ms step_avg:81.16ms +[2025-07-06 10:50:00] [Rank 0] step:341/10000 train_time:27618ms step_avg:80.99ms +[2025-07-06 10:50:00] [Rank 0] step:341/10000 train_time:27618ms step_avg:80.99ms +[2025-07-06 10:50:01] [Rank 0] step:361/10000 train_time:29137ms step_avg:80.71ms +[2025-07-06 10:50:01] [Rank 0] step:361/10000 train_time:29137ms step_avg:80.71ms +[2025-07-06 10:50:03] [Rank 0] step:381/10000 train_time:30688ms step_avg:80.55ms +[2025-07-06 10:50:03] [Rank 0] step:381/10000 train_time:30688ms step_avg:80.55ms +[2025-07-06 10:50:04] [Rank 0] step:401/10000 train_time:32255ms step_avg:80.44ms +[2025-07-06 10:50:04] [Rank 0] step:401/10000 train_time:32255ms step_avg:80.44ms +[2025-07-06 10:50:06] [Rank 0] step:421/10000 train_time:33823ms step_avg:80.34ms 
+[2025-07-06 10:50:06] [Rank 0] step:421/10000 train_time:33823ms step_avg:80.34ms +[2025-07-06 10:50:07] [Rank 0] step:441/10000 train_time:35292ms step_avg:80.03ms +[2025-07-06 10:50:07] [Rank 0] step:441/10000 train_time:35292ms step_avg:80.03ms +[2025-07-06 10:50:10] [Rank 0] step:461/10000 train_time:37524ms step_avg:81.40ms +[2025-07-06 10:50:10] [Rank 0] step:461/10000 train_time:37524ms step_avg:81.40ms +[2025-07-06 10:50:11] [Rank 0] step:481/10000 train_time:38989ms step_avg:81.06ms +[2025-07-06 10:50:11] [Rank 0] step:481/10000 train_time:38989ms step_avg:81.06ms +[2025-07-06 10:50:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 10:50:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 10:50:13] [Rank 0] PRINT: step:500/10000 train_loss:6.8636 val_loss:4.3853 train_time:40458ms step_avg:80.92ms +[2025-07-06 10:50:13] [Rank 0] PRINT: step:500/10000 train_loss:6.8636 val_loss:4.3853 train_time:40458ms step_avg:80.92ms +[2025-07-06 10:50:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 10:50:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 10:50:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 10:50:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 10:50:14] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 10:50:14] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 10:55:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 10:55:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 10:55:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 10:55:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 10:55:31] [Rank 0] Total Loss: 5.5424 +[2025-07-06 10:55:31] [Rank 0] Total Loss: 5.5424 +[2025-07-06 10:55:32] [Rank 0] Total FTA: 0.0701 +[2025-07-06 10:55:32] [Rank 0] Total FTA: 0.0701 +[2025-07-06 10:55:32] [Rank 0] Group 0 Loss: 5.5322 +[2025-07-06 10:55:32] [Rank 0] Group 0 Loss: 5.5322 +[2025-07-06 10:55:32] [Rank 0] Group 1 Loss: 5.6114 +[2025-07-06 10:55:32] [Rank 0] Group 1 Loss: 5.6114 +[2025-07-06 10:55:32] [Rank 0] Group 2 Loss: 5.5677 +[2025-07-06 10:55:32] [Rank 0] Group 2 Loss: 5.5677 +[2025-07-06 10:55:32] [Rank 0] Group 3 Loss: 5.4814 +[2025-07-06 10:55:32] [Rank 0] Group 3 Loss: 5.4814 +[2025-07-06 10:55:32] [Rank 0] Group 4 Loss: 5.5733 +[2025-07-06 10:55:32] [Rank 0] Group 4 Loss: 5.5733 +[2025-07-06 10:55:32] [Rank 0] Group 5 Loss: 5.5178 +[2025-07-06 10:55:32] [Rank 0] Group 5 Loss: 5.5178 +[2025-07-06 10:55:32] [Rank 0] Group 6 Loss: 5.5237 +[2025-07-06 10:55:32] [Rank 0] Group 6 Loss: 5.5237 +[2025-07-06 10:55:32] [Rank 0] Group 7 Loss: 5.5481 +[2025-07-06 10:55:32] [Rank 0] Group 7 Loss: 5.5481 +[2025-07-06 10:55:32] [Rank 0] Group 8 Loss: 5.5224 +[2025-07-06 10:55:32] [Rank 0] Group 8 Loss: 5.5224 +[2025-07-06 10:55:32] [Rank 0] Group 9 Loss: 5.5116 +[2025-07-06 10:55:32] [Rank 0] Group 9 Loss: 5.5116 +[2025-07-06 10:55:32] [Rank 0] Group 10 Loss: 5.5298 +[2025-07-06 10:55:32] [Rank 0] Group 10 Loss: 5.5298 +[2025-07-06 10:55:32] [Rank 0] Group 11 Loss: 5.5614 +[2025-07-06 10:55:32] [Rank 0] Group 11 Loss: 5.5614 +[2025-07-06 10:55:32] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-06 10:55:32] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-06 
10:55:32] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 10:55:32] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 10:55:32] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-06 10:55:32] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-06 10:55:32] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-06 10:55:32] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-06 10:55:32] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-06 10:55:32] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-06 10:55:32] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-06 10:55:32] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-06 10:55:32] [Rank 0] Group 6 FTA: 0.0625 +[2025-07-06 10:55:32] [Rank 0] Group 6 FTA: 0.0625 +[2025-07-06 10:55:32] [Rank 0] Group 7 FTA: 0.0781 +[2025-07-06 10:55:32] [Rank 0] Group 7 FTA: 0.0781 +[2025-07-06 10:55:32] [Rank 0] Group 8 FTA: 0.0781 +[2025-07-06 10:55:32] [Rank 0] Group 8 FTA: 0.0781 +[2025-07-06 10:55:32] [Rank 0] Group 9 FTA: 0.0586 +[2025-07-06 10:55:32] [Rank 0] Group 9 FTA: 0.0586 +[2025-07-06 10:55:32] [Rank 0] Group 10 FTA: 0.0645 +[2025-07-06 10:55:32] [Rank 0] Group 10 FTA: 0.0645 +[2025-07-06 10:55:32] [Rank 0] Group 11 FTA: 0.0703 +[2025-07-06 10:55:32] [Rank 0] Group 11 FTA: 0.0703 +[2025-07-06 10:55:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-06 10:55:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-06 10:55:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-06 10:55:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-06 10:55:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-06 10:55:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-06 10:55:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-06 10:55:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-06 10:55:33] [Rank 0] step:501/10000 train_time:40478ms step_avg:80.79ms +[2025-07-06 10:55:33] [Rank 0] step:501/10000 train_time:40478ms step_avg:80.79ms +[2025-07-06 10:55:35] [Rank 0] step:521/10000 train_time:41931ms step_avg:80.48ms +[2025-07-06 10:55:35] [Rank 0] step:521/10000 train_time:41931ms step_avg:80.48ms +[2025-07-06 10:55:37] [Rank 0] step:541/10000 train_time:43650ms step_avg:80.68ms +[2025-07-06 10:55:37] [Rank 0] step:541/10000 train_time:43650ms step_avg:80.68ms +[2025-07-06 10:55:38] [Rank 0] step:561/10000 train_time:45607ms step_avg:81.30ms +[2025-07-06 10:55:38] [Rank 0] step:561/10000 train_time:45607ms step_avg:81.30ms +[2025-07-06 10:55:40] [Rank 0] step:581/10000 train_time:47063ms step_avg:81.00ms +[2025-07-06 10:55:40] [Rank 0] step:581/10000 train_time:47063ms step_avg:81.00ms +[2025-07-06 10:55:41] [Rank 0] step:601/10000 train_time:48523ms step_avg:80.74ms +[2025-07-06 10:55:41] [Rank 0] step:601/10000 train_time:48523ms step_avg:80.74ms +[2025-07-06 10:55:43] [Rank 0] step:621/10000 train_time:49986ms step_avg:80.49ms +[2025-07-06 10:55:43] [Rank 0] step:621/10000 train_time:49986ms 
step_avg:80.49ms +[2025-07-06 10:55:45] [Rank 0] step:641/10000 train_time:52198ms step_avg:81.43ms +[2025-07-06 10:55:45] [Rank 0] step:641/10000 train_time:52198ms step_avg:81.43ms +[2025-07-06 10:55:47] [Rank 0] step:661/10000 train_time:53756ms step_avg:81.33ms +[2025-07-06 10:55:47] [Rank 0] step:661/10000 train_time:53756ms step_avg:81.33ms +[2025-07-06 10:55:48] [Rank 0] step:681/10000 train_time:55314ms step_avg:81.23ms +[2025-07-06 10:55:48] [Rank 0] step:681/10000 train_time:55314ms step_avg:81.23ms +[2025-07-06 10:55:50] [Rank 0] step:701/10000 train_time:56878ms step_avg:81.14ms +[2025-07-06 10:55:50] [Rank 0] step:701/10000 train_time:56878ms step_avg:81.14ms +[2025-07-06 10:55:52] [Rank 0] step:721/10000 train_time:58394ms step_avg:80.99ms +[2025-07-06 10:55:52] [Rank 0] step:721/10000 train_time:58394ms step_avg:80.99ms +[2025-07-06 10:55:53] [Rank 0] step:741/10000 train_time:60453ms step_avg:81.58ms +[2025-07-06 10:55:53] [Rank 0] step:741/10000 train_time:60453ms step_avg:81.58ms +[2025-07-06 10:55:55] [Rank 0] step:761/10000 train_time:61931ms step_avg:81.38ms +[2025-07-06 10:55:55] [Rank 0] step:761/10000 train_time:61931ms step_avg:81.38ms +[2025-07-06 10:55:56] [Rank 0] step:781/10000 train_time:63404ms step_avg:81.18ms +[2025-07-06 10:55:56] [Rank 0] step:781/10000 train_time:63404ms step_avg:81.18ms +[2025-07-06 10:55:58] [Rank 0] step:801/10000 train_time:64879ms step_avg:81.00ms +[2025-07-06 10:55:58] [Rank 0] step:801/10000 train_time:64879ms step_avg:81.00ms +[2025-07-06 10:56:00] [Rank 0] step:821/10000 train_time:67020ms step_avg:81.63ms +[2025-07-06 10:56:00] [Rank 0] step:821/10000 train_time:67020ms step_avg:81.63ms +[2025-07-06 10:56:01] [Rank 0] step:841/10000 train_time:68491ms step_avg:81.44ms +[2025-07-06 10:56:01] [Rank 0] step:841/10000 train_time:68491ms step_avg:81.44ms +[2025-07-06 10:56:03] [Rank 0] step:861/10000 train_time:70067ms step_avg:81.38ms +[2025-07-06 10:56:03] [Rank 0] step:861/10000 train_time:70067ms step_avg:81.38ms +[2025-07-06 10:56:04] [Rank 0] step:881/10000 train_time:71639ms step_avg:81.32ms +[2025-07-06 10:56:04] [Rank 0] step:881/10000 train_time:71639ms step_avg:81.32ms +[2025-07-06 10:56:06] [Rank 0] step:901/10000 train_time:73270ms step_avg:81.32ms +[2025-07-06 10:56:06] [Rank 0] step:901/10000 train_time:73270ms step_avg:81.32ms +[2025-07-06 10:56:08] [Rank 0] step:921/10000 train_time:74825ms step_avg:81.24ms +[2025-07-06 10:56:08] [Rank 0] step:921/10000 train_time:74825ms step_avg:81.24ms +[2025-07-06 10:56:09] [Rank 0] step:941/10000 train_time:76303ms step_avg:81.09ms +[2025-07-06 10:56:09] [Rank 0] step:941/10000 train_time:76303ms step_avg:81.09ms +[2025-07-06 10:56:11] [Rank 0] step:961/10000 train_time:77777ms step_avg:80.93ms +[2025-07-06 10:56:11] [Rank 0] step:961/10000 train_time:77777ms step_avg:80.93ms +[2025-07-06 10:56:12] [Rank 0] step:981/10000 train_time:79252ms step_avg:80.79ms +[2025-07-06 10:56:12] [Rank 0] step:981/10000 train_time:79252ms step_avg:80.79ms +[2025-07-06 10:56:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 10:56:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 10:56:15] [Rank 0] PRINT: step:1000/10000 train_loss:2.9588 val_loss:2.0170 train_time:81390ms step_avg:81.39ms +[2025-07-06 10:56:15] [Rank 0] PRINT: step:1000/10000 train_loss:2.9588 val_loss:2.0170 train_time:81390ms step_avg:81.39ms +[2025-07-06 10:56:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 10:56:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 10:56:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 10:56:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 10:56:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 10:56:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 11:01:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 11:01:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 11:01:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 11:01:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 11:01:33] [Rank 0] Total Loss: 4.0488 +[2025-07-06 11:01:33] [Rank 0] Total Loss: 4.0488 +[2025-07-06 11:01:33] [Rank 0] Total FTA: 0.0944 +[2025-07-06 11:01:33] [Rank 0] Total FTA: 0.0944 +[2025-07-06 11:01:33] [Rank 0] Group 0 Loss: 4.2966 +[2025-07-06 11:01:33] [Rank 0] Group 0 Loss: 4.2966 +[2025-07-06 11:01:33] [Rank 0] Group 1 Loss: 4.0785 +[2025-07-06 11:01:33] [Rank 0] Group 1 Loss: 4.0785 +[2025-07-06 11:01:33] [Rank 0] Group 2 Loss: 3.9363 +[2025-07-06 11:01:33] [Rank 0] Group 2 Loss: 3.9363 +[2025-07-06 11:01:33] [Rank 0] Group 3 Loss: 3.9797 +[2025-07-06 11:01:33] [Rank 0] Group 3 Loss: 3.9797 +[2025-07-06 11:01:33] [Rank 0] Group 4 Loss: 3.9911 +[2025-07-06 11:01:33] [Rank 0] Group 4 Loss: 3.9911 +[2025-07-06 11:01:33] [Rank 0] Group 5 Loss: 3.9794 +[2025-07-06 11:01:33] [Rank 0] Group 5 Loss: 3.9794 +[2025-07-06 11:01:33] [Rank 0] Group 6 Loss: 3.9610 +[2025-07-06 11:01:33] [Rank 0] Group 6 Loss: 3.9610 +[2025-07-06 11:01:33] [Rank 0] Group 7 Loss: 4.0414 +[2025-07-06 11:01:33] [Rank 0] Group 7 Loss: 4.0414 +[2025-07-06 11:01:33] [Rank 0] Group 8 Loss: 4.0219 +[2025-07-06 11:01:33] [Rank 0] Group 8 Loss: 4.0219 +[2025-07-06 11:01:33] [Rank 0] Group 9 Loss: 3.9794 +[2025-07-06 11:01:33] [Rank 0] Group 9 Loss: 3.9794 +[2025-07-06 11:01:33] [Rank 0] Group 10 Loss: 4.0163 +[2025-07-06 11:01:33] [Rank 0] Group 10 Loss: 4.0163 +[2025-07-06 11:01:33] [Rank 0] Group 11 Loss: 4.0469 +[2025-07-06 11:01:33] [Rank 0] Group 11 Loss: 4.0469 +[2025-07-06 11:01:33] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-06 11:01:33] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-06 11:01:33] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 11:01:33] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 11:01:33] [Rank 0] Group 2 FTA: 0.1667 +[2025-07-06 11:01:33] [Rank 0] Group 2 FTA: 0.1667 +[2025-07-06 11:01:33] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-06 11:01:33] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-06 11:01:33] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-06 11:01:33] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-06 11:01:33] [Rank 0] Group 5 FTA: 0.0964 +[2025-07-06 11:01:33] [Rank 0] Group 5 FTA: 0.0964 +[2025-07-06 11:01:33] [Rank 0] Group 6 FTA: 0.0807 +[2025-07-06 11:01:33] [Rank 0] Group 6 FTA: 0.0807 +[2025-07-06 11:01:33] [Rank 0] Group 7 FTA: 0.1068 +[2025-07-06 11:01:33] [Rank 0] Group 7 FTA: 0.1068 +[2025-07-06 11:01:33] [Rank 0] Group 8 FTA: 0.1042 +[2025-07-06 11:01:33] [Rank 0] Group 8 FTA: 0.1042 +[2025-07-06 11:01:33] [Rank 0] Group 9 FTA: 0.0703 
+[2025-07-06 11:01:33] [Rank 0] Group 9 FTA: 0.0703 +[2025-07-06 11:01:33] [Rank 0] Group 10 FTA: 0.0820 +[2025-07-06 11:01:33] [Rank 0] Group 10 FTA: 0.0820 +[2025-07-06 11:01:33] [Rank 0] Group 11 FTA: 0.1055 +[2025-07-06 11:01:33] [Rank 0] Group 11 FTA: 0.1055 +[2025-07-06 11:01:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-06 11:01:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-06 11:01:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-06 11:01:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-06 11:01:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-06 11:01:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-06 11:01:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-06 11:01:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-06 11:01:35] [Rank 0] step:1001/10000 train_time:81410ms step_avg:81.33ms +[2025-07-06 11:01:35] [Rank 0] step:1001/10000 train_time:81410ms step_avg:81.33ms +[2025-07-06 11:01:37] [Rank 0] step:1021/10000 train_time:82882ms step_avg:81.18ms +[2025-07-06 11:01:37] [Rank 0] step:1021/10000 train_time:82882ms step_avg:81.18ms +[2025-07-06 11:01:38] [Rank 0] step:1041/10000 train_time:84350ms step_avg:81.03ms +[2025-07-06 11:01:38] [Rank 0] step:1041/10000 train_time:84350ms step_avg:81.03ms +[2025-07-06 11:01:39] [Rank 0] step:1061/10000 train_time:85818ms step_avg:80.88ms +[2025-07-06 11:01:39] [Rank 0] step:1061/10000 train_time:85818ms step_avg:80.88ms +[2025-07-06 11:01:42] [Rank 0] step:1081/10000 train_time:87389ms step_avg:80.84ms +[2025-07-06 11:01:42] [Rank 0] step:1081/10000 train_time:87389ms step_avg:80.84ms +[2025-07-06 11:01:43] [Rank 0] step:1101/10000 train_time:89507ms step_avg:81.30ms +[2025-07-06 11:01:43] [Rank 0] step:1101/10000 train_time:89507ms step_avg:81.30ms +[2025-07-06 11:01:45] [Rank 0] step:1121/10000 train_time:90978ms step_avg:81.16ms +[2025-07-06 11:01:45] [Rank 0] step:1121/10000 train_time:90978ms step_avg:81.16ms +[2025-07-06 11:01:46] [Rank 0] step:1141/10000 train_time:92446ms step_avg:81.02ms +[2025-07-06 11:01:46] [Rank 0] step:1141/10000 train_time:92446ms step_avg:81.02ms +[2025-07-06 11:01:48] [Rank 0] step:1161/10000 train_time:94019ms step_avg:80.98ms +[2025-07-06 11:01:48] [Rank 0] step:1161/10000 train_time:94019ms step_avg:80.98ms +[2025-07-06 11:01:50] [Rank 0] step:1181/10000 train_time:95864ms step_avg:81.17ms +[2025-07-06 11:01:50] [Rank 0] step:1181/10000 train_time:95864ms step_avg:81.17ms +[2025-07-06 11:01:51] [Rank 0] step:1201/10000 train_time:97337ms step_avg:81.05ms +[2025-07-06 11:01:51] [Rank 0] step:1201/10000 train_time:97337ms step_avg:81.05ms +[2025-07-06 11:01:52] [Rank 0] step:1221/10000 train_time:98804ms step_avg:80.92ms +[2025-07-06 11:01:52] [Rank 0] step:1221/10000 train_time:98804ms step_avg:80.92ms 
+[2025-07-06 11:01:54] [Rank 0] step:1241/10000 train_time:100375ms step_avg:80.88ms
+[2025-07-06 11:01:56] [Rank 0] step:1261/10000 train_time:102204ms step_avg:81.05ms
+[2025-07-06 11:01:58] [Rank 0] step:1281/10000 train_time:104069ms step_avg:81.24ms
+[2025-07-06 11:01:59] [Rank 0] step:1301/10000 train_time:105543ms step_avg:81.12ms
+[2025-07-06 11:02:01] [Rank 0] step:1321/10000 train_time:107123ms step_avg:81.09ms
+[2025-07-06 11:02:02] [Rank 0] step:1341/10000 train_time:108598ms step_avg:80.98ms
+[2025-07-06 11:02:04] [Rank 0] step:1361/10000 train_time:110722ms step_avg:81.35ms
+[2025-07-06 11:02:06] [Rank 0] step:1381/10000 train_time:112199ms step_avg:81.24ms
+[2025-07-06 11:02:07] [Rank 0] step:1401/10000 train_time:113672ms step_avg:81.14ms
+[2025-07-06 11:02:09] [Rank 0] step:1421/10000 train_time:115147ms step_avg:81.03ms
+[2025-07-06 11:02:11] [Rank 0] step:1441/10000 train_time:117305ms step_avg:81.41ms
+[2025-07-06 11:02:13] [Rank 0] step:1461/10000 train_time:118863ms step_avg:81.36ms
+[2025-07-06 11:02:14] [Rank 0] step:1481/10000 train_time:120336ms step_avg:81.25ms
+[2025-07-06 11:02:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:02:16] [Rank 0] PRINT: step:1500/10000 train_loss:1.7483 val_loss:1.5915 train_time:121810ms step_avg:81.21ms
+[2025-07-06 11:02:16] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:02:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
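Stratified sampling here means drawing per group rather than uniformly, so rare classes keep a presence in the ~5000-sample evaluation set; it would also explain why the final size (5633) need not hit the target exactly, e.g. if per-group quotas are rounded up or small groups are kept whole. A hypothetical sketch of such a draw (the actual sampler lives in the training script and may differ):

    import random
    from collections import defaultdict

    def stratified_sample(samples, target=5000, seed=42):
        # samples: iterable of (group_id, example) pairs
        rng = random.Random(seed)
        by_group = defaultdict(list)
        for gid, ex in samples:
            by_group[gid].append(ex)
        quota = -(-target // len(by_group))  # ceil(target / num_groups)
        picked = []
        for exs in by_group.values():
            picked += rng.sample(exs, min(quota, len(exs)))
        return picked  # near, not necessarily equal to, the target size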
+[2025-07-06 11:02:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:07:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:07:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:07:34] [Rank 0] Total Loss: 4.1879
+[2025-07-06 11:07:34] [Rank 0] Total FTA: 0.1489
+[2025-07-06 11:07:34] [Rank 0] Group 0 Loss: 4.4933
+[2025-07-06 11:07:34] [Rank 0] Group 1 Loss: 4.0713
+[2025-07-06 11:07:34] [Rank 0] Group 2 Loss: 4.0037
+[2025-07-06 11:07:34] [Rank 0] Group 3 Loss: 4.1593
+[2025-07-06 11:07:34] [Rank 0] Group 4 Loss: 4.1710
+[2025-07-06 11:07:34] [Rank 0] Group 5 Loss: 4.1591
+[2025-07-06 11:07:34] [Rank 0] Group 6 Loss: 4.0736
+[2025-07-06 11:07:34] [Rank 0] Group 7 Loss: 4.1896
+[2025-07-06 11:07:34] [Rank 0] Group 8 Loss: 4.1524
+[2025-07-06 11:07:34] [Rank 0] Group 9 Loss: 4.1220
+[2025-07-06 11:07:35] [Rank 0] Group 10 Loss: 4.1763
+[2025-07-06 11:07:35] [Rank 0] Group 11 Loss: 4.1770
+[2025-07-06 11:07:35] [Rank 0] Group 0 FTA: 0.3342
+[2025-07-06 11:07:35] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 11:07:35] [Rank 0] Group 2 FTA: 0.2812
+[2025-07-06 11:07:35] [Rank 0] Group 3 FTA: 0.1016
+[2025-07-06 11:07:35] [Rank 0] Group 4 FTA: 0.0807
+[2025-07-06 11:07:35] [Rank 0] Group 5 FTA: 0.0469
+[2025-07-06 11:07:35] [Rank 0] Group 6 FTA: 0.1432
+[2025-07-06 11:07:35] [Rank 0] Group 7 FTA: 0.1224
+[2025-07-06 11:07:35] [Rank 0] Group 8 FTA: 0.1198
+[2025-07-06 11:07:35] [Rank 0] Group 9 FTA: 0.1055
+[2025-07-06 11:07:35] [Rank 0] Group 10 FTA: 0.1445
+[2025-07-06 11:07:35] [Rank 0] Group 11 FTA: 0.1338
+[2025-07-06 11:07:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:07:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:07:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:07:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
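Each [✓] line above corresponds to rewriting one PNG with the full history up to the current step, which keeps the plots valid even if the run dies mid-training. A minimal sketch of such an update, assuming matplotlib and an accumulated (step, value) history; none of these names come from the script itself:

    import matplotlib
    matplotlib.use("Agg")  # render off-screen; no display on the training node
    import matplotlib.pyplot as plt

    def update_curve(history, path, ylabel):
        # history: list of (step, value) pairs grown by one point per evaluation
        steps, values = zip(*history)
        plt.figure()
        plt.plot(steps, values)
        plt.xlabel("step")
        plt.ylabel(ylabel)
        plt.savefig(path)  # silently overwrites the previous version
        plt.close()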
+[2025-07-06 11:07:37] [Rank 0] step:1501/10000 train_time:121830ms step_avg:81.17ms
+[2025-07-06 11:07:38] [Rank 0] step:1521/10000 train_time:123300ms step_avg:81.07ms
+[2025-07-06 11:07:40] [Rank 0] step:1541/10000 train_time:125516ms step_avg:81.45ms
+[2025-07-06 11:07:42] [Rank 0] step:1561/10000 train_time:126984ms step_avg:81.35ms
+[2025-07-06 11:07:43] [Rank 0] step:1581/10000 train_time:128453ms step_avg:81.25ms
+[2025-07-06 11:07:45] [Rank 0] step:1601/10000 train_time:129922ms step_avg:81.15ms
+[2025-07-06 11:07:47] [Rank 0] step:1621/10000 train_time:132071ms step_avg:81.47ms
+[2025-07-06 11:07:48] [Rank 0] step:1641/10000 train_time:133520ms step_avg:81.36ms
+[2025-07-06 11:07:50] [Rank 0] step:1661/10000 train_time:134988ms step_avg:81.27ms
+[2025-07-06 11:07:51] [Rank 0] step:1681/10000 train_time:136559ms step_avg:81.24ms
+[2025-07-06 11:07:53] [Rank 0] step:1701/10000 train_time:138030ms step_avg:81.15ms
+[2025-07-06 11:07:54] [Rank 0] step:1721/10000 train_time:139737ms step_avg:81.20ms
+[2025-07-06 11:07:56] [Rank 0] step:1741/10000 train_time:141310ms step_avg:81.17ms
+[2025-07-06 11:07:58] [Rank 0] step:1761/10000 train_time:142883ms step_avg:81.14ms
+[2025-07-06 11:07:59] [Rank 0] step:1781/10000 train_time:144357ms step_avg:81.05ms
+[2025-07-06 11:08:01] [Rank 0] step:1801/10000 train_time:145987ms step_avg:81.06ms
+[2025-07-06 11:08:03] [Rank 0] step:1821/10000 train_time:148053ms step_avg:81.30ms
+[2025-07-06 11:08:04] [Rank 0] step:1841/10000 train_time:149528ms step_avg:81.22ms
+[2025-07-06 11:08:06] [Rank 0] step:1861/10000 train_time:151000ms step_avg:81.14ms
+[2025-07-06 11:08:07] [Rank 0] step:1881/10000 train_time:152472ms step_avg:81.06ms
+[2025-07-06 11:08:09] [Rank 0] step:1901/10000 train_time:154695ms step_avg:81.38ms
+[2025-07-06 11:08:11] [Rank 0] step:1921/10000 train_time:156168ms step_avg:81.30ms
+[2025-07-06 11:08:12] [Rank 0] step:1941/10000 train_time:157640ms step_avg:81.22ms
+[2025-07-06 11:08:14] [Rank 0] step:1961/10000 train_time:159114ms step_avg:81.14ms
+[2025-07-06 11:08:16] [Rank 0] step:1981/10000 train_time:160637ms step_avg:81.09ms
+[2025-07-06 11:08:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:08:18] [Rank 0] PRINT: step:2000/10000 train_loss:1.4938 val_loss:1.4320 train_time:162707ms step_avg:81.35ms
+[2025-07-06 11:08:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:08:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
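The divisibility warning that recurs before each of these validation passes is plain integer arithmetic: 1966080 val_tokens over a 262144-token val batch gives 7.5, so the loop can only run 7 full batches and the trailing 131072 tokens go unscored. The same check in two lines:

    val_tokens, val_batch_size = 1_966_080, 262_144
    n_batches, leftover = divmod(val_tokens, val_batch_size)
    print(n_batches, leftover)  # 7 131072: half a batch is dropped each validation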
+[2025-07-06 11:08:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:13:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:13:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:13:37] [Rank 0] Total Loss: 4.3059
+[2025-07-06 11:13:37] [Rank 0] Total FTA: 0.2100
+[2025-07-06 11:13:37] [Rank 0] Group 0 Loss: 4.6461
+[2025-07-06 11:13:37] [Rank 0] Group 1 Loss: 4.2867
+[2025-07-06 11:13:38] [Rank 0] Group 2 Loss: 4.1008
+[2025-07-06 11:13:38] [Rank 0] Group 3 Loss: 4.2642
+[2025-07-06 11:13:38] [Rank 0] Group 4 Loss: 4.1915
+[2025-07-06 11:13:38] [Rank 0] Group 5 Loss: 4.1512
+[2025-07-06 11:13:38] [Rank 0] Group 6 Loss: 4.2181
+[2025-07-06 11:13:38] [Rank 0] Group 7 Loss: 4.3134
+[2025-07-06 11:13:38] [Rank 0] Group 8 Loss: 4.3057
+[2025-07-06 11:13:38] [Rank 0] Group 9 Loss: 4.2433
+[2025-07-06 11:13:38] [Rank 0] Group 10 Loss: 4.2898
+[2025-07-06 11:13:38] [Rank 0] Group 11 Loss: 4.3049
+[2025-07-06 11:13:38] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-06 11:13:38] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-06 11:13:38] [Rank 0] Group 2 FTA: 0.3438
+[2025-07-06 11:13:38] [Rank 0] Group 3 FTA: 0.1562
+[2025-07-06 11:13:38] [Rank 0] Group 4 FTA: 0.1849
+[2025-07-06 11:13:38] [Rank 0] Group 5 FTA: 0.1589
+[2025-07-06 11:13:38] [Rank 0] Group 6 FTA: 0.2057
+[2025-07-06 11:13:38] [Rank 0] Group 7 FTA: 0.2318
+[2025-07-06 11:13:38] [Rank 0] Group 8 FTA: 0.2240
+[2025-07-06 11:13:38] [Rank 0] Group 9 FTA: 0.2344
+[2025-07-06 11:13:39] [Rank 0] Group 10 FTA: 0.2441
+[2025-07-06 11:13:39] [Rank 0] Group 11 FTA: 0.2324
+[2025-07-06 11:13:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:13:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:13:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:13:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 11:13:40] [Rank 0] step:2001/10000 train_time:162727ms step_avg:81.32ms
+[2025-07-06 11:13:42] [Rank 0] step:2021/10000 train_time:164298ms step_avg:81.30ms
+[2025-07-06 11:13:43] [Rank 0] step:2041/10000 train_time:165765ms step_avg:81.22ms
+[2025-07-06 11:13:45] [Rank 0] step:2061/10000 train_time:167234ms step_avg:81.14ms
+[2025-07-06 11:13:47] [Rank 0] step:2081/10000 train_time:169345ms step_avg:81.38ms
+[2025-07-06 11:13:48] [Rank 0] step:2101/10000 train_time:170813ms step_avg:81.30ms
+[2025-07-06 11:13:50] [Rank 0] step:2121/10000 train_time:172281ms step_avg:81.23ms
+[2025-07-06 11:13:51] [Rank 0] step:2141/10000 train_time:173750ms step_avg:81.15ms
+[2025-07-06 11:13:53] [Rank 0] step:2161/10000 train_time:175476ms step_avg:81.20ms
+[2025-07-06 11:13:55] [Rank 0] step:2181/10000 train_time:177345ms step_avg:81.31ms
+[2025-07-06 11:13:56] [Rank 0] step:2201/10000 train_time:178814ms step_avg:81.24ms
+[2025-07-06 11:13:58] [Rank 0] step:2221/10000 train_time:180386ms step_avg:81.22ms
+[2025-07-06 11:13:59] [Rank 0] step:2241/10000 train_time:181878ms step_avg:81.16ms
+[2025-07-06 11:14:02] [Rank 0] step:2261/10000 train_time:184013ms step_avg:81.39ms
+[2025-07-06 11:14:03] [Rank 0] step:2281/10000 train_time:185609ms step_avg:81.37ms
+[2025-07-06 11:14:05] [Rank 0] step:2301/10000 train_time:187106ms step_avg:81.31ms
+[2025-07-06 11:14:06] [Rank 0] step:2321/10000 train_time:188604ms step_avg:81.26ms
+[2025-07-06 11:14:08] [Rank 0] step:2341/10000 train_time:190102ms step_avg:81.21ms
+[2025-07-06 11:14:09] [Rank 0] step:2361/10000 train_time:191833ms step_avg:81.25ms
+[2025-07-06 11:14:11] [Rank 0] step:2381/10000 train_time:193330ms step_avg:81.20ms
+[2025-07-06 11:14:12] [Rank 0] step:2401/10000 train_time:194829ms step_avg:81.14ms
+[2025-07-06 11:14:14] [Rank 0] step:2421/10000 train_time:196326ms step_avg:81.09ms
+[2025-07-06 11:14:16] [Rank 0] step:2441/10000 train_time:198478ms step_avg:81.31ms
+[2025-07-06 11:14:18] [Rank 0] step:2461/10000 train_time:199977ms step_avg:81.26ms
+[2025-07-06 11:14:19] [Rank 0] step:2481/10000 train_time:201474ms step_avg:81.21ms
+[2025-07-06 11:14:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:14:21] [Rank 0] PRINT: step:2500/10000 train_loss:1.3613 val_loss:1.3019 train_time:202974ms step_avg:81.19ms
+[2025-07-06 11:14:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:14:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 11:14:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:19:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:19:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:19:40] [Rank 0] Total Loss: 4.5355
+[2025-07-06 11:19:40] [Rank 0] Total FTA: 0.3101
+[2025-07-06 11:19:40] [Rank 0] Group 0 Loss: 4.7991
+[2025-07-06 11:19:40] [Rank 0] Group 1 Loss: 4.4362
+[2025-07-06 11:19:40] [Rank 0] Group 2 Loss: 4.1761
+[2025-07-06 11:19:40] [Rank 0] Group 3 Loss: 4.4866
+[2025-07-06 11:19:40] [Rank 0] Group 4 Loss: 4.4978
+[2025-07-06 11:19:40] [Rank 0] Group 5 Loss: 4.4728
+[2025-07-06 11:19:40] [Rank 0] Group 6 Loss: 4.5035
+[2025-07-06 11:19:40] [Rank 0] Group 7 Loss: 4.5499
+[2025-07-06 11:19:40] [Rank 0] Group 8 Loss: 4.5680
+[2025-07-06 11:19:40] [Rank 0] Group 9 Loss: 4.5421
+[2025-07-06 11:19:40] [Rank 0] Group 10 Loss: 4.5642
+[2025-07-06 11:19:40] [Rank 0] Group 11 Loss: 4.5438
+[2025-07-06 11:19:40] [Rank 0] Group 0 FTA: 0.1912
+[2025-07-06 11:19:40] [Rank 0] Group 1 FTA: 0.4974
+[2025-07-06 11:19:40] [Rank 0] Group 2 FTA: 0.3932
+[2025-07-06 11:19:40] [Rank 0] Group 3 FTA: 0.2031
+[2025-07-06 11:19:40] [Rank 0] Group 4 FTA: 0.2604
+[2025-07-06 11:19:40] [Rank 0] Group 5 FTA: 0.3203
+[2025-07-06 11:19:40] [Rank 0] Group 6 FTA: 0.2891
+[2025-07-06 11:19:40] [Rank 0] Group 7 FTA: 0.3125
+[2025-07-06 11:19:40] [Rank 0] Group 8 FTA: 0.3490
+[2025-07-06 11:19:40] [Rank 0] Group 9 FTA: 0.2930
+[2025-07-06 11:19:40] [Rank 0] Group 10 FTA: 0.3613
+[2025-07-06 11:19:40] [Rank 0] Group 11 FTA: 0.3242
+[2025-07-06 11:19:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:19:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:19:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:19:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 11:19:42] [Rank 0] step:2501/10000 train_time:202995ms step_avg:81.17ms
+[2025-07-06 11:19:44] [Rank 0] step:2521/10000 train_time:204670ms step_avg:81.19ms
+[2025-07-06 11:19:45] [Rank 0] step:2541/10000 train_time:206218ms step_avg:81.16ms
+[2025-07-06 11:19:47] [Rank 0] step:2561/10000 train_time:207708ms step_avg:81.10ms
+[2025-07-06 11:19:48] [Rank 0] step:2581/10000 train_time:209198ms step_avg:81.05ms
+[2025-07-06 11:19:50] [Rank 0] step:2601/10000 train_time:210689ms step_avg:81.00ms
+[2025-07-06 11:19:52] [Rank 0] step:2621/10000 train_time:212849ms step_avg:81.21ms
+[2025-07-06 11:19:53] [Rank 0] step:2641/10000 train_time:214340ms step_avg:81.16ms
+[2025-07-06 11:19:55] [Rank 0] step:2661/10000 train_time:215833ms step_avg:81.11ms
+[2025-07-06 11:19:56] [Rank 0] step:2681/10000 train_time:217326ms step_avg:81.06ms
+[2025-07-06 11:19:58] [Rank 0] step:2701/10000 train_time:219485ms step_avg:81.26ms
+[2025-07-06 11:20:00] [Rank 0] step:2721/10000 train_time:220958ms step_avg:81.20ms
+[2025-07-06 11:20:01] [Rank 0] step:2741/10000 train_time:222452ms step_avg:81.16ms
+[2025-07-06 11:20:03] [Rank 0] step:2761/10000 train_time:223947ms step_avg:81.11ms
+[2025-07-06 11:20:04] [Rank 0] step:2781/10000 train_time:225442ms step_avg:81.06ms
+[2025-07-06 11:20:07] [Rank 0] step:2801/10000 train_time:227602ms step_avg:81.26ms
+[2025-07-06 11:20:08] [Rank 0] step:2821/10000 train_time:229097ms step_avg:81.21ms
+[2025-07-06 11:20:10] [Rank 0] step:2841/10000 train_time:230592ms step_avg:81.17ms
+[2025-07-06 11:20:11] [Rank 0] step:2861/10000 train_time:232088ms step_avg:81.12ms
+[2025-07-06 11:20:13] [Rank 0] step:2881/10000 train_time:234268ms step_avg:81.31ms
+[2025-07-06 11:20:15] [Rank 0] step:2901/10000 train_time:235744ms step_avg:81.26ms
+[2025-07-06 11:20:16] [Rank 0] step:2921/10000 train_time:237241ms step_avg:81.22ms
+[2025-07-06 11:20:18] [Rank 0] step:2941/10000 train_time:238841ms step_avg:81.21ms
+[2025-07-06 11:20:19] [Rank 0] step:2961/10000 train_time:240341ms step_avg:81.17ms
+[2025-07-06 11:20:21] [Rank 0] step:2981/10000 train_time:242481ms step_avg:81.34ms
+[2025-07-06 11:20:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:20:24] [Rank 0] PRINT: step:3000/10000 train_loss:1.2604 val_loss:1.2128 train_time:244079ms step_avg:81.36ms
+[2025-07-06 11:20:24] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:20:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 11:20:24] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:25:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:25:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:25:43] [Rank 0] Total Loss: 4.6568
+[2025-07-06 11:25:43] [Rank 0] Total FTA: 0.4060
+[2025-07-06 11:25:43] [Rank 0] Group 0 Loss: 4.8534
+[2025-07-06 11:25:43] [Rank 0] Group 1 Loss: 4.5286
+[2025-07-06 11:25:43] [Rank 0] Group 2 Loss: 4.3828
+[2025-07-06 11:25:43] [Rank 0] Group 3 Loss: 4.6424
+[2025-07-06 11:25:43] [Rank 0] Group 4 Loss: 4.6642
+[2025-07-06 11:25:43] [Rank 0] Group 5 Loss: 4.6038
+[2025-07-06 11:25:43] [Rank 0] Group 6 Loss: 4.5773
+[2025-07-06 11:25:43] [Rank 0] Group 7 Loss: 4.7030
+[2025-07-06 11:25:43] [Rank 0] Group 8 Loss: 4.6808
+[2025-07-06 11:25:43] [Rank 0] Group 9 Loss: 4.6528
+[2025-07-06 11:25:43] [Rank 0] Group 10 Loss: 4.6699
+[2025-07-06 11:25:43] [Rank 0] Group 11 Loss: 4.6804
+[2025-07-06 11:25:43] [Rank 0] Group 0 FTA: 0.5345
+[2025-07-06 11:25:43] [Rank 0] Group 1 FTA: 0.4740
+[2025-07-06 11:25:43] [Rank 0] Group 2 FTA: 0.4141
+[2025-07-06 11:25:43] [Rank 0] Group 3 FTA: 0.2656
+[2025-07-06 11:25:43] [Rank 0] Group 4 FTA: 0.4219
+[2025-07-06 11:25:43] [Rank 0] Group 5 FTA: 0.4557
+[2025-07-06 11:25:43] [Rank 0] Group 6 FTA: 0.3203
+[2025-07-06 11:25:43] [Rank 0] Group 7 FTA: 0.4193
+[2025-07-06 11:25:43] [Rank 0] Group 8 FTA: 0.3750
+[2025-07-06 11:25:43] [Rank 0] Group 9 FTA: 0.3711
+[2025-07-06 11:25:43] [Rank 0] Group 10 FTA: 0.3516
+[2025-07-06 11:25:43] [Rank 0] Group 11 FTA: 0.3838
+[2025-07-06 11:25:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:25:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:25:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:25:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 11:25:45] [Rank 0] step:3001/10000 train_time:244100ms step_avg:81.34ms
+[2025-07-06 11:25:46] [Rank 0] step:3021/10000 train_time:245616ms step_avg:81.30ms
+[2025-07-06 11:25:48] [Rank 0] step:3041/10000 train_time:247105ms step_avg:81.26ms
+[2025-07-06 11:25:50] [Rank 0] step:3061/10000 train_time:248649ms step_avg:81.23ms
+[2025-07-06 11:25:52] [Rank 0] step:3081/10000 train_time:250737ms step_avg:81.38ms
+[2025-07-06 11:25:53] [Rank 0] step:3101/10000 train_time:252228ms step_avg:81.34ms
+[2025-07-06 11:25:55] [Rank 0] step:3121/10000 train_time:253721ms step_avg:81.29ms
+[2025-07-06 11:25:56] [Rank 0] step:3141/10000 train_time:255213ms step_avg:81.25ms
+[2025-07-06 11:25:58] [Rank 0] step:3161/10000 train_time:257349ms step_avg:81.41ms
+[2025-07-06 11:26:00] [Rank 0] step:3181/10000 train_time:258839ms step_avg:81.37ms
+[2025-07-06 11:26:01] [Rank 0] step:3201/10000 train_time:260332ms step_avg:81.33ms
+[2025-07-06 11:26:03] [Rank 0] step:3221/10000 train_time:261826ms step_avg:81.29ms
+[2025-07-06 11:26:05] [Rank 0] step:3241/10000 train_time:263559ms step_avg:81.32ms
+[2025-07-06 11:26:06] [Rank 0] step:3261/10000 train_time:265300ms step_avg:81.36ms
+[2025-07-06 11:26:08] [Rank 0] step:3281/10000 train_time:266794ms step_avg:81.31ms
+[2025-07-06 11:26:09] [Rank 0] step:3301/10000 train_time:268386ms step_avg:81.30ms
+[2025-07-06 11:26:11] [Rank 0] step:3321/10000 train_time:269882ms step_avg:81.27ms
+[2025-07-06 11:26:13] [Rank 0] step:3341/10000 train_time:272134ms step_avg:81.45ms
+[2025-07-06 11:26:14] [Rank 0] step:3361/10000 train_time:273631ms step_avg:81.41ms
+[2025-07-06 11:26:16] [Rank 0] step:3381/10000 train_time:275126ms step_avg:81.37ms
+[2025-07-06 11:26:17] [Rank 0] step:3401/10000 train_time:276623ms step_avg:81.34ms
+[2025-07-06 11:26:20] [Rank 0] step:3421/10000 train_time:278168ms step_avg:81.31ms
+[2025-07-06 11:26:21] [Rank 0] step:3441/10000 train_time:280278ms step_avg:81.45ms
+[2025-07-06 11:26:23] [Rank 0] step:3461/10000 train_time:281876ms step_avg:81.44ms
+[2025-07-06 11:26:24] [Rank 0] step:3481/10000 train_time:283373ms step_avg:81.41ms
+[2025-07-06 11:26:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:26:27] [Rank 0] PRINT: step:3500/10000 train_loss:1.1867 val_loss:1.1525 train_time:284871ms step_avg:81.39ms
+[2025-07-06 11:26:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:26:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 11:26:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:31:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:31:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:31:47] [Rank 0] Total Loss: 4.9846
+[2025-07-06 11:31:47] [Rank 0] Total FTA: 0.4179
+[2025-07-06 11:31:47] [Rank 0] Group 0 Loss: 5.3059
+[2025-07-06 11:31:47] [Rank 0] Group 1 Loss: 4.7510
+[2025-07-06 11:31:47] [Rank 0] Group 2 Loss: 4.7056
+[2025-07-06 11:31:47] [Rank 0] Group 3 Loss: 5.1502
+[2025-07-06 11:31:47] [Rank 0] Group 4 Loss: 4.9629
+[2025-07-06 11:31:47] [Rank 0] Group 5 Loss: 4.9013
+[2025-07-06 11:31:48] [Rank 0] Group 6 Loss: 4.9342
+[2025-07-06 11:31:48] [Rank 0] Group 7 Loss: 4.9951
+[2025-07-06 11:31:48] [Rank 0] Group 8 Loss: 4.9521
+[2025-07-06 11:31:48] [Rank 0] Group 9 Loss: 4.9298
+[2025-07-06 11:31:48] [Rank 0] Group 10 Loss: 4.9533
+[2025-07-06 11:31:48] [Rank 0] Group 11 Loss: 4.9693
+[2025-07-06 11:31:48] [Rank 0] Group 0 FTA: 0.3394
+[2025-07-06 11:31:48] [Rank 0] Group 1 FTA: 0.3620
+[2025-07-06 11:31:48] [Rank 0] Group 2 FTA: 0.4401
+[2025-07-06 11:31:48] [Rank 0] Group 3 FTA: 0.3828
+[2025-07-06 11:31:48] [Rank 0] Group 4 FTA: 0.4167
+[2025-07-06 11:31:48] [Rank 0] Group 5 FTA: 0.5208
+[2025-07-06 11:31:48] [Rank 0] Group 6 FTA: 0.3828
+[2025-07-06 11:31:48] [Rank 0] Group 7 FTA: 0.4245
+[2025-07-06 11:31:48] [Rank 0] Group 8 FTA: 0.4505
+[2025-07-06 11:31:48] [Rank 0] Group 9 FTA: 0.4531
+[2025-07-06 11:31:49] [Rank 0] Group 10 FTA: 0.4277
+[2025-07-06 11:31:49] [Rank 0] Group 11 FTA: 0.4492
+[2025-07-06 11:31:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:31:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:31:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:31:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 11:31:51] [Rank 0] step:3501/10000 train_time:284893ms step_avg:81.37ms
+[2025-07-06 11:31:53] [Rank 0] step:3521/10000 train_time:287053ms step_avg:81.53ms
+[2025-07-06 11:31:54] [Rank 0] step:3541/10000 train_time:288540ms step_avg:81.49ms
+[2025-07-06 11:31:56] [Rank 0] step:3561/10000 train_time:290030ms step_avg:81.45ms
+[2025-07-06 11:31:57] [Rank 0] step:3581/10000 train_time:291521ms step_avg:81.41ms
+[2025-07-06 11:31:59] [Rank 0] step:3601/10000 train_time:293168ms step_avg:81.41ms
+[2025-07-06 11:32:01] [Rank 0] step:3621/10000 train_time:294850ms step_avg:81.43ms
+[2025-07-06 11:32:02] [Rank 0] step:3641/10000 train_time:296447ms step_avg:81.42ms
+[2025-07-06 11:32:04] [Rank 0] step:3661/10000 train_time:297939ms step_avg:81.38ms
+[2025-07-06 11:32:05] [Rank 0] step:3681/10000 train_time:299432ms step_avg:81.35ms
+[2025-07-06 11:32:07] [Rank 0] step:3701/10000 train_time:301682ms step_avg:81.51ms
+[2025-07-06 11:32:09] [Rank 0] step:3721/10000 train_time:303275ms step_avg:81.50ms
+[2025-07-06 11:32:10] [Rank 0] step:3741/10000 train_time:304769ms step_avg:81.47ms
+[2025-07-06 11:32:12] [Rank 0] step:3761/10000 train_time:306365ms step_avg:81.46ms
+[2025-07-06 11:32:14] [Rank 0] step:3781/10000 train_time:308223ms step_avg:81.52ms
+[2025-07-06 11:32:16] [Rank 0] step:3801/10000 train_time:310213ms step_avg:81.61ms
+[2025-07-06 11:32:17] [Rank 0] step:3821/10000 train_time:311707ms step_avg:81.58ms
+[2025-07-06 11:32:19] [Rank 0] step:3841/10000 train_time:313203ms step_avg:81.54ms
+[2025-07-06 11:32:20] [Rank 0] step:3861/10000 train_time:314696ms step_avg:81.51ms
+[2025-07-06 11:32:23] [Rank 0] step:3881/10000 train_time:316851ms step_avg:81.64ms
+[2025-07-06 11:32:24] [Rank 0] step:3901/10000 train_time:318576ms step_avg:81.67ms
+[2025-07-06 11:32:26] [Rank 0] step:3921/10000 train_time:320070ms step_avg:81.63ms
+[2025-07-06 11:32:27] [Rank 0] step:3941/10000 train_time:321667ms step_avg:81.62ms
+[2025-07-06 11:32:30] [Rank 0] step:3961/10000 train_time:323932ms step_avg:81.78ms
+[2025-07-06 11:32:31] [Rank 0] step:3981/10000 train_time:325409ms step_avg:81.74ms
+[2025-07-06 11:32:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:32:33] [Rank 0] PRINT: step:4000/10000 train_loss:1.1348 val_loss:1.1087 train_time:326907ms step_avg:81.73ms
+[2025-07-06 11:32:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:32:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 11:32:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:37:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:37:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:37:56] [Rank 0] Total Loss: 4.9502
+[2025-07-06 11:37:56] [Rank 0] Total FTA: 0.4910
+[2025-07-06 11:37:56] [Rank 0] Group 0 Loss: 5.2040
+[2025-07-06 11:37:56] [Rank 0] Group 1 Loss: 4.5710
+[2025-07-06 11:37:56] [Rank 0] Group 2 Loss: 4.6761
+[2025-07-06 11:37:56] [Rank 0] Group 3 Loss: 5.1846
+[2025-07-06 11:37:56] [Rank 0] Group 4 Loss: 4.9141
+[2025-07-06 11:37:56] [Rank 0] Group 5 Loss: 4.8565
+[2025-07-06 11:37:56] [Rank 0] Group 6 Loss: 4.9094
+[2025-07-06 11:37:56] [Rank 0] Group 7 Loss: 4.9569
+[2025-07-06 11:37:56] [Rank 0] Group 8 Loss: 4.9458
+[2025-07-06 11:37:56] [Rank 0] Group 9 Loss: 4.9326
+[2025-07-06 11:37:56] [Rank 0] Group 10 Loss: 4.9874
+[2025-07-06 11:37:56] [Rank 0] Group 11 Loss: 4.9656
+[2025-07-06 11:37:56] [Rank 0] Group 0 FTA: 0.4681
+[2025-07-06 11:37:56] [Rank 0] Group 1 FTA: 0.6927
+[2025-07-06 11:37:56] [Rank 0] Group 2 FTA: 0.4870
+[2025-07-06 11:37:56] [Rank 0] Group 3 FTA: 0.4062
+[2025-07-06 11:37:56] [Rank 0] Group 4 FTA: 0.4401
+[2025-07-06 11:37:56] [Rank 0] Group 5 FTA: 0.5573
+[2025-07-06 11:37:56] [Rank 0] Group 6 FTA: 0.4427
+[2025-07-06 11:37:56] [Rank 0] Group 7 FTA: 0.4505
+[2025-07-06 11:37:56] [Rank 0] Group 8 FTA: 0.4844
+[2025-07-06 11:37:56] [Rank 0] Group 9 FTA: 0.5117
+[2025-07-06 11:37:56] [Rank 0] Group 10 FTA: 0.5098
+[2025-07-06 11:37:56] [Rank 0] Group 11 FTA: 0.4814
+[2025-07-06 11:37:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:37:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:37:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:37:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 11:37:58] [Rank 0] step:4001/10000 train_time:326927ms step_avg:81.71ms
+[2025-07-06 11:37:59] [Rank 0] step:4021/10000 train_time:328443ms step_avg:81.68ms
+[2025-07-06 11:38:01] [Rank 0] step:4041/10000 train_time:329931ms step_avg:81.65ms
+[2025-07-06 11:38:03] [Rank 0] step:4061/10000 train_time:332073ms step_avg:81.77ms
+[2025-07-06 11:38:04] [Rank 0] step:4081/10000 train_time:333566ms step_avg:81.74ms
+[2025-07-06 11:38:06] [Rank 0] step:4101/10000 train_time:335055ms step_avg:81.70ms
+[2025-07-06 11:38:07] [Rank 0] step:4121/10000 train_time:336547ms step_avg:81.67ms
+[2025-07-06 11:38:09] [Rank 0] step:4141/10000 train_time:338194ms step_avg:81.67ms
+[2025-07-06 11:38:11] [Rank 0] step:4161/10000 train_time:339767ms step_avg:81.66ms
+[2025-07-06 11:38:12] [Rank 0] step:4181/10000 train_time:341259ms step_avg:81.62ms
+[2025-07-06 11:38:14] [Rank 0] step:4201/10000 train_time:342755ms step_avg:81.59ms
+[2025-07-06 11:38:15] [Rank 0] step:4221/10000 train_time:344247ms step_avg:81.56ms
+[2025-07-06 11:38:17] [Rank 0] step:4241/10000 train_time:345980ms step_avg:81.58ms
+[2025-07-06 11:38:18] [Rank 0] step:4261/10000 train_time:347472ms step_avg:81.55ms
+[2025-07-06 11:38:20] [Rank 0] step:4281/10000 train_time:348967ms step_avg:81.52ms
+[2025-07-06 11:38:21] [Rank 0] step:4301/10000 train_time:350462ms step_avg:81.48ms
0] step:4301/10000 train_time:350462ms step_avg:81.48ms +[2025-07-06 11:38:23] [Rank 0] step:4321/10000 train_time:351958ms step_avg:81.45ms +[2025-07-06 11:38:23] [Rank 0] step:4321/10000 train_time:351958ms step_avg:81.45ms +[2025-07-06 11:38:25] [Rank 0] step:4341/10000 train_time:354105ms step_avg:81.57ms +[2025-07-06 11:38:25] [Rank 0] step:4341/10000 train_time:354105ms step_avg:81.57ms +[2025-07-06 11:38:27] [Rank 0] step:4361/10000 train_time:355701ms step_avg:81.56ms +[2025-07-06 11:38:27] [Rank 0] step:4361/10000 train_time:355701ms step_avg:81.56ms +[2025-07-06 11:38:28] [Rank 0] step:4381/10000 train_time:357199ms step_avg:81.53ms +[2025-07-06 11:38:28] [Rank 0] step:4381/10000 train_time:357199ms step_avg:81.53ms +[2025-07-06 11:38:30] [Rank 0] step:4401/10000 train_time:358695ms step_avg:81.50ms +[2025-07-06 11:38:30] [Rank 0] step:4401/10000 train_time:358695ms step_avg:81.50ms +[2025-07-06 11:38:32] [Rank 0] step:4421/10000 train_time:360941ms step_avg:81.64ms +[2025-07-06 11:38:32] [Rank 0] step:4421/10000 train_time:360941ms step_avg:81.64ms +[2025-07-06 11:38:33] [Rank 0] step:4441/10000 train_time:362437ms step_avg:81.61ms +[2025-07-06 11:38:33] [Rank 0] step:4441/10000 train_time:362437ms step_avg:81.61ms +[2025-07-06 11:38:35] [Rank 0] step:4461/10000 train_time:363934ms step_avg:81.58ms +[2025-07-06 11:38:35] [Rank 0] step:4461/10000 train_time:363934ms step_avg:81.58ms +[2025-07-06 11:38:36] [Rank 0] step:4481/10000 train_time:365433ms step_avg:81.55ms +[2025-07-06 11:38:36] [Rank 0] step:4481/10000 train_time:365433ms step_avg:81.55ms +[2025-07-06 11:38:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 11:38:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 11:38:39] [Rank 0] PRINT: step:4500/10000 train_loss:1.0974 val_loss:1.0754 train_time:366932ms step_avg:81.54ms +[2025-07-06 11:38:39] [Rank 0] PRINT: step:4500/10000 train_loss:1.0974 val_loss:1.0754 train_time:366932ms step_avg:81.54ms +[2025-07-06 11:38:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 11:38:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 11:38:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 11:38:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
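The recurring divisibility warning above is pure integer arithmetic; a minimal sketch with the values taken from the log (the variable names are mine, not the script's):

```python
# Why the warning fires: with these values from the log, the validation set
# is 7.5 batches, integer division keeps only 7, and half a batch of tokens
# (131072) is never scored.
val_tokens = 1966080
val_batch_size = 262144

full_batches = val_tokens // val_batch_size    # 7
tokens_scored = full_batches * val_batch_size  # 1835008
tokens_missed = val_tokens - tokens_scored     # 131072
assert val_tokens % val_batch_size != 0        # the condition being warned about
```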
+[2025-07-06 11:38:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:43:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:43:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:43:59] [Rank 0] Total Loss: 5.1018
+[2025-07-06 11:43:59] [Rank 0] Total FTA: 0.4825
+[2025-07-06 11:43:59] [Rank 0] Group 0 Loss: 5.3178
+[2025-07-06 11:43:59] [Rank 0] Group 1 Loss: 4.7868
+[2025-07-06 11:43:59] [Rank 0] Group 2 Loss: 4.8196
+[2025-07-06 11:43:59] [Rank 0] Group 3 Loss: 5.2339
+[2025-07-06 11:43:59] [Rank 0] Group 4 Loss: 5.0415
+[2025-07-06 11:43:59] [Rank 0] Group 5 Loss: 5.0759
+[2025-07-06 11:43:59] [Rank 0] Group 6 Loss: 5.0115
+[2025-07-06 11:43:59] [Rank 0] Group 7 Loss: 5.1436
+[2025-07-06 11:43:59] [Rank 0] Group 8 Loss: 5.1409
+[2025-07-06 11:43:59] [Rank 0] Group 9 Loss: 5.1281
+[2025-07-06 11:43:59] [Rank 0] Group 10 Loss: 5.1316
+[2025-07-06 11:43:59] [Rank 0] Group 11 Loss: 5.1282
+[2025-07-06 11:43:59] [Rank 0] Group 0 FTA: 0.1717
+[2025-07-06 11:43:59] [Rank 0] Group 1 FTA: 0.5339
+[2025-07-06 11:43:59] [Rank 0] Group 2 FTA: 0.5859
+[2025-07-06 11:43:59] [Rank 0] Group 3 FTA: 0.5339
+[2025-07-06 11:43:59] [Rank 0] Group 4 FTA: 0.4661
+[2025-07-06 11:43:59] [Rank 0] Group 5 FTA: 0.5990
+[2025-07-06 11:43:59] [Rank 0] Group 6 FTA: 0.4766
+[2025-07-06 11:43:59] [Rank 0] Group 7 FTA: 0.4870
+[2025-07-06 11:43:59] [Rank 0] Group 8 FTA: 0.5312
+[2025-07-06 11:43:59] [Rank 0] Group 9 FTA: 0.5625
+[2025-07-06 11:43:59] [Rank 0] Group 10 FTA: 0.5352
+[2025-07-06 11:43:59] [Rank 0] Group 11 FTA: 0.5371
+[2025-07-06 11:44:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:44:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:44:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:44:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 11:44:01] [Rank 0] step:4501/10000 train_time:367069ms step_avg:81.55ms
+[2025-07-06 11:44:03] [Rank 0] step:4521/10000 train_time:368735ms step_avg:81.56ms
+[2025-07-06 11:44:04] [Rank 0] step:4541/10000 train_time:370226ms step_avg:81.53ms
+[2025-07-06 11:44:06] [Rank 0] step:4561/10000 train_time:371717ms step_avg:81.50ms
+[2025-07-06 11:44:07] [Rank 0] step:4581/10000 train_time:373208ms step_avg:81.47ms
+[2025-07-06 11:44:09] [Rank 0] step:4601/10000 train_time:375359ms step_avg:81.58ms
+[2025-07-06 11:44:11] [Rank 0] step:4621/10000 train_time:376850ms step_avg:81.55ms
+[2025-07-06 11:44:12] [Rank 0] step:4641/10000 train_time:378445ms step_avg:81.54ms
+[2025-07-06 11:44:14] [Rank 0] step:4661/10000 train_time:380037ms step_avg:81.54ms
+[2025-07-06 11:44:16] [Rank 0] step:4681/10000 train_time:382219ms step_avg:81.65ms
+[2025-07-06 11:44:18] [Rank 0] step:4701/10000 train_time:383693ms step_avg:81.62ms
+[2025-07-06 11:44:19] [Rank 0] step:4721/10000 train_time:385188ms step_avg:81.59ms
+[2025-07-06 11:44:21] [Rank 0] step:4741/10000 train_time:386682ms step_avg:81.56ms
+[2025-07-06 11:44:22] [Rank 0] step:4761/10000 train_time:388178ms step_avg:81.53ms
+[2025-07-06 11:44:24] [Rank 0] step:4781/10000 train_time:390334ms step_avg:81.64ms
+[2025-07-06 11:44:26] [Rank 0] step:4801/10000 train_time:391830ms step_avg:81.61ms
+[2025-07-06 11:44:27] [Rank 0] step:4821/10000 train_time:393325ms step_avg:81.59ms
+[2025-07-06 11:44:29] [Rank 0] step:4841/10000 train_time:394822ms step_avg:81.56ms
+[2025-07-06 11:44:31] [Rank 0] step:4861/10000 train_time:397098ms step_avg:81.69ms
+[2025-07-06 11:44:33] [Rank 0] step:4881/10000 train_time:398575ms step_avg:81.66ms
+[2025-07-06 11:44:34] [Rank 0] step:4901/10000 train_time:400175ms step_avg:81.65ms
+[2025-07-06 11:44:36] [Rank 0] step:4921/10000 train_time:401672ms step_avg:81.62ms
+[2025-07-06 11:44:37] [Rank 0] step:4941/10000 train_time:403168ms step_avg:81.60ms
+[2025-07-06 11:44:39] [Rank 0] step:4961/10000 train_time:405408ms step_avg:81.72ms
+[2025-07-06 11:44:41] [Rank 0] step:4981/10000 train_time:407005ms step_avg:81.71ms
+[2025-07-06 11:44:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:44:43] [Rank 0] PRINT: step:5000/10000 train_loss:1.0649 val_loss:1.0469 train_time:408502ms step_avg:81.70ms
+[2025-07-06 11:44:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:44:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
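The sampler's exact quota rule isn't shown in the log, but per-group rounding (and keeping small groups whole) is the usual reason a stratified draw overshoots its target, which is why the next line reports 5633 samples rather than 5000. A hedged sketch, with all names assumed:

```python
import math
import random

def stratified_sample(samples_by_group, target=5000, min_per_group=100):
    """Hypothetical per-group sampler: ceil() and the per-group floor both
    push the final count above the nominal target (e.g. 5633 vs ~5000)."""
    total = sum(len(g) for g in samples_by_group.values())
    picked = []
    for group in samples_by_group.values():
        share = math.ceil(target * len(group) / total)  # proportional share, rounded up
        k = min(len(group), max(min_per_group, share))
        picked.extend(random.sample(group, k))
    return picked
```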
+[2025-07-06 11:44:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:50:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:50:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:50:05] [Rank 0] Total Loss: 5.1932
+[2025-07-06 11:50:05] [Rank 0] Total FTA: 0.5454
+[2025-07-06 11:50:05] [Rank 0] Group 0 Loss: 5.5458
+[2025-07-06 11:50:05] [Rank 0] Group 1 Loss: 4.7851
+[2025-07-06 11:50:05] [Rank 0] Group 2 Loss: 4.8495
+[2025-07-06 11:50:05] [Rank 0] Group 3 Loss: 5.3323
+[2025-07-06 11:50:05] [Rank 0] Group 4 Loss: 5.2340
+[2025-07-06 11:50:05] [Rank 0] Group 5 Loss: 5.1717
+[2025-07-06 11:50:05] [Rank 0] Group 6 Loss: 5.1212
+[2025-07-06 11:50:05] [Rank 0] Group 7 Loss: 5.1621
+[2025-07-06 11:50:05] [Rank 0] Group 8 Loss: 5.1673
+[2025-07-06 11:50:05] [Rank 0] Group 9 Loss: 5.1452
+[2025-07-06 11:50:06] [Rank 0] Group 10 Loss: 5.2035
+[2025-07-06 11:50:06] [Rank 0] Group 11 Loss: 5.2063
+[2025-07-06 11:50:06] [Rank 0] Group 0 FTA: 0.3524
+[2025-07-06 11:50:06] [Rank 0] Group 1 FTA: 0.4714
+[2025-07-06 11:50:06] [Rank 0] Group 2 FTA: 0.7214
+[2025-07-06 11:50:06] [Rank 0] Group 3 FTA: 0.5339
+[2025-07-06 11:50:06] [Rank 0] Group 4 FTA: 0.5130
+[2025-07-06 11:50:06] [Rank 0] Group 5 FTA: 0.6120
+[2025-07-06 11:50:06] [Rank 0] Group 6 FTA: 0.5339
+[2025-07-06 11:50:06] [Rank 0] Group 7 FTA: 0.5703
+[2025-07-06 11:50:06] [Rank 0] Group 8 FTA: 0.5599
+[2025-07-06 11:50:06] [Rank 0] Group 9 FTA: 0.5625
+[2025-07-06 11:50:06] [Rank 0] Group 10 FTA: 0.5801
+[2025-07-06 11:50:06] [Rank 0] Group 11 FTA: 0.6113
+[2025-07-06 11:50:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:50:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:50:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:50:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 11:50:07] [Rank 0] step:5001/10000 train_time:408523ms step_avg:81.69ms
+[2025-07-06 11:50:09] [Rank 0] step:5021/10000 train_time:410016ms step_avg:81.66ms
+[2025-07-06 11:50:11] [Rank 0] step:5041/10000 train_time:412182ms step_avg:81.77ms
+[2025-07-06 11:50:12] [Rank 0] step:5061/10000 train_time:413652ms step_avg:81.73ms
+[2025-07-06 11:50:14] [Rank 0] step:5081/10000 train_time:415142ms step_avg:81.70ms
+[2025-07-06 11:50:15] [Rank 0] step:5101/10000 train_time:416633ms step_avg:81.68ms
+[2025-07-06 11:50:17] [Rank 0] step:5121/10000 train_time:418127ms step_avg:81.65ms
+[2025-07-06 11:50:19] [Rank 0] step:5141/10000 train_time:420387ms step_avg:81.77ms
+[2025-07-06 11:50:21] [Rank 0] step:5161/10000 train_time:421986ms step_avg:81.76ms
+[2025-07-06 11:50:22] [Rank 0] step:5181/10000 train_time:423478ms step_avg:81.74ms
+[2025-07-06 11:50:24] [Rank 0] step:5201/10000 train_time:425071ms step_avg:81.73ms
+[2025-07-06 11:50:26] [Rank 0] step:5221/10000 train_time:426717ms step_avg:81.73ms
+[2025-07-06 11:50:27] [Rank 0] step:5241/10000 train_time:428825ms step_avg:81.82ms
+[2025-07-06 11:50:29] [Rank 0] step:5261/10000 train_time:430319ms step_avg:81.79ms
+[2025-07-06 11:50:30] [Rank 0] step:5281/10000 train_time:431814ms step_avg:81.77ms
+[2025-07-06 11:50:32] [Rank 0] step:5301/10000 train_time:433412ms step_avg:81.76ms
+[2025-07-06 11:50:34] [Rank 0] step:5321/10000 train_time:435563ms step_avg:81.86ms
+[2025-07-06 11:50:36] [Rank 0] step:5341/10000 train_time:437058ms step_avg:81.83ms
+[2025-07-06 11:50:37] [Rank 0] step:5361/10000 train_time:438657ms step_avg:81.82ms
+[2025-07-06 11:50:39] [Rank 0] step:5381/10000 train_time:440156ms step_avg:81.80ms
+[2025-07-06 11:50:41] [Rank 0] step:5401/10000 train_time:441656ms step_avg:81.77ms
+[2025-07-06 11:50:42] [Rank 0] step:5421/10000 train_time:443806ms step_avg:81.87ms
+[2025-07-06 11:50:44] [Rank 0] step:5441/10000 train_time:445305ms step_avg:81.84ms
+[2025-07-06 11:50:45] [Rank 0] step:5461/10000 train_time:446804ms step_avg:81.82ms
+[2025-07-06 11:50:47] [Rank 0] step:5481/10000 train_time:448304ms step_avg:81.79ms
+[2025-07-06 11:50:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:50:50] [Rank 0] PRINT: step:5500/10000 train_loss:1.0378 val_loss:1.0229 train_time:450444ms step_avg:81.90ms
+[2025-07-06 11:50:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:50:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
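The detailed-evaluation blocks below report one Loss and one FTA value per group. Assuming FTA is first-token accuracy on the answer (the script's exact definition isn't visible in the log), the per-group reduction looks roughly like this sketch:

```python
import torch
import torch.nn.functional as F

def detailed_eval(logits, answer_ids, group_ids, num_groups=12):
    """logits: (N, vocab) at the answer position; answer_ids, group_ids: (N,).
    Returns per-group mean loss and first-token accuracy, as in the log."""
    loss = F.cross_entropy(logits, answer_ids, reduction="none")  # (N,)
    hit = (logits.argmax(dim=-1) == answer_ids).float()           # (N,)
    out = {"total_loss": loss.mean().item(), "total_fta": hit.mean().item()}
    for g in range(num_groups):
        mask = group_ids == g
        out[f"group_{g}_loss"] = loss[mask].mean().item()
        out[f"group_{g}_fta"] = hit[mask].mean().item()
    return out
```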
+[2025-07-06 11:50:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 11:56:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 11:56:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 11:56:11] [Rank 0] Total Loss: 5.2335
+[2025-07-06 11:56:11] [Rank 0] Total FTA: 0.6274
+[2025-07-06 11:56:11] [Rank 0] Group 0 Loss: 5.5903
+[2025-07-06 11:56:11] [Rank 0] Group 1 Loss: 4.9446
+[2025-07-06 11:56:11] [Rank 0] Group 2 Loss: 4.8630
+[2025-07-06 11:56:11] [Rank 0] Group 3 Loss: 5.3477
+[2025-07-06 11:56:11] [Rank 0] Group 4 Loss: 5.2347
+[2025-07-06 11:56:11] [Rank 0] Group 5 Loss: 5.1689
+[2025-07-06 11:56:11] [Rank 0] Group 6 Loss: 5.1219
+[2025-07-06 11:56:11] [Rank 0] Group 7 Loss: 5.3027
+[2025-07-06 11:56:11] [Rank 0] Group 8 Loss: 5.2387
+[2025-07-06 11:56:11] [Rank 0] Group 9 Loss: 5.1916
+[2025-07-06 11:56:11] [Rank 0] Group 10 Loss: 5.2054
+[2025-07-06 11:56:11] [Rank 0] Group 11 Loss: 5.2322
+[2025-07-06 11:56:11] [Rank 0] Group 0 FTA: 0.6671
+[2025-07-06 11:56:11] [Rank 0] Group 1 FTA: 0.5443
+[2025-07-06 11:56:11] [Rank 0] Group 2 FTA: 0.6250
+[2025-07-06 11:56:11] [Rank 0] Group 3 FTA: 0.4766
+[2025-07-06 11:56:11] [Rank 0] Group 4 FTA: 0.5651
+[2025-07-06 11:56:11] [Rank 0] Group 5 FTA: 0.6250
+[2025-07-06 11:56:11] [Rank 0] Group 6 FTA: 0.6068
+[2025-07-06 11:56:11] [Rank 0] Group 7 FTA: 0.7005
+[2025-07-06 11:56:11] [Rank 0] Group 8 FTA: 0.6302
+[2025-07-06 11:56:11] [Rank 0] Group 9 FTA: 0.6445
+[2025-07-06 11:56:11] [Rank 0] Group 10 FTA: 0.6836
+[2025-07-06 11:56:11] [Rank 0] Group 11 FTA: 0.6572
+[2025-07-06 11:56:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 11:56:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 11:56:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 11:56:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 11:56:13] [Rank 0] step:5501/10000 train_time:450467ms step_avg:81.89ms
+[2025-07-06 11:56:14] [Rank 0] step:5521/10000 train_time:452071ms step_avg:81.88ms
+[2025-07-06 11:56:16] [Rank 0] step:5541/10000 train_time:453560ms step_avg:81.86ms
+[2025-07-06 11:56:17] [Rank 0] step:5561/10000 train_time:455051ms step_avg:81.83ms
+[2025-07-06 11:56:20] [Rank 0] step:5581/10000 train_time:456900ms step_avg:81.87ms
+[2025-07-06 11:56:21] [Rank 0] step:5601/10000 train_time:458927ms step_avg:81.94ms
+[2025-07-06 11:56:23] [Rank 0] step:5621/10000 train_time:460419ms step_avg:81.91ms
+[2025-07-06 11:56:24] [Rank 0] step:5641/10000 train_time:461913ms step_avg:81.89ms
+[2025-07-06 11:56:26] [Rank 0] step:5661/10000 train_time:463406ms step_avg:81.86ms
+[2025-07-06 11:56:28] [Rank 0] step:5681/10000 train_time:465540ms step_avg:81.95ms
+[2025-07-06 11:56:29] [Rank 0] step:5701/10000 train_time:467033ms step_avg:81.92ms
+[2025-07-06 11:56:31] [Rank 0] step:5721/10000 train_time:468529ms step_avg:81.90ms
+[2025-07-06 11:56:32] [Rank 0] step:5741/10000 train_time:470027ms step_avg:81.87ms
+[2025-07-06 11:56:35] [Rank 0] step:5761/10000 train_time:471523ms step_avg:81.85ms
+[2025-07-06 11:56:36] [Rank 0] step:5781/10000 train_time:473787ms step_avg:81.96ms
+[2025-07-06 11:56:38] [Rank 0] step:5801/10000 train_time:475388ms step_avg:81.95ms
+[2025-07-06 11:56:39] [Rank 0] step:5821/10000 train_time:476949ms step_avg:81.94ms
+[2025-07-06 11:56:41] [Rank 0] step:5841/10000 train_time:478446ms step_avg:81.91ms
+[2025-07-06 11:56:43] [Rank 0] step:5861/10000 train_time:480583ms step_avg:82.00ms
+[2025-07-06 11:56:44] [Rank 0] step:5881/10000 train_time:482080ms step_avg:81.97ms
+[2025-07-06 11:56:46] [Rank 0] step:5901/10000 train_time:483576ms step_avg:81.95ms
+[2025-07-06 11:56:47] [Rank 0] step:5921/10000 train_time:485073ms step_avg:81.92ms
+[2025-07-06 11:56:50] [Rank 0] step:5941/10000 train_time:486825ms step_avg:81.94ms
+[2025-07-06 11:56:51] [Rank 0] step:5961/10000 train_time:488830ms step_avg:82.00ms
+[2025-07-06 11:56:53] [Rank 0] step:5981/10000 train_time:490326ms step_avg:81.98ms
+[2025-07-06 11:56:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 11:56:55] [Rank 0] PRINT: step:6000/10000 train_loss:1.0145 val_loss:1.0027 train_time:491823ms step_avg:81.97ms
+[2025-07-06 11:56:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 11:56:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
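step_avg in the training lines is simply cumulative training time over completed steps; checking one entry from above:

```python
# Reproducing step_avg from the step:5981 entry above.
train_time_ms = 490326
step = 5981
step_avg = train_time_ms / step
print(f"step_avg:{step_avg:.2f}ms")  # -> step_avg:81.98ms, as logged
```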
+[2025-07-06 11:56:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 12:02:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 12:02:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 12:02:16] [Rank 0] Total Loss: 5.3440
+[2025-07-06 12:02:16] [Rank 0] Total FTA: 0.6748
+[2025-07-06 12:02:16] [Rank 0] Group 0 Loss: 5.6193
+[2025-07-06 12:02:16] [Rank 0] Group 1 Loss: 5.0628
+[2025-07-06 12:02:16] [Rank 0] Group 2 Loss: 5.1131
+[2025-07-06 12:02:16] [Rank 0] Group 3 Loss: 5.3388
+[2025-07-06 12:02:16] [Rank 0] Group 4 Loss: 5.2635
+[2025-07-06 12:02:16] [Rank 0] Group 5 Loss: 5.3220
+[2025-07-06 12:02:16] [Rank 0] Group 6 Loss: 5.2358
+[2025-07-06 12:02:16] [Rank 0] Group 7 Loss: 5.3591
+[2025-07-06 12:02:16] [Rank 0] Group 8 Loss: 5.4138
+[2025-07-06 12:02:17] [Rank 0] Group 9 Loss: 5.2927
+[2025-07-06 12:02:17] [Rank 0] Group 10 Loss: 5.3483
+[2025-07-06 12:02:17] [Rank 0] Group 11 Loss: 5.3890
+[2025-07-06 12:02:17] [Rank 0] Group 0 FTA: 0.5020
+[2025-07-06 12:02:17] [Rank 0] Group 1 FTA: 0.6615
+[2025-07-06 12:02:17] [Rank 0] Group 2 FTA: 0.7682
+[2025-07-06 12:02:17] [Rank 0] Group 3 FTA: 0.7396
+[2025-07-06 12:02:17] [Rank 0] Group 4 FTA: 0.6224
+[2025-07-06 12:02:17] [Rank 0] Group 5 FTA: 0.7057
+[2025-07-06 12:02:17] [Rank 0] Group 6 FTA: 0.6771
+[2025-07-06 12:02:17] [Rank 0] Group 7 FTA: 0.7474
+[2025-07-06 12:02:17] [Rank 0] Group 8 FTA: 0.6927
+[2025-07-06 12:02:17] [Rank 0] Group 9 FTA: 0.6914
+[2025-07-06 12:02:17] [Rank 0] Group 10 FTA: 0.7012
+[2025-07-06 12:02:17] [Rank 0] Group 11 FTA: 0.7061
+[2025-07-06 12:02:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 12:02:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 12:02:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 12:02:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 12:02:19] [Rank 0] step:6001/10000 train_time:491844ms step_avg:81.96ms
+[2025-07-06 12:02:20] [Rank 0] step:6021/10000 train_time:493349ms step_avg:81.94ms
+[2025-07-06 12:02:22] [Rank 0] step:6041/10000 train_time:495494ms step_avg:82.02ms
+[2025-07-06 12:02:24] [Rank 0] step:6061/10000 train_time:496983ms step_avg:82.00ms
+[2025-07-06 12:02:25] [Rank 0] step:6081/10000 train_time:498473ms step_avg:81.97ms
+[2025-07-06 12:02:27] [Rank 0] step:6101/10000 train_time:500066ms step_avg:81.96ms
+[2025-07-06 12:02:29] [Rank 0] step:6121/10000 train_time:501658ms step_avg:81.96ms
+[2025-07-06 12:02:30] [Rank 0] step:6141/10000 train_time:503382ms step_avg:81.97ms
+[2025-07-06 12:02:32] [Rank 0] step:6161/10000 train_time:504974ms step_avg:81.96ms
+[2025-07-06 12:02:33] [Rank 0] step:6181/10000 train_time:506467ms step_avg:81.94ms
+[2025-07-06 12:02:35] [Rank 0] step:6201/10000 train_time:508067ms step_avg:81.93ms
+[2025-07-06 12:02:37] [Rank 0] step:6221/10000 train_time:509798ms step_avg:81.95ms
+[2025-07-06 12:02:38] [Rank 0] step:6241/10000 train_time:511290ms step_avg:81.92ms
+[2025-07-06 12:02:40] [Rank 0] step:6261/10000 train_time:512787ms step_avg:81.90ms
+[2025-07-06 12:02:41] [Rank 0] step:6281/10000 train_time:514283ms step_avg:81.88ms
+[2025-07-06 12:02:43] [Rank 0] step:6301/10000 train_time:516467ms step_avg:81.97ms
+[2025-07-06 12:02:45] [Rank 0] step:6321/10000 train_time:517945ms step_avg:81.94ms
+[2025-07-06 12:02:46] [Rank 0] step:6341/10000 train_time:519443ms step_avg:81.92ms
+[2025-07-06 12:02:48] [Rank 0] step:6361/10000 train_time:520942ms step_avg:81.90ms
+[2025-07-06 12:02:49] [Rank 0] step:6381/10000 train_time:522439ms step_avg:81.87ms
+[2025-07-06 12:02:52] [Rank 0] step:6401/10000 train_time:524681ms step_avg:81.97ms
+[2025-07-06 12:02:53] [Rank 0] step:6421/10000 train_time:526176ms step_avg:81.95ms
+[2025-07-06 12:02:55] [Rank 0] step:6441/10000 train_time:527671ms step_avg:81.92ms
+[2025-07-06 12:02:56] [Rank 0] step:6461/10000 train_time:529171ms step_avg:81.90ms
+[2025-07-06 12:02:58] [Rank 0] step:6481/10000 train_time:530806ms step_avg:81.90ms
+[2025-07-06 12:03:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 12:03:00] [Rank 0] PRINT: step:6500/10000 train_loss:0.9951 val_loss:0.9840 train_time:532646ms step_avg:81.95ms
+[2025-07-06 12:03:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:03:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
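Each "[✓] ... curve updated and saved" line overwrites the same PNG with the full history re-plotted. A minimal matplotlib sketch of the per-class variant; only the output path pattern comes from the log, the rest is assumed:

```python
import matplotlib.pyplot as plt

def save_per_class_curves(eval_steps, history, out_path):
    """history: {group_id: [metric at each detailed eval]}; redraw everything
    and overwrite the PNG so the file always shows the latest curves."""
    fig, ax = plt.subplots()
    for group_id in sorted(history):
        ax.plot(eval_steps, history[group_id], label=f"Group {group_id}")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval loss")
    ax.legend(fontsize=6, ncol=2)
    fig.savefig(out_path, dpi=150)
    plt.close(fig)  # free the figure; this runs at every 500-step eval
```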
+[2025-07-06 12:03:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 12:08:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 12:08:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 12:08:21] [Rank 0] Total Loss: 5.4082
+[2025-07-06 12:08:21] [Rank 0] Total FTA: 0.7563
+[2025-07-06 12:08:21] [Rank 0] Group 0 Loss: 5.7002
+[2025-07-06 12:08:21] [Rank 0] Group 1 Loss: 5.1790
+[2025-07-06 12:08:21] [Rank 0] Group 2 Loss: 4.9932
+[2025-07-06 12:08:21] [Rank 0] Group 3 Loss: 5.5369
+[2025-07-06 12:08:21] [Rank 0] Group 4 Loss: 5.4376
+[2025-07-06 12:08:21] [Rank 0] Group 5 Loss: 5.2896
+[2025-07-06 12:08:21] [Rank 0] Group 6 Loss: 5.2946
+[2025-07-06 12:08:21] [Rank 0] Group 7 Loss: 5.4606
+[2025-07-06 12:08:21] [Rank 0] Group 8 Loss: 5.3923
+[2025-07-06 12:08:21] [Rank 0] Group 9 Loss: 5.3878
+[2025-07-06 12:08:21] [Rank 0] Group 10 Loss: 5.3926
+[2025-07-06 12:08:21] [Rank 0] Group 11 Loss: 5.4577
+[2025-07-06 12:08:21] [Rank 0] Group 0 FTA: 0.8401
+[2025-07-06 12:08:21] [Rank 0] Group 1 FTA: 0.8359
+[2025-07-06 12:08:21] [Rank 0] Group 2 FTA: 0.7786
+[2025-07-06 12:08:21] [Rank 0] Group 3 FTA: 0.5833
+[2025-07-06 12:08:21] [Rank 0] Group 4 FTA: 0.6693
+[2025-07-06 12:08:21] [Rank 0] Group 5 FTA: 0.8021
+[2025-07-06 12:08:21] [Rank 0] Group 6 FTA: 0.7578
+[2025-07-06 12:08:21] [Rank 0] Group 7 FTA: 0.7604
+[2025-07-06 12:08:21] [Rank 0] Group 8 FTA: 0.7396
+[2025-07-06 12:08:21] [Rank 0] Group 9 FTA: 0.7305
+[2025-07-06 12:08:21] [Rank 0] Group 10 FTA: 0.7383
+[2025-07-06 12:08:21] [Rank 0] Group 11 FTA: 0.7549
+[2025-07-06 12:08:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 12:08:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 12:08:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 12:08:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 12:08:23] [Rank 0] step:6501/10000 train_time:532667ms step_avg:81.94ms
+[2025-07-06 12:08:24] [Rank 0] step:6521/10000 train_time:534166ms step_avg:81.91ms
+[2025-07-06 12:08:26] [Rank 0] step:6541/10000 train_time:535656ms step_avg:81.89ms
+[2025-07-06 12:08:27] [Rank 0] step:6561/10000 train_time:537249ms step_avg:81.89ms
+[2025-07-06 12:08:29] [Rank 0] step:6581/10000 train_time:539400ms step_avg:81.96ms
+[2025-07-06 12:08:31] [Rank 0] step:6601/10000 train_time:540892ms step_avg:81.94ms
+[2025-07-06 12:08:32] [Rank 0] step:6621/10000 train_time:542387ms step_avg:81.92ms
+[2025-07-06 12:08:34] [Rank 0] step:6641/10000 train_time:543878ms step_avg:81.90ms
+[2025-07-06 12:08:36] [Rank 0] step:6661/10000 train_time:546045ms step_avg:81.98ms
+[2025-07-06 12:08:38] [Rank 0] step:6681/10000 train_time:547516ms step_avg:81.95ms
+[2025-07-06 12:08:39] [Rank 0] step:6701/10000 train_time:549010ms step_avg:81.93ms
+[2025-07-06 12:08:41] [Rank 0] step:6721/10000 train_time:550605ms step_avg:81.92ms
+[2025-07-06 12:08:42] [Rank 0] step:6741/10000 train_time:552099ms step_avg:81.90ms
+[2025-07-06 12:08:44] [Rank 0] step:6761/10000 train_time:553936ms step_avg:81.93ms
+[2025-07-06 12:08:45] [Rank 0] step:6781/10000 train_time:555430ms step_avg:81.91ms
+[2025-07-06 12:08:47] [Rank 0] step:6801/10000 train_time:556929ms step_avg:81.89ms
+[2025-07-06 12:08:49] [Rank 0] step:6821/10000 train_time:558527ms step_avg:81.88ms
+[2025-07-06 12:08:51] [Rank 0] step:6841/10000 train_time:560280ms step_avg:81.90ms
+[2025-07-06 12:08:52] [Rank 0] step:6861/10000 train_time:562271ms step_avg:81.95ms
+[2025-07-06 12:08:54] [Rank 0] step:6881/10000 train_time:563767ms step_avg:81.93ms
+[2025-07-06 12:08:55] [Rank 0] step:6901/10000 train_time:565264ms step_avg:81.91ms
+[2025-07-06 12:08:57] [Rank 0] step:6921/10000 train_time:566759ms step_avg:81.89ms
+[2025-07-06 12:08:59] [Rank 0] step:6941/10000 train_time:568907ms step_avg:81.96ms
+[2025-07-06 12:09:01] [Rank 0] step:6961/10000 train_time:570507ms step_avg:81.96ms
+[2025-07-06 12:09:02] [Rank 0] step:6981/10000 train_time:572107ms step_avg:81.95ms
+[2025-07-06 12:09:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 12:09:05] [Rank 0] PRINT: step:7000/10000 train_loss:0.9778 val_loss:0.9701 train_time:573603ms step_avg:81.94ms
+[2025-07-06 12:09:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:09:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
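The line format itself ([timestamp] [Rank 0] ..., with PRINT: marking entries also echoed to stdout) suggests a conventional rank-0-only logger; a sketch of that convention, entirely assumed rather than taken from the script:

```python
import time

def log0(msg, rank, logfile=None, echo=False):
    """Only rank 0 writes, hence '[Rank 0]' on every line; 'PRINT:' tags
    entries that were also printed to the console."""
    if rank != 0:
        return
    stamp = time.strftime("%Y-%m-%d %H:%M:%S")
    line = f"[{stamp}] [Rank {rank}] {'PRINT: ' if echo else ''}{msg}"
    if echo:
        print(line)
    if logfile is not None:
        logfile.write(line + "\n")
        logfile.flush()
```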
+[2025-07-06 12:09:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 12:09:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 12:14:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 12:14:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 12:14:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 12:14:28] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 12:14:28] [Rank 0] Total Loss: 5.5616 +[2025-07-06 12:14:28] [Rank 0] Total Loss: 5.5616 +[2025-07-06 12:14:28] [Rank 0] Total FTA: 0.7543 +[2025-07-06 12:14:28] [Rank 0] Total FTA: 0.7543 +[2025-07-06 12:14:28] [Rank 0] Group 0 Loss: 5.9591 +[2025-07-06 12:14:28] [Rank 0] Group 0 Loss: 5.9591 +[2025-07-06 12:14:28] [Rank 0] Group 1 Loss: 5.2621 +[2025-07-06 12:14:28] [Rank 0] Group 1 Loss: 5.2621 +[2025-07-06 12:14:28] [Rank 0] Group 2 Loss: 5.1368 +[2025-07-06 12:14:28] [Rank 0] Group 2 Loss: 5.1368 +[2025-07-06 12:14:28] [Rank 0] Group 3 Loss: 5.7494 +[2025-07-06 12:14:28] [Rank 0] Group 3 Loss: 5.7494 +[2025-07-06 12:14:28] [Rank 0] Group 4 Loss: 5.5854 +[2025-07-06 12:14:28] [Rank 0] Group 4 Loss: 5.5854 +[2025-07-06 12:14:28] [Rank 0] Group 5 Loss: 5.4267 +[2025-07-06 12:14:28] [Rank 0] Group 5 Loss: 5.4267 +[2025-07-06 12:14:28] [Rank 0] Group 6 Loss: 5.3852 +[2025-07-06 12:14:28] [Rank 0] Group 6 Loss: 5.3852 +[2025-07-06 12:14:28] [Rank 0] Group 7 Loss: 5.6314 +[2025-07-06 12:14:28] [Rank 0] Group 7 Loss: 5.6314 +[2025-07-06 12:14:28] [Rank 0] Group 8 Loss: 5.5258 +[2025-07-06 12:14:28] [Rank 0] Group 8 Loss: 5.5258 +[2025-07-06 12:14:28] [Rank 0] Group 9 Loss: 5.5393 +[2025-07-06 12:14:28] [Rank 0] Group 9 Loss: 5.5393 +[2025-07-06 12:14:28] [Rank 0] Group 10 Loss: 5.5367 +[2025-07-06 12:14:28] [Rank 0] Group 10 Loss: 5.5367 +[2025-07-06 12:14:28] [Rank 0] Group 11 Loss: 5.5774 +[2025-07-06 12:14:28] [Rank 0] Group 11 Loss: 5.5774 +[2025-07-06 12:14:28] [Rank 0] Group 0 FTA: 0.8296 +[2025-07-06 12:14:28] [Rank 0] Group 0 FTA: 0.8296 +[2025-07-06 12:14:28] [Rank 0] Group 1 FTA: 0.5260 +[2025-07-06 12:14:28] [Rank 0] Group 1 FTA: 0.5260 +[2025-07-06 12:14:28] [Rank 0] Group 2 FTA: 0.6484 +[2025-07-06 12:14:28] [Rank 0] Group 2 FTA: 0.6484 +[2025-07-06 12:14:28] [Rank 0] Group 3 FTA: 0.7266 +[2025-07-06 12:14:28] [Rank 0] Group 3 FTA: 0.7266 +[2025-07-06 12:14:28] [Rank 0] Group 4 FTA: 0.7656 +[2025-07-06 12:14:28] [Rank 0] Group 4 FTA: 0.7656 +[2025-07-06 12:14:28] [Rank 0] Group 5 FTA: 0.7734 +[2025-07-06 12:14:28] [Rank 0] Group 5 FTA: 0.7734 +[2025-07-06 12:14:28] [Rank 0] Group 6 FTA: 0.7839 +[2025-07-06 12:14:28] [Rank 0] Group 6 FTA: 0.7839 +[2025-07-06 12:14:28] [Rank 0] Group 7 FTA: 0.7760 +[2025-07-06 12:14:28] [Rank 0] Group 7 FTA: 0.7760 +[2025-07-06 12:14:28] [Rank 0] Group 8 FTA: 0.7474 +[2025-07-06 12:14:28] [Rank 0] Group 8 FTA: 0.7474 +[2025-07-06 12:14:28] [Rank 0] Group 9 FTA: 0.8203 +[2025-07-06 12:14:28] [Rank 0] Group 9 FTA: 0.8203 +[2025-07-06 12:14:28] [Rank 0] Group 10 FTA: 0.7969 +[2025-07-06 12:14:28] [Rank 0] Group 10 FTA: 0.7969 +[2025-07-06 12:14:28] [Rank 0] Group 11 FTA: 0.7676 +[2025-07-06 12:14:28] [Rank 0] Group 11 FTA: 0.7676 +[2025-07-06 12:14:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-06 12:14:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-06 12:14:29] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-06 12:14:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-06 12:14:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-06 12:14:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-06 12:14:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-06 12:14:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-06 12:14:29] [Rank 0] step:7001/10000 train_time:573625ms step_avg:81.93ms +[2025-07-06 12:14:29] [Rank 0] step:7001/10000 train_time:573625ms step_avg:81.93ms +[2025-07-06 12:14:31] [Rank 0] step:7021/10000 train_time:575174ms step_avg:81.92ms +[2025-07-06 12:14:31] [Rank 0] step:7021/10000 train_time:575174ms step_avg:81.92ms +[2025-07-06 12:14:33] [Rank 0] step:7041/10000 train_time:577511ms step_avg:82.02ms +[2025-07-06 12:14:33] [Rank 0] step:7041/10000 train_time:577511ms step_avg:82.02ms +[2025-07-06 12:14:35] [Rank 0] step:7061/10000 train_time:579032ms step_avg:82.00ms +[2025-07-06 12:14:35] [Rank 0] step:7061/10000 train_time:579032ms step_avg:82.00ms +[2025-07-06 12:14:36] [Rank 0] step:7081/10000 train_time:580523ms step_avg:81.98ms +[2025-07-06 12:14:36] [Rank 0] step:7081/10000 train_time:580523ms step_avg:81.98ms +[2025-07-06 12:14:38] [Rank 0] step:7101/10000 train_time:582017ms step_avg:81.96ms +[2025-07-06 12:14:38] [Rank 0] step:7101/10000 train_time:582017ms step_avg:81.96ms +[2025-07-06 12:14:40] [Rank 0] step:7121/10000 train_time:584177ms step_avg:82.04ms +[2025-07-06 12:14:40] [Rank 0] step:7121/10000 train_time:584177ms step_avg:82.04ms +[2025-07-06 12:14:41] [Rank 0] step:7141/10000 train_time:585673ms step_avg:82.02ms +[2025-07-06 12:14:41] [Rank 0] step:7141/10000 train_time:585673ms step_avg:82.02ms +[2025-07-06 12:14:43] [Rank 0] step:7161/10000 train_time:587167ms step_avg:82.00ms +[2025-07-06 12:14:43] [Rank 0] step:7161/10000 train_time:587167ms step_avg:82.00ms +[2025-07-06 12:14:44] [Rank 0] step:7181/10000 train_time:588661ms step_avg:81.97ms +[2025-07-06 12:14:44] [Rank 0] step:7181/10000 train_time:588661ms step_avg:81.97ms +[2025-07-06 12:14:46] [Rank 0] step:7201/10000 train_time:590207ms step_avg:81.96ms +[2025-07-06 12:14:46] [Rank 0] step:7201/10000 train_time:590207ms step_avg:81.96ms +[2025-07-06 12:14:48] [Rank 0] step:7221/10000 train_time:592289ms step_avg:82.02ms +[2025-07-06 12:14:48] [Rank 0] step:7221/10000 train_time:592289ms step_avg:82.02ms +[2025-07-06 12:14:49] [Rank 0] step:7241/10000 train_time:593785ms step_avg:82.00ms +[2025-07-06 12:14:49] [Rank 0] step:7241/10000 train_time:593785ms step_avg:82.00ms +[2025-07-06 12:14:51] [Rank 0] step:7261/10000 train_time:595282ms step_avg:81.98ms +[2025-07-06 12:14:51] [Rank 0] step:7261/10000 train_time:595282ms step_avg:81.98ms +[2025-07-06 12:14:52] [Rank 0] step:7281/10000 train_time:596779ms step_avg:81.96ms +[2025-07-06 12:14:52] [Rank 0] step:7281/10000 train_time:596779ms step_avg:81.96ms +[2025-07-06 12:14:54] [Rank 0] step:7301/10000 train_time:598933ms step_avg:82.03ms +[2025-07-06 12:14:54] [Rank 
+[2025-07-06 12:14:56] [Rank 0] step:7321/10000 train_time:600428ms step_avg:82.01ms
+[2025-07-06 12:14:57] [Rank 0] step:7341/10000 train_time:601926ms step_avg:82.00ms
+[2025-07-06 12:14:59] [Rank 0] step:7361/10000 train_time:603424ms step_avg:81.98ms
+[2025-07-06 12:15:01] [Rank 0] step:7381/10000 train_time:604975ms step_avg:81.96ms
+[2025-07-06 12:15:02] [Rank 0] step:7401/10000 train_time:606653ms step_avg:81.97ms
+[2025-07-06 12:15:04] [Rank 0] step:7421/10000 train_time:608152ms step_avg:81.95ms
+[2025-07-06 12:15:05] [Rank 0] step:7441/10000 train_time:609648ms step_avg:81.93ms
+[2025-07-06 12:15:07] [Rank 0] step:7461/10000 train_time:611146ms step_avg:81.91ms
+[2025-07-06 12:15:09] [Rank 0] step:7481/10000 train_time:613300ms step_avg:81.98ms
+[2025-07-06 12:15:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 12:15:11] [Rank 0] PRINT: step:7500/10000 train_loss:0.9642 val_loss:0.9574 train_time:614798ms step_avg:81.97ms
+[2025-07-06 12:15:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:15:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 12:15:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 12:20:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 12:20:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 12:20:33] [Rank 0] Total Loss: 5.5620
+[2025-07-06 12:20:33] [Rank 0] Total FTA: 0.7685
+[2025-07-06 12:20:33] [Rank 0] Group 0 Loss: 5.9438
+[2025-07-06 12:20:33] [Rank 0] Group 1 Loss: 5.2711
+[2025-07-06 12:20:33] [Rank 0] Group 2 Loss: 5.2190
+[2025-07-06 12:20:33] [Rank 0] Group 3 Loss: 5.6877
+[2025-07-06 12:20:33] [Rank 0] Group 4 Loss: 5.6216
+[2025-07-06 12:20:33] [Rank 0] Group 5 Loss: 5.4671
+[2025-07-06 12:20:33] [Rank 0] Group 6 Loss: 5.4155
+[2025-07-06 12:20:33] [Rank 0] Group 7 Loss: 5.5904
+[2025-07-06 12:20:33] [Rank 0] Group 8 Loss: 5.5063
+[2025-07-06 12:20:33] [Rank 0] Group 9 Loss: 5.5355
+[2025-07-06 12:20:33] [Rank 0] Group 10 Loss: 5.5473
+[2025-07-06 12:20:33] [Rank 0] Group 11 Loss: 5.5582
+[2025-07-06 12:20:33] [Rank 0] Group 0 FTA: 0.6736
+[2025-07-06 12:20:33] [Rank 0] Group 1 FTA: 0.8151
+[2025-07-06 12:20:33] [Rank 0] Group 2 FTA: 0.6276
+[2025-07-06 12:20:33] [Rank 0] Group 3 FTA: 0.7526
+[2025-07-06 12:20:33] [Rank 0] Group 4 FTA: 0.7891
+[2025-07-06 12:20:33] [Rank 0] Group 5 FTA: 0.8542
+[2025-07-06 12:20:33] [Rank 0] Group 6 FTA: 0.7708
+[2025-07-06 12:20:33] [Rank 0] Group 7 FTA: 0.8099
+[2025-07-06 12:20:33] [Rank 0] Group 8 FTA: 0.7891
+[2025-07-06 12:20:33] [Rank 0] Group 9 FTA: 0.7812
+[2025-07-06 12:20:33] [Rank 0] Group 10 FTA: 0.8223
+[2025-07-06 12:20:33] [Rank 0] Group 11 FTA: 0.7871
+[2025-07-06 12:20:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 12:20:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 12:20:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 12:20:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 12:20:35] [Rank 0] step:7501/10000 train_time:614818ms step_avg:81.96ms
+[2025-07-06 12:20:36] [Rank 0] step:7521/10000 train_time:616323ms step_avg:81.95ms
+[2025-07-06 12:20:38] [Rank 0] step:7541/10000 train_time:617814ms step_avg:81.93ms
+[2025-07-06 12:20:40] [Rank 0] step:7561/10000 train_time:619563ms step_avg:81.94ms
+[2025-07-06 12:20:42] [Rank 0] step:7581/10000 train_time:621438ms step_avg:81.97ms
+[2025-07-06 12:20:43] [Rank 0] step:7601/10000 train_time:622933ms step_avg:81.95ms
+[2025-07-06 12:20:45] [Rank 0] step:7621/10000 train_time:624427ms step_avg:81.93ms
+[2025-07-06 12:20:46] [Rank 0] step:7641/10000 train_time:625919ms step_avg:81.92ms
+[2025-07-06 12:20:48] [Rank 0] step:7661/10000 train_time:628082ms step_avg:81.98ms
+[2025-07-06 12:20:50] [Rank 0] step:7681/10000 train_time:629575ms step_avg:81.97ms
+[2025-07-06 12:20:51] [Rank 0] step:7701/10000 train_time:631069ms step_avg:81.95ms
+[2025-07-06 12:20:53] [Rank 0] step:7721/10000 train_time:632788ms step_avg:81.96ms
+[2025-07-06 12:20:55] [Rank 0] step:7741/10000 train_time:634335ms step_avg:81.94ms
+[2025-07-06 12:20:57] [Rank 0] step:7761/10000 train_time:636419ms step_avg:82.00ms
+[2025-07-06 12:20:58] [Rank 0] step:7781/10000 train_time:637918ms step_avg:81.98ms
+[2025-07-06 12:21:00] [Rank 0] step:7801/10000 train_time:639415ms step_avg:81.97ms
+[2025-07-06 12:21:01] [Rank 0] step:7821/10000 train_time:640910ms step_avg:81.95ms
+[2025-07-06 12:21:03] [Rank 0] step:7841/10000 train_time:643061ms step_avg:82.01ms
+[2025-07-06 12:21:05] [Rank 0] step:7861/10000 train_time:644558ms step_avg:81.99ms
+[2025-07-06 12:21:06] [Rank 0] step:7881/10000 train_time:646055ms step_avg:81.98ms
+[2025-07-06 12:21:08] [Rank 0] step:7901/10000 train_time:647554ms step_avg:81.96ms
+[2025-07-06 12:21:09] [Rank 0] step:7921/10000 train_time:649098ms step_avg:81.95ms
+[2025-07-06 12:21:11] [Rank 0] step:7941/10000 train_time:650780ms step_avg:81.95ms
+[2025-07-06 12:21:12] [Rank 0] step:7961/10000 train_time:652276ms step_avg:81.93ms
+[2025-07-06 12:21:14] [Rank 0] step:7981/10000 train_time:653874ms step_avg:81.93ms
+[2025-07-06 12:21:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 12:21:16] [Rank 0] PRINT: step:8000/10000 train_loss:0.9522 val_loss:0.9468 train_time:655372ms step_avg:81.92ms
+[2025-07-06 12:21:16] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:21:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 12:21:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 12:26:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 12:26:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 12:26:37] [Rank 0] Total Loss: 5.5320
+[2025-07-06 12:26:37] [Rank 0] Total FTA: 0.8436
+[2025-07-06 12:26:37] [Rank 0] Group 0 Loss: 5.8548
+[2025-07-06 12:26:37] [Rank 0] Group 1 Loss: 5.3958
+[2025-07-06 12:26:37] [Rank 0] Group 2 Loss: 5.1796
+[2025-07-06 12:26:37] [Rank 0] Group 3 Loss: 5.6285
+[2025-07-06 12:26:37] [Rank 0] Group 4 Loss: 5.4495
+[2025-07-06 12:26:37] [Rank 0] Group 5 Loss: 5.4420
+[2025-07-06 12:26:37] [Rank 0] Group 6 Loss: 5.4004
+[2025-07-06 12:26:37] [Rank 0] Group 7 Loss: 5.5387
+[2025-07-06 12:26:37] [Rank 0] Group 8 Loss: 5.5596
+[2025-07-06 12:26:37] [Rank 0] Group 9 Loss: 5.5298
+[2025-07-06 12:26:37] [Rank 0] Group 10 Loss: 5.5234
+[2025-07-06 12:26:37] [Rank 0] Group 11 Loss: 5.5425
+[2025-07-06 12:26:37] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 12:26:37] [Rank 0] Group 1 FTA: 0.8281
+[2025-07-06 12:26:37] [Rank 0] Group 2 FTA: 0.7812
+[2025-07-06 12:26:37] [Rank 0] Group 3 FTA: 0.8750
+[2025-07-06 12:26:37] [Rank 0] Group 4 FTA: 0.8516
+[2025-07-06 12:26:37] [Rank 0] Group 5 FTA: 0.8516
+[2025-07-06 12:26:37] [Rank 0] Group 6 FTA: 0.7865
+[2025-07-06 12:26:37] [Rank 0] Group 7 FTA: 0.7917
+[2025-07-06 12:26:37] [Rank 0] Group 8 FTA: 0.8125
+[2025-07-06 12:26:37] [Rank 0] Group 9 FTA: 0.7695
+[2025-07-06 12:26:37] [Rank 0] Group 10 FTA: 0.8262
+[2025-07-06 12:26:37] [Rank 0] Group 11 FTA: 0.8174
+[2025-07-06 12:26:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 12:26:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 12:26:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 12:26:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 12:26:39] [Rank 0] step:8001/10000 train_time:655393ms step_avg:81.91ms
+[2025-07-06 12:26:41] [Rank 0] step:8021/10000 train_time:657549ms step_avg:81.98ms
+[2025-07-06 12:26:43] [Rank 0] step:8041/10000 train_time:659139ms step_avg:81.97ms
+[2025-07-06 12:26:44] [Rank 0] step:8061/10000 train_time:660630ms step_avg:81.95ms
+[2025-07-06 12:26:46] [Rank 0] step:8081/10000 train_time:662123ms step_avg:81.94ms
+[2025-07-06 12:26:48] [Rank 0] step:8101/10000 train_time:663617ms step_avg:81.92ms
+[2025-07-06 12:26:49] [Rank 0] step:8121/10000 train_time:665758ms step_avg:81.98ms
+[2025-07-06 12:26:51] [Rank 0] step:8141/10000 train_time:667251ms step_avg:81.96ms
+[2025-07-06 12:26:52] [Rank 0] step:8161/10000 train_time:668844ms step_avg:81.96ms
+[2025-07-06 12:26:54] [Rank 0] step:8181/10000 train_time:670441ms step_avg:81.95ms
+[2025-07-06 12:26:56] [Rank 0] step:8201/10000 train_time:672592ms step_avg:82.01ms
+[2025-07-06 12:26:58] [Rank 0] step:8221/10000 train_time:674189ms step_avg:82.01ms
+[2025-07-06 12:26:59] [Rank 0] step:8241/10000 train_time:675784ms step_avg:82.00ms
+[2025-07-06 12:27:01] [Rank 0] step:8261/10000 train_time:677382ms step_avg:82.00ms
+[2025-07-06 12:27:03] [Rank 0] step:8281/10000 train_time:678926ms step_avg:81.99ms
+[2025-07-06 12:27:04] [Rank 0] step:8301/10000 train_time:680610ms step_avg:81.99ms
+[2025-07-06 12:27:06] [Rank 0] step:8321/10000 train_time:682106ms step_avg:81.97ms
+[2025-07-06 12:27:07] [Rank 0] step:8341/10000 train_time:683702ms step_avg:81.97ms
+[2025-07-06 12:27:09] [Rank 0] step:8361/10000 train_time:685200ms step_avg:81.95ms
+[2025-07-06 12:27:11] [Rank 0] step:8381/10000 train_time:687346ms step_avg:82.01ms
+[2025-07-06 12:27:13] [Rank 0] step:8401/10000 train_time:689170ms step_avg:82.03ms
+[2025-07-06 12:27:14] [Rank 0] step:8421/10000 train_time:690668ms step_avg:82.02ms
+[2025-07-06 12:27:16] [Rank 0] step:8441/10000 train_time:692166ms step_avg:82.00ms
+[2025-07-06 12:27:18] [Rank 0] step:8461/10000 train_time:693919ms step_avg:82.01ms
+[2025-07-06 12:27:19] [Rank 0] step:8481/10000 train_time:695810ms step_avg:82.04ms
+[2025-07-06 12:27:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 12:27:22] [Rank 0] PRINT: step:8500/10000 train_loss:0.9419 val_loss:0.9377 train_time:697307ms step_avg:82.04ms
+[2025-07-06 12:27:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:27:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 12:27:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 12:32:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 12:32:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 12:32:43] [Rank 0] Total Loss: 5.6118
+[2025-07-06 12:32:43] [Rank 0] Total FTA: 0.8828
+[2025-07-06 12:32:43] [Rank 0] Group 0 Loss: 5.9260
+[2025-07-06 12:32:43] [Rank 0] Group 1 Loss: 5.4098
+[2025-07-06 12:32:43] [Rank 0] Group 2 Loss: 5.1147
+[2025-07-06 12:32:43] [Rank 0] Group 3 Loss: 5.8435
+[2025-07-06 12:32:43] [Rank 0] Group 4 Loss: 5.6298
+[2025-07-06 12:32:43] [Rank 0] Group 5 Loss: 5.5419
+[2025-07-06 12:32:43] [Rank 0] Group 6 Loss: 5.4783
+[2025-07-06 12:32:43] [Rank 0] Group 7 Loss: 5.6685
+[2025-07-06 12:32:43] [Rank 0] Group 8 Loss: 5.6205
+[2025-07-06 12:32:43] [Rank 0] Group 9 Loss: 5.5897
+[2025-07-06 12:32:43] [Rank 0] Group 10 Loss: 5.5653
+[2025-07-06 12:32:43] [Rank 0] Group 11 Loss: 5.6251
+[2025-07-06 12:32:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 12:32:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 12:32:43] [Rank 0] Group 2 FTA: 0.7995
+[2025-07-06 12:32:44] [Rank 0] Group 3 FTA: 0.8802
+[2025-07-06 12:32:44] [Rank 0] Group 4 FTA: 0.8594
+[2025-07-06 12:32:44] [Rank 0] Group 5 FTA: 0.9062
+[2025-07-06 12:32:44] [Rank 0] Group 6 FTA: 0.8073
+[2025-07-06 12:32:44] [Rank 0] Group 7 FTA: 0.8698
+[2025-07-06 12:32:44] [Rank 0] Group 8 FTA: 0.8646
+[2025-07-06 12:32:44] [Rank 0] Group 9 FTA: 0.8398
+[2025-07-06 12:32:44] [Rank 0] Group 10 FTA: 0.8809
+[2025-07-06 12:32:44] [Rank 0] Group 11 FTA: 0.8350
+[2025-07-06 12:32:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 12:32:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 12:32:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 12:32:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 12:32:45] [Rank 0] step:8501/10000 train_time:697328ms step_avg:82.03ms
+[2025-07-06 12:32:47] [Rank 0] step:8521/10000 train_time:698844ms step_avg:82.01ms
+[2025-07-06 12:32:48] [Rank 0] step:8541/10000 train_time:700334ms step_avg:82.00ms
+[2025-07-06 12:32:50] [Rank 0] step:8561/10000 train_time:702480ms step_avg:82.06ms
+[2025-07-06 12:32:52] [Rank 0] step:8581/10000 train_time:703972ms step_avg:82.04ms
+[2025-07-06 12:32:53] [Rank 0] step:8601/10000 train_time:705464ms step_avg:82.02ms
+[2025-07-06 12:32:55] [Rank 0] step:8621/10000 train_time:706957ms step_avg:82.00ms
+[2025-07-06 12:32:57] [Rank 0] step:8641/10000 train_time:709119ms step_avg:82.06ms
+[2025-07-06 12:32:58] [Rank 0] step:8661/10000 train_time:710591ms step_avg:82.04ms
+[2025-07-06 12:33:00] [Rank 0] step:8681/10000 train_time:712087ms step_avg:82.03ms
+[2025-07-06 12:33:02] [Rank 0] step:8701/10000 train_time:713685ms step_avg:82.02ms
+[2025-07-06 12:33:03] [Rank 0] step:8721/10000 train_time:715284ms step_avg:82.02ms
+[2025-07-06 12:33:05] [Rank 0] step:8741/10000 train_time:717425ms step_avg:82.08ms
+[2025-07-06 12:33:07] [Rank 0] step:8761/10000 train_time:718920ms step_avg:82.06ms
+[2025-07-06 12:33:08] [Rank 0] step:8781/10000 train_time:720415ms step_avg:82.04ms
+[2025-07-06 12:33:10] [Rank 0] step:8801/10000 train_time:721913ms step_avg:82.03ms
+[2025-07-06 12:33:12] [Rank 0] step:8821/10000 train_time:724093ms step_avg:82.09ms
+[2025-07-06 12:33:14] [Rank 0] step:8841/10000 train_time:725673ms step_avg:82.08ms
+[2025-07-06 12:33:15] [Rank 0] step:8861/10000 train_time:727170ms step_avg:82.06ms
+[2025-07-06 12:33:17] [Rank 0] step:8881/10000 train_time:728770ms step_avg:82.06ms
+[2025-07-06 12:33:18] [Rank 0] step:8901/10000 train_time:730268ms step_avg:82.04ms
+[2025-07-06 12:33:20] [Rank 0] step:8921/10000 train_time:732434ms step_avg:82.10ms
+[2025-07-06 12:33:22] [Rank 0] step:8941/10000 train_time:733931ms step_avg:82.09ms
+[2025-07-06 12:33:23] [Rank 0] step:8961/10000 train_time:735430ms step_avg:82.07ms
+[2025-07-06 12:33:25] [Rank 0] step:8981/10000 train_time:736930ms step_avg:82.05ms
+[2025-07-06 12:33:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 12:33:27] [Rank 0] PRINT: step:9000/10000 train_loss:0.9331 val_loss:0.9308 train_time:738429ms step_avg:82.05ms
+[2025-07-06 12:33:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:33:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 12:33:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 12:38:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 12:38:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 12:38:49] [Rank 0] Total Loss: 5.7214
+[2025-07-06 12:38:49] [Rank 0] Total FTA: 0.9123
+[2025-07-06 12:38:49] [Rank 0] Group 0 Loss: 5.9721
+[2025-07-06 12:38:49] [Rank 0] Group 1 Loss: 5.4052
+[2025-07-06 12:38:49] [Rank 0] Group 2 Loss: 5.2639
+[2025-07-06 12:38:49] [Rank 0] Group 3 Loss: 5.9155
+[2025-07-06 12:38:50] [Rank 0] Group 4 Loss: 5.7642
+[2025-07-06 12:38:50] [Rank 0] Group 5 Loss: 5.7029
+[2025-07-06 12:38:50] [Rank 0] Group 6 Loss: 5.5810
+[2025-07-06 12:38:50] [Rank 0] Group 7 Loss: 5.6795
+[2025-07-06 12:38:50] [Rank 0] Group 8 Loss: 5.7798
+[2025-07-06 12:38:50] [Rank 0] Group 9 Loss: 5.7165
+[2025-07-06 12:38:50] [Rank 0] Group 10 Loss: 5.7746
+[2025-07-06 12:38:50] [Rank 0] Group 11 Loss: 5.7626
+[2025-07-06 12:38:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 12:38:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 12:38:50] [Rank 0] Group 2 FTA: 0.7500
+[2025-07-06 12:38:50] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 12:38:50] [Rank 0] Group 4 FTA: 0.9323
+[2025-07-06 12:38:50] [Rank 0] Group 5 FTA: 0.9219
+[2025-07-06 12:38:50] [Rank 0] Group 6 FTA: 0.9193
+[2025-07-06 12:38:50] [Rank 0] Group 7 FTA: 0.8724
+[2025-07-06 12:38:50] [Rank 0] Group 8 FTA: 0.8776
+[2025-07-06 12:38:50] [Rank 0] Group 9 FTA: 0.8867
+[2025-07-06 12:38:50] [Rank 0] Group 10 FTA: 0.8613
+[2025-07-06 12:38:50] [Rank 0] Group 11 FTA: 0.8877
+[2025-07-06 12:38:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 12:38:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 12:38:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 12:38:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 12:38:52] [Rank 0] step:9001/10000 train_time:739178ms step_avg:82.12ms
+[2025-07-06 12:38:53] [Rank 0] step:9021/10000 train_time:740671ms step_avg:82.11ms
+[2025-07-06 12:38:55] [Rank 0] step:9041/10000 train_time:742161ms step_avg:82.09ms
+[2025-07-06 12:38:56] [Rank 0] step:9061/10000 train_time:743652ms step_avg:82.07ms
+[2025-07-06 12:38:58] [Rank 0] step:9081/10000 train_time:745143ms step_avg:82.06ms
+[2025-07-06 12:39:00] [Rank 0] step:9101/10000 train_time:747290ms step_avg:82.11ms
+[2025-07-06 12:39:02] [Rank 0] step:9121/10000 train_time:748782ms step_avg:82.09ms
+[2025-07-06 12:39:03] [Rank 0] step:9141/10000 train_time:750378ms step_avg:82.09ms
+[2025-07-06 12:39:05] [Rank 0] step:9161/10000 train_time:751875ms step_avg:82.07ms
+[2025-07-06 12:39:06] [Rank 0] step:9181/10000 train_time:753417ms step_avg:82.06ms
+[2025-07-06 12:39:08] [Rank 0] step:9201/10000 train_time:755101ms step_avg:82.07ms
+[2025-07-06 12:39:09] [Rank 0] step:9221/10000 train_time:756598ms step_avg:82.05ms
+[2025-07-06 12:39:11] [Rank 0] step:9241/10000 train_time:758104ms step_avg:82.04ms
+[2025-07-06 12:39:12] [Rank 0] step:9261/10000 train_time:759601ms step_avg:82.02ms
+[2025-07-06 12:39:14] [Rank 0] step:9281/10000 train_time:761333ms step_avg:82.03ms
+[2025-07-06 12:39:16] [Rank 0] step:9301/10000 train_time:762832ms step_avg:82.02ms
+[2025-07-06 12:39:17] [Rank 0] step:9321/10000 train_time:764330ms step_avg:82.00ms
+[2025-07-06 12:39:19] [Rank 0] step:9341/10000 train_time:765829ms step_avg:81.99ms
+[2025-07-06 12:39:21] [Rank 0] step:9361/10000 train_time:768102ms step_avg:82.05ms
+[2025-07-06 12:39:22] [Rank 0] step:9381/10000 train_time:769582ms step_avg:82.04ms
+[2025-07-06 12:39:24] [Rank 0] step:9401/10000 train_time:771079ms step_avg:82.02ms
+[2025-07-06 12:39:25] [Rank 0] step:9421/10000 train_time:772576ms step_avg:82.01ms
+[2025-07-06 12:39:27] [Rank 0] step:9441/10000 train_time:774077ms step_avg:81.99ms
+[2025-07-06 12:39:29] [Rank 0] step:9461/10000 train_time:776220ms step_avg:82.04ms
+[2025-07-06 12:39:31] [Rank 0] step:9481/10000 train_time:777720ms step_avg:82.03ms
+[2025-07-06 12:39:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 12:39:33] [Rank 0] PRINT: step:9500/10000 train_loss:0.9262 val_loss:0.9249 train_time:779219ms step_avg:82.02ms
+[2025-07-06 12:39:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:39:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 12:39:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 12:44:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 12:44:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 12:44:58] [Rank 0] Total Loss: 5.7387
+[2025-07-06 12:44:58] [Rank 0] Total FTA: 0.9064
+[2025-07-06 12:44:58] [Rank 0] Group 0 Loss: 5.9295
+[2025-07-06 12:44:58] [Rank 0] Group 1 Loss: 5.3989
+[2025-07-06 12:44:58] [Rank 0] Group 2 Loss: 5.3436
+[2025-07-06 12:44:58] [Rank 0] Group 3 Loss: 5.9386
+[2025-07-06 12:44:58] [Rank 0] Group 4 Loss: 5.8061
+[2025-07-06 12:44:58] [Rank 0] Group 5 Loss: 5.7040
+[2025-07-06 12:44:58] [Rank 0] Group 6 Loss: 5.6205
+[2025-07-06 12:44:58] [Rank 0] Group 7 Loss: 5.7329
+[2025-07-06 12:44:58] [Rank 0] Group 8 Loss: 5.7703
+[2025-07-06 12:44:58] [Rank 0] Group 9 Loss: 5.8175
+[2025-07-06 12:44:58] [Rank 0] Group 10 Loss: 5.7540
+[2025-07-06 12:44:58] [Rank 0] Group 11 Loss: 5.7911
+[2025-07-06 12:44:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 12:44:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 12:44:58] [Rank 0] Group 2 FTA: 0.9271
+[2025-07-06 12:44:58] [Rank 0] Group 3 FTA: 0.7943
+[2025-07-06 12:44:58] [Rank 0] Group 4 FTA: 0.9453
+[2025-07-06 12:44:58] [Rank 0] Group 5 FTA: 0.8568
+[2025-07-06 12:44:58] [Rank 0] Group 6 FTA: 0.8932
+[2025-07-06 12:44:58] [Rank 0] Group 7 FTA: 0.8594
+[2025-07-06 12:44:58] [Rank 0] Group 8 FTA: 0.8620
+[2025-07-06 12:44:58] [Rank 0] Group 9 FTA: 0.9258
+[2025-07-06 12:44:58] [Rank 0] Group 10 FTA: 0.9004
+[2025-07-06 12:44:58] [Rank 0] Group 11 FTA: 0.8770
+[2025-07-06 12:44:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 12:44:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 12:44:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 12:44:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 12:44:59] [Rank 0] step:9501/10000 train_time:779240ms step_avg:82.02ms
+[2025-07-06 12:45:01] [Rank 0] step:9521/10000 train_time:780736ms step_avg:82.00ms
+[2025-07-06 12:45:03] [Rank 0] step:9541/10000 train_time:782234ms step_avg:81.99ms
+[2025-07-06 12:45:05] [Rank 0] step:9561/10000 train_time:784486ms step_avg:82.05ms
+[2025-07-06 12:45:06] [Rank 0] step:9581/10000 train_time:785978ms step_avg:82.04ms
+[2025-07-06 12:45:08] [Rank 0] step:9601/10000 train_time:787666ms step_avg:82.04ms
+[2025-07-06 12:45:09] [Rank 0] step:9621/10000 train_time:789162ms step_avg:82.02ms
+[2025-07-06 12:45:12] [Rank 0] step:9641/10000 train_time:791413ms step_avg:82.09ms
+[2025-07-06 12:45:13] [Rank 0] step:9661/10000 train_time:792903ms step_avg:82.07ms
+[2025-07-06 12:45:15] [Rank 0] step:9681/10000 train_time:794397ms step_avg:82.06ms
+[2025-07-06 12:45:16] [Rank 0] step:9701/10000 train_time:795891ms step_avg:82.04ms
+[2025-07-06 12:45:18] [Rank 0] step:9721/10000 train_time:797436ms step_avg:82.03ms
+[2025-07-06 12:45:20] [Rank 0] step:9741/10000 train_time:799521ms step_avg:82.08ms
+[2025-07-06 12:45:21] [Rank 0] step:9761/10000 train_time:801124ms step_avg:82.07ms
+[2025-07-06 12:45:23] [Rank 0] step:9781/10000 train_time:802621ms step_avg:82.06ms
+[2025-07-06 12:45:24] [Rank 0] step:9801/10000 train_time:804117ms step_avg:82.04ms
+[2025-07-06 12:45:26] [Rank 0] step:9821/10000 train_time:806261ms step_avg:82.10ms
+[2025-07-06 12:45:28] [Rank 0] step:9841/10000 train_time:807861ms step_avg:82.09ms
+[2025-07-06 12:45:30] [Rank 0] step:9861/10000 train_time:809460ms step_avg:82.09ms
+[2025-07-06 12:45:31] [Rank 0] step:9881/10000 train_time:810957ms step_avg:82.07ms
+[2025-07-06 12:45:33] [Rank 0] step:9901/10000 train_time:812708ms step_avg:82.08ms
+[2025-07-06 12:45:35] [Rank 0] step:9921/10000 train_time:814697ms step_avg:82.12ms
+[2025-07-06 12:45:36] [Rank 0] step:9941/10000 train_time:816295ms step_avg:82.11ms
+[2025-07-06 12:45:38] [Rank 0] step:9961/10000 train_time:817892ms step_avg:82.11ms
+[2025-07-06 12:45:40] [Rank 0] step:9981/10000 train_time:819392ms step_avg:82.10ms
+[2025-07-06 12:45:42] [Rank 0] step:10000/10000 train_time:821483ms step_avg:82.15ms
+[2025-07-06 12:45:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 12:45:43] [Rank 0] PRINT: step:10000/10000 train_loss:0.9206 val_loss:0.9208 train_time:821563ms step_avg:82.16ms
+[2025-07-06 12:45:43] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 12:45:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 12:45:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 12:51:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 12:51:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 12:51:03] [Rank 0] Total Loss: 5.7629
+[2025-07-06 12:51:03] [Rank 0] Total FTA: 0.9109
+[2025-07-06 12:51:03] [Rank 0] Group 0 Loss: 5.9702
+[2025-07-06 12:51:03] [Rank 0] Group 1 Loss: 5.5340
+[2025-07-06 12:51:03] [Rank 0] Group 2 Loss: 5.3032
+[2025-07-06 12:51:03] [Rank 0] Group 3 Loss: 5.9638
+[2025-07-06 12:51:03] [Rank 0] Group 4 Loss: 5.8401
+[2025-07-06 12:51:03] [Rank 0] Group 5 Loss: 5.7087
+[2025-07-06 12:51:03] [Rank 0] Group 6 Loss: 5.5992
+[2025-07-06 12:51:03] [Rank 0] Group 7 Loss: 5.8161
+[2025-07-06 12:51:03] [Rank 0] Group 8 Loss: 5.8084
+[2025-07-06 12:51:03] [Rank 0] Group 9 Loss: 5.7456
+[2025-07-06 12:51:03] [Rank 0] Group 10 Loss: 5.7668
+[2025-07-06 12:51:03] [Rank 0] Group 11 Loss: 5.8081
+[2025-07-06 12:51:04] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 12:51:04] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 12:51:04] [Rank 0] Group 2 FTA: 0.9271
+[2025-07-06 12:51:04] [Rank 0] Group 3 FTA: 0.7109
+[2025-07-06 12:51:04] [Rank 0] Group 4 FTA: 0.8750
+[2025-07-06 12:51:04] [Rank 0] Group 5 FTA: 0.9245
+[2025-07-06 12:51:04] [Rank 0] Group 6 FTA: 0.9193
+[2025-07-06 12:51:04] [Rank 0] Group 7 FTA: 0.8828
+[2025-07-06 12:51:04] [Rank 0] Group 8 FTA: 0.9115
+[2025-07-06 12:51:04] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-06 12:51:04] [Rank 0] Group 10 FTA: 0.9023
+[2025-07-06 12:51:04] [Rank 0] Group 11 FTA: 0.9043
+[2025-07-06 12:51:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-06 12:51:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-06 12:51:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-06 12:51:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-06 12:51:05] [Rank 0] step:10001/10000 train_time:821586ms step_avg:82.15ms
+[2025-07-06 12:51:05] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 12:51:05 2025 ---
+[2025-07-06 12:51:05] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8b69f548fe80c6a8d30379fadf6d7e5c15d5e8c5
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/config.json
@@ -0,0 +1,23 @@
+{
+  "cli_args": {
+    "unet": false,
+    "seed": 43,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.0005
+  },
+  "hyperparameters": {
+    "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+    "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+    "val_tokens": 1966080,
+    "train_seq_len": 12288,
+    "val_seq_len": 65536,
+    "num_iterations": 10000,
+    "cooldown_frac": 0.8,
+    "vocab_size": 50257,
+    "val_loss_every": 500,
+    "save_checkpoint": false
+  },
+  "run_uuid_for_log": "210ada37-d284-472b-ac60-f253f1ba33b4",
+  "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..88ab31219a9cc83b41a7849014c708ddccb9df6b
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e1809d412cef2426c27e2b398ff8eb52dfb03c4690da7ce33445209848ee3004
+size 483567
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..397b0840913c350af71984b4e64fcc5b30eca641
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d333d703e7d0d179a0e19c45e3f3ad350b6f59ee2a0bea9ab71d6b5907a29f2
+size 390766
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..b0889fa9efb117ebb61d3ee4687c7f12f430495e
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ec33cc9d3af32b9a453ba289c51024eaa97cd9da46eb518d57076129a87dd2e7
+size 110554
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..c5fd868f3a305506debe386096d30602bca14c53
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b72455d67984976a55be6261cbb92b1dd0d04c97a678d41747e70bb24d3957c
+size 114754
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/training_log_210ada37-d284-472b-ac60-f253f1ba33b4.txt b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/training_log_210ada37-d284-472b-ac60-f253f1ba33b4.txt
new file mode 100644
index 0000000000000000000000000000000000000000..25e4e937fc4e14b67c862e958c6f827ae89e23b6
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/training_log_210ada37-d284-472b-ac60-f253f1ba33b4.txt
@@ -0,0 +1,5144 @@
+[2025-07-08 01:03:04] [Rank 0] PRINT: --- Script Start: Tue Jul 8 01:03:04 2025 ---
+[2025-07-08 01:03:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005)
+[2025-07-08 01:03:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-08 01:03:04] [Rank 0] PRINT: Using fixed seed: 43
+[2025-07-08 01:03:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43
+[2025-07-08 01:03:04] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, 
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
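+# -----------------------------------------------------------------------------
+# [Editor's sketch, not part of the original run] For readers who want to build
+# their own train_*.bin / val_*.bin shards: _load_data_shard above expects a
+# 256-word int32 header (magic 20240520, version 1, token count) followed by
+# the tokens as raw uint16. The writer below is a minimal, hypothetical
+# counterpart inferred from those asserts; this script never calls it.
+def write_data_shard(path, token_ids):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520                # magic number checked by the loader
+    header[1] = 1                       # format version checked by the loader
+    header[2] = len(token_ids)          # token count; loader reads this many uint16s
+    with open(path, "wb") as f:
+        f.write(header.tobytes())       # 256 * 4 header bytes; loader seeks past these
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())
+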
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append exactly one copy of each message to the run log; the guard also
+        # keeps runs without a logfile (logfile is None) from crashing here.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+        print0(code) # Log the code
+        # ... 
(other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    # Kept for reference only; the cached-sample path below is what is actually read.
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # keep at most num_samples cached entries
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function; history is keyed by training step."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(e) for e in step_data.keys()])
+            values = [step_data[str(e)] for e in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 01:03:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup: the run directory is derived once below from base_log_dir.
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
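+# Illustrative sketch (not executed by this script): because each run directory
+# below pairs a training log with a config.json, a finished run can be reloaded
+# from disk alone. `load_run_config` is a hypothetical helper name, assuming the
+# layout written by the config-saving code that follows.
+#
+#   def load_run_config(run_dir: Path) -> dict:
+#       with open(run_dir / "config.json") as f:
+#           return json.load(f)
+#
+#   cfg = load_run_config(Path("logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43"))
+#   assert cfg["cli_args"]["optimizer_mode"] == 0
+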
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+# Construct the run folder name from config and seed (shared by all ranks)
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # run_folder_name / run_dir_path were already derived above; just materialize the directory
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            # Append exactly once per call (a double write here duplicates every log line)
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+        print0(code) # Log the code
+        # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 01:03:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-08 01:03:04] [Rank 0] PRINT: Constructing model...
+[2025-07-08 01:03:06] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-08 01:03:06] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-08 01:03:06] [Rank 0] PRINT: Testing model forward function:
+[2025-07-08 01:03:07] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-08 01:03:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-08 01:03:07] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-08 01:03:07] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-08 01:03:07] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-08 01:03:07] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-08 01:03:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-08 01:03:07] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-08 01:03:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-08 01:03:07] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-08 01:03:07] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-08 01:03:07] [Rank 0] PRINT: Model compilation complete.
+[2025-07-08 01:03:07] [Rank 0] PRINT: Starting warmup...
+[2025-07-08 01:04:15] [Rank 0] PRINT: Warmup complete.
+[2025-07-08 01:04:15] [Rank 0] PRINT: Starting training...
+[2025-07-08 01:04:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
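+The two other schedules this run uses can be checked the same way (a sketch with the constants from the script above): the FlexAttention window grows from 128 to 1792 tokens over training, and Muon's momentum warms up from 0.85 to 0.95 over the first 300 steps.
+
+def window_tokens(step, num_iterations=10000):
+    # Mirrors get_window_size_blocks above (window measured in 128-token blocks).
+    x = min(max(step / num_iterations, 0.0), 1.0)
+    v = 1728 * x
+    return max(128, next(w for w in range(128, int(v) + 1 + 128, 128) if w >= v))
+
+def muon_momentum(step):
+    # Mirrors the in-loop momentum warmup applied to optimizer2.
+    frac = min(step / 300, 1)
+    return (1 - frac) * 0.85 + frac * 0.95
+
+# window_tokens(0) == 128, window_tokens(5000) == 896, window_tokens(10000) == 1792
+# muon_momentum(0) == 0.85, muon_momentum(150) == 0.90, muon_momentum(300) == 0.95
+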
+[2025-07-08 01:04:24] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-08 01:04:25] [Rank 0] step:21/10000 train_time:1554ms step_avg:74.02ms
+[2025-07-08 01:04:27] [Rank 0] step:41/10000 train_time:3007ms step_avg:73.34ms
+[2025-07-08 01:04:28] [Rank 0] step:61/10000 train_time:4462ms step_avg:73.14ms
+[2025-07-08 01:04:30] [Rank 0] step:81/10000 train_time:5917ms step_avg:73.05ms
+[2025-07-08 01:04:31] [Rank 0] step:101/10000 train_time:7618ms step_avg:75.43ms
+[2025-07-08 01:04:33] [Rank 0] step:121/10000 train_time:9074ms step_avg:74.99ms
+[2025-07-08 01:04:34] [Rank 0] step:141/10000 train_time:10532ms step_avg:74.69ms
+[2025-07-08 01:04:36] [Rank 0] step:161/10000 train_time:11992ms step_avg:74.48ms
+[2025-07-08 01:04:38] [Rank 0] step:181/10000 train_time:13449ms step_avg:74.30ms
+[2025-07-08 01:04:39] [Rank 0] step:201/10000 train_time:15572ms step_avg:77.47ms
+[2025-07-08 01:04:41] [Rank 0] step:221/10000 train_time:17037ms step_avg:77.09ms
+[2025-07-08 01:04:42] [Rank 0] step:241/10000 train_time:18503ms step_avg:76.78ms
+[2025-07-08 01:04:44] [Rank 0] step:261/10000 train_time:19967ms step_avg:76.50ms
+[2025-07-08 01:04:46] [Rank 0] step:281/10000 train_time:22075ms step_avg:78.56ms
+[2025-07-08 01:04:47] [Rank 0] step:301/10000 train_time:23538ms step_avg:78.20ms
+[2025-07-08 01:04:49] [Rank 0] step:321/10000 train_time:25004ms step_avg:77.89ms
+[2025-07-08 01:04:50] [Rank 0] step:341/10000 train_time:26473ms step_avg:77.63ms
+[2025-07-08 01:04:52] [Rank 0] step:361/10000 train_time:27940ms step_avg:77.40ms
+[2025-07-08 01:04:54] [Rank 0] step:381/10000 train_time:30076ms step_avg:78.94ms
+[2025-07-08 01:04:55] [Rank 0] step:401/10000 train_time:31703ms step_avg:79.06ms
+[2025-07-08 01:04:57] [Rank 0] step:421/10000 train_time:33310ms step_avg:79.12ms
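+The Group 0-11 breakdown reported at every detailed evaluation below follows the power-law class construction in the script (generate_powerlaw_selection_counts with m=11). A worked summary of that distribution:
+
+def powerlaw_summary(m=11):
+    # Group 0: one class with 2**m samples; group g >= 1: 2**(g-1) classes with
+    # 2**(m-g) samples each, so every group g >= 1 totals 2**(m-1) samples.
+    return [(g, 1 if g == 0 else 2 ** (g - 1), 2 ** (m - g)) for g in range(m + 1)]
+
+# For m=11: 12 groups and 2048 classes overall; group 0 holds one class with
+# 2048 samples, and each of groups 1-11 holds 1024 samples in total.
+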
+[2025-07-08 01:04:58] [Rank 0] step:441/10000 train_time:34780ms step_avg:78.87ms
+[2025-07-08 01:05:01] [Rank 0] step:461/10000 train_time:36891ms step_avg:80.02ms
+[2025-07-08 01:05:02] [Rank 0] step:481/10000 train_time:38363ms step_avg:79.76ms
+[2025-07-08 01:05:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:05:04] [Rank 0] PRINT: step:500/10000 train_loss:6.8671 val_loss:4.3916 train_time:39833ms step_avg:79.67ms
+[2025-07-08 01:05:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:05:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:05:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:10:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:10:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:10:26] [Rank 0]   Total Loss: 5.6020
+[2025-07-08 01:10:26] [Rank 0]   Total FTA: 0.0588
+[2025-07-08 01:10:26] [Rank 0]   Group 0 Loss: 5.5618
+[2025-07-08 01:10:26] [Rank 0]   Group 1 Loss: 5.6940
+[2025-07-08 01:10:26] [Rank 0]   Group 2 Loss: 5.6356
+[2025-07-08 01:10:26] [Rank 0]   Group 3 Loss: 5.5477
+[2025-07-08 01:10:26] [Rank 0]   Group 4 Loss: 5.5996
+[2025-07-08 01:10:26] [Rank 0]   Group 5 Loss: 5.6006
+[2025-07-08 01:10:26] [Rank 0]   Group 6 Loss: 5.6047
+[2025-07-08 01:10:26] [Rank 0]   Group 7 Loss: 5.6183
+[2025-07-08 01:10:26] [Rank 0]   Group 8 Loss: 5.5782
+[2025-07-08 01:10:26] [Rank 0]   Group 9 Loss: 5.5998
+[2025-07-08 01:10:26] [Rank 0]   Group 10 Loss: 5.6062
+[2025-07-08 01:10:26] [Rank 0]   Group 11 Loss: 5.6074
+[2025-07-08 01:10:26] [Rank 0]   Group 0 FTA: 0.1743
+[2025-07-08 01:10:26] [Rank 0]   Group 1 FTA: 0.0000
+[2025-07-08 01:10:26] [Rank 0]   Group 2 FTA: 0.0000
+[2025-07-08 01:10:26] [Rank 0]   Group 3 FTA: 0.0391
+[2025-07-08 01:10:26] [Rank 0]   Group 4 FTA: 0.0156
+[2025-07-08 01:10:26] [Rank 0]   Group 5 FTA: 0.0286
+[2025-07-08 01:10:26] [Rank 0]   Group 6 FTA: 0.0573
+[2025-07-08 01:10:26] [Rank 0]   Group 7 FTA: 0.0859
+[2025-07-08 01:10:26] [Rank 0]   Group 8 FTA: 0.0521
+[2025-07-08 01:10:26] [Rank 0]   Group 9 FTA: 0.0430
+[2025-07-08 01:10:26] [Rank 0]   Group 10 FTA: 0.0449
+[2025-07-08 01:10:26] [Rank 0]   Group 11 FTA: 0.0547
+[2025-07-08 01:10:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:10:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:10:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:10:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:10:27] [Rank 0] step:501/10000 train_time:39853ms step_avg:79.55ms
+[2025-07-08 01:10:29] [Rank 0] step:521/10000 train_time:41334ms step_avg:79.34ms
+[2025-07-08 01:10:31] [Rank 0] step:541/10000 train_time:42854ms step_avg:79.21ms
+[2025-07-08 01:10:32] [Rank 0] step:561/10000 train_time:44904ms step_avg:80.04ms
+[2025-07-08 01:10:34] [Rank 0] step:581/10000 train_time:46366ms step_avg:79.80ms
+[2025-07-08 01:10:35] [Rank 0] step:601/10000 train_time:47832ms step_avg:79.59ms
+[2025-07-08 01:10:37] [Rank 0] step:621/10000 train_time:49300ms step_avg:79.39ms
+[2025-07-08 01:10:39] [Rank 0] step:641/10000 train_time:51438ms step_avg:80.25ms
+[2025-07-08 01:10:40] [Rank 0] step:661/10000 train_time:52904ms step_avg:80.04ms
+[2025-07-08 01:10:42] [Rank 0] step:681/10000 train_time:54373ms step_avg:79.84ms
+[2025-07-08 01:10:43] [Rank 0] step:701/10000 train_time:55842ms step_avg:79.66ms
+[2025-07-08 01:10:45] [Rank 0] step:721/10000 train_time:57365ms step_avg:79.56ms
+[2025-07-08 01:10:47] [Rank 0] step:741/10000 train_time:59437ms step_avg:80.21ms
+[2025-07-08 01:10:48] [Rank 0] step:761/10000 train_time:60917ms step_avg:80.05ms
+[2025-07-08 01:10:50] [Rank 0] step:781/10000 train_time:62394ms step_avg:79.89ms
+[2025-07-08 01:10:51] [Rank 0] step:801/10000 train_time:63873ms step_avg:79.74ms
+[2025-07-08 01:10:53] [Rank 0] step:821/10000 train_time:66002ms step_avg:80.39ms
+[2025-07-08 01:10:55] [Rank 0] step:841/10000 train_time:67485ms step_avg:80.24ms
+[2025-07-08 01:10:56] [Rank 0] step:861/10000 train_time:68961ms step_avg:80.09ms
+[2025-07-08 01:10:58] [Rank 0] step:881/10000 train_time:70438ms step_avg:79.95ms
+[2025-07-08 01:11:00] [Rank 0] step:901/10000 train_time:71970ms step_avg:79.88ms
+[2025-07-08 01:11:01] [Rank 0] step:921/10000 train_time:74044ms step_avg:80.40ms
+[2025-07-08 01:11:03] [Rank 0] step:941/10000 train_time:75522ms step_avg:80.26ms
+[2025-07-08 01:11:04] [Rank 0] step:961/10000 train_time:77001ms step_avg:80.13ms
+[2025-07-08 01:11:06] [Rank 0] step:981/10000 train_time:78484ms step_avg:80.00ms
+[2025-07-08 01:11:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:11:09] [Rank 0] PRINT: step:1000/10000 train_loss:2.9559 val_loss:2.0115 train_time:80626ms step_avg:80.63ms
+[2025-07-08 01:11:09] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:11:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:11:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:16:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:16:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:16:32] [Rank 0] Total Loss: 4.1271
+[2025-07-08 01:16:32] [Rank 0] Total FTA: 0.0822
+[2025-07-08 01:16:32] [Rank 0] Group 0 Loss: 4.3866
+[2025-07-08 01:16:32] [Rank 0] Group 1 Loss: 4.1564
+[2025-07-08 01:16:32] [Rank 0] Group 2 Loss: 3.9958
+[2025-07-08 01:16:32] [Rank 0] Group 3 Loss: 4.0512
+[2025-07-08 01:16:32] [Rank 0] Group 4 Loss: 4.0970
+[2025-07-08 01:16:32] [Rank 0] Group 5 Loss: 4.0476
+[2025-07-08 01:16:32] [Rank 0] Group 6 Loss: 4.0256
+[2025-07-08 01:16:32] [Rank 0] Group 7 Loss: 4.1026
+[2025-07-08 01:16:32] [Rank 0] Group 8 Loss: 4.1219
+[2025-07-08 01:16:32] [Rank 0] Group 9 Loss: 4.0986
+[2025-07-08 01:16:32] [Rank 0] Group 10 Loss: 4.1060
+[2025-07-08 01:16:32] [Rank 0] Group 11 Loss: 4.1070
+[2025-07-08 01:16:32] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-08 01:16:32] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 01:16:32] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-08 01:16:32] [Rank 0] Group 3 FTA: 0.0443
+[2025-07-08 01:16:32] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-08 01:16:32] [Rank 0] Group 5 FTA: 0.0703
+[2025-07-08 01:16:32] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-08 01:16:32] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-08 01:16:32] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-08 01:16:32] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-08 01:16:32] [Rank 0] Group 10 FTA: 0.0840
+[2025-07-08 01:16:32] [Rank 0] Group 11 FTA: 0.0801
+[2025-07-08 01:16:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:16:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:16:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:16:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:16:34] [Rank 0] step:1001/10000 train_time:80645ms step_avg:80.56ms
+[2025-07-08 01:16:35] [Rank 0] step:1021/10000 train_time:82139ms step_avg:80.45ms
+[2025-07-08 01:16:37] [Rank 0] step:1041/10000 train_time:83610ms step_avg:80.32ms
+[2025-07-08 01:16:38] [Rank 0] step:1061/10000 train_time:85081ms step_avg:80.19ms
+[2025-07-08 01:16:40] [Rank 0] step:1081/10000 train_time:87225ms step_avg:80.69ms
+[2025-07-08 01:16:42] [Rank 0] step:1101/10000 train_time:88677ms step_avg:80.54ms
+[2025-07-08 01:16:43] [Rank 0] step:1121/10000 train_time:90151ms step_avg:80.42ms
+[2025-07-08 01:16:45] [Rank 0] step:1141/10000 train_time:91625ms step_avg:80.30ms
+[2025-07-08 01:16:46] [Rank 0] step:1161/10000 train_time:93102ms step_avg:80.19ms
+[2025-07-08 01:16:48] [Rank 0] step:1181/10000 train_time:95238ms step_avg:80.64ms
+[2025-07-08 01:16:50] [Rank 0] step:1201/10000 train_time:96715ms step_avg:80.53ms
+[2025-07-08 01:16:51] [Rank 0] step:1221/10000 train_time:98191ms step_avg:80.42ms
+[2025-07-08 01:16:53] [Rank 0] step:1241/10000 train_time:99670ms step_avg:80.31ms
+[2025-07-08 01:16:55] [Rank 0] step:1261/10000 train_time:101147ms step_avg:80.21ms
+[2025-07-08 01:16:56] [Rank 0] step:1281/10000 train_time:103275ms step_avg:80.62ms
+[2025-07-08 01:16:58] [Rank 0] step:1301/10000 train_time:104755ms step_avg:80.52ms
+[2025-07-08 01:16:59] [Rank 0] step:1321/10000 train_time:106235ms step_avg:80.42ms
+[2025-07-08 01:17:01] [Rank 0] step:1341/10000 train_time:107712ms step_avg:80.32ms
+[2025-07-08 01:17:03] [Rank 0] step:1361/10000 train_time:109863ms step_avg:80.72ms
+[2025-07-08 01:17:04] [Rank 0] step:1381/10000 train_time:111341ms step_avg:80.62ms
+[2025-07-08 01:17:06] [Rank 0] step:1401/10000 train_time:112822ms step_avg:80.53ms
+[2025-07-08 01:17:07] [Rank 0] step:1421/10000 train_time:114303ms step_avg:80.44ms
+[2025-07-08 01:17:09] [Rank 0] step:1441/10000 train_time:115783ms step_avg:80.35ms
+[2025-07-08 01:17:11] [Rank 0] step:1461/10000 train_time:117933ms step_avg:80.72ms
+[2025-07-08 01:17:12] [Rank 0] step:1481/10000 train_time:119413ms step_avg:80.63ms
+[2025-07-08 01:17:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:17:15] [Rank 0] PRINT: step:1500/10000 train_loss:1.7506 val_loss:1.6006 train_time:120893ms step_avg:80.60ms
+[2025-07-08 01:17:15] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:17:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:17:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:22:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:22:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:22:37] [Rank 0] Total Loss: 4.2426
+[2025-07-08 01:22:37] [Rank 0] Total FTA: 0.1225
+[2025-07-08 01:22:37] [Rank 0] Group 0 Loss: 4.4256
+[2025-07-08 01:22:37] [Rank 0] Group 1 Loss: 4.2260
+[2025-07-08 01:22:37] [Rank 0] Group 2 Loss: 4.0903
+[2025-07-08 01:22:37] [Rank 0] Group 3 Loss: 4.2373
+[2025-07-08 01:22:37] [Rank 0] Group 4 Loss: 4.2773
+[2025-07-08 01:22:37] [Rank 0] Group 5 Loss: 4.1851
+[2025-07-08 01:22:37] [Rank 0] Group 6 Loss: 4.0957
+[2025-07-08 01:22:37] [Rank 0] Group 7 Loss: 4.2330
+[2025-07-08 01:22:37] [Rank 0] Group 8 Loss: 4.2008
+[2025-07-08 01:22:37] [Rank 0] Group 9 Loss: 4.2648
+[2025-07-08 01:22:37] [Rank 0] Group 10 Loss: 4.2318
+[2025-07-08 01:22:37] [Rank 0] Group 11 Loss: 4.2536
+[2025-07-08 01:22:37] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-08 01:22:37] [Rank 0] Group 1 FTA: 0.1458
+[2025-07-08 01:22:37] [Rank 0] Group 2 FTA: 0.1458
+[2025-07-08 01:22:37] [Rank 0] Group 3 FTA: 0.0781
+[2025-07-08 01:22:37] [Rank 0] Group 4 FTA: 0.0677
+[2025-07-08 01:22:37] [Rank 0] Group 5 FTA: 0.1146
+[2025-07-08 01:22:37] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-08 01:22:37] [Rank 0] Group 7 FTA: 0.1589
+[2025-07-08 01:22:37] [Rank 0] Group 8 FTA: 0.1276
+[2025-07-08 01:22:37] [Rank 0] Group 9 FTA: 0.1133
+[2025-07-08 01:22:37] [Rank 0] Group 10 FTA: 0.1230
+[2025-07-08 01:22:37] [Rank 0] Group 11 FTA: 0.1133
+[2025-07-08 01:22:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:22:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:22:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:22:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:22:38] [Rank 0] step:1501/10000 train_time:120913ms step_avg:80.55ms
+[2025-07-08 01:22:40] [Rank 0] step:1521/10000 train_time:122403ms step_avg:80.48ms
+[2025-07-08 01:22:42] [Rank 0] step:1541/10000 train_time:124536ms step_avg:80.81ms
+[2025-07-08 01:22:44] [Rank 0] step:1561/10000 train_time:126006ms step_avg:80.72ms
+[2025-07-08 01:22:45] [Rank 0] step:1581/10000 train_time:127483ms step_avg:80.63ms
+[2025-07-08 01:22:47] [Rank 0] step:1601/10000 train_time:128956ms step_avg:80.55ms
+[2025-07-08 01:22:48] [Rank 0] step:1621/10000 train_time:130685ms step_avg:80.62ms
+[2025-07-08 01:22:50] [Rank 0] step:1641/10000 train_time:132390ms step_avg:80.68ms
+[2025-07-08 01:22:51] [Rank 0] step:1661/10000 train_time:133871ms step_avg:80.60ms
+[2025-07-08 01:22:53] [Rank 0] step:1681/10000 train_time:135342ms step_avg:80.51ms
+[2025-07-08 01:22:54] [Rank 0] step:1701/10000 train_time:136816ms step_avg:80.43ms
+[2025-07-08 01:22:56] [Rank 0] step:1721/10000 train_time:138940ms step_avg:80.73ms
+[2025-07-08 01:22:58] [Rank 0] step:1741/10000 train_time:140414ms step_avg:80.65ms
+[2025-07-08 01:22:59] [Rank 0] step:1761/10000 train_time:141890ms step_avg:80.57ms
+[2025-07-08 01:23:01] [Rank 0] step:1781/10000 train_time:143367ms step_avg:80.50ms
+[2025-07-08 01:23:03] [Rank 0] step:1801/10000 train_time:145518ms step_avg:80.80ms
+[2025-07-08 01:23:05] [Rank 0] step:1821/10000 train_time:146976ms step_avg:80.71ms
+[2025-07-08 01:23:06] [Rank 0] step:1841/10000 train_time:148454ms step_avg:80.64ms
+[2025-07-08 01:23:07] [Rank 0] step:1861/10000 train_time:149935ms step_avg:80.57ms
+[2025-07-08 01:23:09] [Rank 0] step:1881/10000 train_time:151413ms step_avg:80.50ms
+[2025-07-08 01:23:11] [Rank 0] step:1901/10000 train_time:153562ms step_avg:80.78ms
+[2025-07-08 01:23:13] [Rank 0] step:1921/10000 train_time:155041ms step_avg:80.71ms
+[2025-07-08 01:23:14] [Rank 0] step:1941/10000 train_time:156520ms step_avg:80.64ms
+[2025-07-08 01:23:16] [Rank 0] step:1961/10000 train_time:158001ms step_avg:80.57ms
+[2025-07-08 01:23:18] [Rank 0] step:1981/10000 train_time:159483ms step_avg:80.51ms
+[2025-07-08 01:23:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:23:20] [Rank 0] PRINT: step:2000/10000 train_loss:1.5055 val_loss:1.4430 train_time:161603ms step_avg:80.80ms
+[2025-07-08 01:23:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:23:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:23:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:28:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:28:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:28:42] [Rank 0] Total Loss: 4.4393
+[2025-07-08 01:28:42] [Rank 0] Total FTA: 0.2256
+[2025-07-08 01:28:42] [Rank 0] Group 0 Loss: 4.8241
+[2025-07-08 01:28:42] [Rank 0] Group 1 Loss: 4.3783
+[2025-07-08 01:28:42] [Rank 0] Group 2 Loss: 4.2109
+[2025-07-08 01:28:42] [Rank 0] Group 3 Loss: 4.4056
+[2025-07-08 01:28:42] [Rank 0] Group 4 Loss: 4.3365
+[2025-07-08 01:28:42] [Rank 0] Group 5 Loss: 4.3729
+[2025-07-08 01:28:42] [Rank 0] Group 6 Loss: 4.3136
+[2025-07-08 01:28:42] [Rank 0] Group 7 Loss: 4.4156
+[2025-07-08 01:28:42] [Rank 0] Group 8 Loss: 4.4211
+[2025-07-08 01:28:42] [Rank 0] Group 9 Loss: 4.3968
+[2025-07-08 01:28:42] [Rank 0] Group 10 Loss: 4.4294
+[2025-07-08 01:28:42] [Rank 0] Group 11 Loss: 4.4134
+[2025-07-08 01:28:42] [Rank 0] Group 0 FTA: 0.3498
+[2025-07-08 01:28:42] [Rank 0] Group 1 FTA: 0.1849
+[2025-07-08 01:28:42] [Rank 0] Group 2 FTA: 0.2396
+[2025-07-08 01:28:42] [Rank 0] Group 3 FTA: 0.1823
+[2025-07-08 01:28:42] [Rank 0] Group 4 FTA: 0.1042
+[2025-07-08 01:28:42] [Rank 0] Group 5 FTA: 0.2135
+[2025-07-08 01:28:42] [Rank 0] Group 6 FTA: 0.1745
+[2025-07-08 01:28:42] [Rank 0] Group 7 FTA: 0.1849
+[2025-07-08 01:28:42] [Rank 0] Group 8 FTA: 0.1953
+[2025-07-08 01:28:42] [Rank 0] Group 9 FTA: 0.2500
+[2025-07-08 01:28:42] [Rank 0] Group 10 FTA: 0.2402
+[2025-07-08 01:28:42] [Rank 0] Group 11 FTA: 0.2412
+[2025-07-08 01:28:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:28:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:28:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:28:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:28:44] [Rank 0] step:2001/10000 train_time:161622ms step_avg:80.77ms
+[2025-07-08 01:28:45] [Rank 0] step:2021/10000 train_time:163094ms step_avg:80.70ms
+[2025-07-08 01:28:47] [Rank 0] step:2041/10000 train_time:164561ms step_avg:80.63ms
+[2025-07-08 01:28:48] [Rank 0] step:2061/10000 train_time:166038ms step_avg:80.56ms
+[2025-07-08 01:28:50] [Rank 0] step:2081/10000 train_time:168171ms step_avg:80.81ms
+[2025-07-08 01:28:52] [Rank 0] step:2101/10000 train_time:169640ms step_avg:80.74ms
+[2025-07-08 01:28:53] [Rank 0] step:2121/10000 train_time:171110ms step_avg:80.67ms
+[2025-07-08 01:28:55] [Rank 0] step:2141/10000 train_time:172582ms step_avg:80.61ms
+[2025-07-08 01:28:57] [Rank 0] step:2161/10000 train_time:174051ms step_avg:80.54ms
+[2025-07-08 01:28:58] [Rank 0] step:2181/10000 train_time:176174ms step_avg:80.78ms
+[2025-07-08 01:29:00] [Rank 0] step:2201/10000 train_time:177643ms step_avg:80.71ms
+[2025-07-08 01:29:01] [Rank 0] step:2221/10000 train_time:179116ms step_avg:80.65ms
+[2025-07-08 01:29:03] [Rank 0] step:2241/10000 train_time:180612ms step_avg:80.59ms
+[2025-07-08 01:29:05] [Rank 0] step:2261/10000 train_time:182776ms step_avg:80.84ms
+[2025-07-08 01:29:07] [Rank 0] step:2281/10000 train_time:184271ms step_avg:80.79ms
+[2025-07-08 01:29:08] [Rank 0] step:2301/10000 train_time:185768ms step_avg:80.73ms
+[2025-07-08 01:29:10] [Rank 0] step:2321/10000 train_time:187532ms step_avg:80.80ms
+[2025-07-08 01:29:12] [Rank 0] step:2341/10000 train_time:189287ms step_avg:80.86ms
+[2025-07-08 01:29:13] [Rank 0] step:2361/10000 train_time:191175ms step_avg:80.97ms
+[2025-07-08 01:29:15] [Rank 0] step:2381/10000 train_time:192674ms step_avg:80.92ms
+[2025-07-08 01:29:16] [Rank 0] step:2401/10000 train_time:194173ms step_avg:80.87ms
+[2025-07-08 01:29:18] [Rank 0] step:2421/10000 train_time:195671ms step_avg:80.82ms
+[2025-07-08 01:29:20] [Rank 0] step:2441/10000 train_time:197815ms step_avg:81.04ms
+[2025-07-08 01:29:22] [Rank 0] step:2461/10000 train_time:199314ms step_avg:80.99ms
+[2025-07-08 01:29:23] [Rank 0] step:2481/10000 train_time:200812ms step_avg:80.94ms
+[2025-07-08 01:29:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:29:25] [Rank 0] PRINT: step:2500/10000 train_loss:1.3709 val_loss:1.3083 train_time:202312ms step_avg:80.92ms
+[2025-07-08 01:29:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:29:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:29:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:34:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:34:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:34:48] [Rank 0] Total Loss: 4.6322
+[2025-07-08 01:34:48] [Rank 0] Total FTA: 0.3124
+[2025-07-08 01:34:48] [Rank 0] Group 0 Loss: 4.8665
+[2025-07-08 01:34:48] [Rank 0] Group 1 Loss: 4.8286
+[2025-07-08 01:34:48] [Rank 0] Group 2 Loss: 4.2807
+[2025-07-08 01:34:48] [Rank 0] Group 3 Loss: 4.5941
+[2025-07-08 01:34:48] [Rank 0] Group 4 Loss: 4.6245
+[2025-07-08 01:34:48] [Rank 0] Group 5 Loss: 4.5283
+[2025-07-08 01:34:48] [Rank 0] Group 6 Loss: 4.5688
+[2025-07-08 01:34:48] [Rank 0] Group 7 Loss: 4.6124
+[2025-07-08 01:34:48] [Rank 0] Group 8 Loss: 4.6159
+[2025-07-08 01:34:48] [Rank 0] Group 9 Loss: 4.5320
+[2025-07-08 01:34:48] [Rank 0] Group 10 Loss: 4.6070
+[2025-07-08 01:34:48] [Rank 0] Group 11 Loss: 4.6457
+[2025-07-08 01:34:48] [Rank 0] Group 0 FTA: 0.3147
+[2025-07-08 01:34:48] [Rank 0] Group 1 FTA: 0.3021
+[2025-07-08 01:34:48] [Rank 0] Group 2 FTA: 0.3646
+[2025-07-08 01:34:48] [Rank 0] Group 3 FTA: 0.3203
+[2025-07-08 01:34:48] [Rank 0] Group 4 FTA: 0.1745
+[2025-07-08 01:34:48] [Rank 0] Group 5 FTA: 0.3802
+[2025-07-08 01:34:48] [Rank 0] Group 6 FTA: 0.2812
+[2025-07-08 01:34:48] [Rank 0] Group 7 FTA: 0.3594
+[2025-07-08 01:34:48] [Rank 0] Group 8 FTA: 0.3073
+[2025-07-08 01:34:48] [Rank 0] Group 9 FTA: 0.3242
+[2025-07-08 01:34:48] [Rank 0] Group 10 FTA: 0.3164
+[2025-07-08 01:34:48] [Rank 0] Group 11 FTA: 0.3096
+[2025-07-08 01:34:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:34:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:34:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:34:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:34:50] [Rank 0] step:2501/10000 train_time:202331ms step_avg:80.90ms
+[2025-07-08 01:34:52] [Rank 0] step:2521/10000 train_time:204091ms step_avg:80.96ms
+[2025-07-08 01:34:53] [Rank 0] step:2541/10000 train_time:205970ms step_avg:81.06ms
+[2025-07-08 01:34:55] [Rank 0] step:2561/10000 train_time:207463ms step_avg:81.01ms
+[2025-07-08 01:34:56] [Rank 0] step:2581/10000 train_time:208957ms step_avg:80.96ms
+[2025-07-08 01:34:58] [Rank 0] step:2601/10000 train_time:210452ms step_avg:80.91ms
+[2025-07-08 01:35:00] [Rank 0] step:2621/10000 train_time:212613ms step_avg:81.12ms
+[2025-07-08 01:35:01] [Rank 0] step:2641/10000 train_time:214107ms step_avg:81.07ms
+[2025-07-08 01:35:03] [Rank 0] step:2661/10000 train_time:215602ms step_avg:81.02ms
+[2025-07-08 01:35:04] [Rank 0] step:2681/10000 train_time:217098ms step_avg:80.98ms
+[2025-07-08 01:35:06] [Rank 0] step:2701/10000 train_time:218642ms step_avg:80.95ms
+[2025-07-08 01:35:08] [Rank 0] step:2721/10000 train_time:220424ms step_avg:81.01ms
+[2025-07-08 01:35:09] [Rank 0] step:2741/10000 train_time:221920ms step_avg:80.96ms
+[2025-07-08 01:35:11] [Rank 0] step:2761/10000 train_time:223418ms step_avg:80.92ms
+[2025-07-08 01:35:12] [Rank 0] step:2781/10000 train_time:224916ms step_avg:80.88ms
+[2025-07-08 01:35:14] [Rank 0] step:2801/10000 train_time:227079ms step_avg:81.07ms
+[2025-07-08 01:35:16] [Rank 0] step:2821/10000 train_time:228574ms step_avg:81.03ms
+[2025-07-08 01:35:17] [Rank 0] step:2841/10000 train_time:230071ms step_avg:80.98ms
+[2025-07-08 01:35:19] [Rank 0] step:2861/10000 train_time:231569ms step_avg:80.94ms
+[2025-07-08 01:35:21] [Rank 0] step:2881/10000 train_time:233067ms step_avg:80.90ms
+[2025-07-08 01:35:23] [Rank 0] step:2901/10000 train_time:235220ms step_avg:81.08ms
+[2025-07-08 01:35:24] [Rank 0] step:2921/10000 train_time:236719ms step_avg:81.04ms
+[2025-07-08 01:35:26] [Rank 0] step:2941/10000 train_time:238217ms step_avg:81.00ms
+[2025-07-08 01:35:27] [Rank 0] step:2961/10000 train_time:239714ms step_avg:80.96ms
+[2025-07-08 01:35:29] [Rank 0] step:2981/10000 train_time:241941ms step_avg:81.16ms
+[2025-07-08 01:35:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:35:31] [Rank 0] PRINT: step:3000/10000 train_loss:1.2650 val_loss:1.2167 train_time:243438ms step_avg:81.15ms
+[2025-07-08 01:35:32] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:35:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:35:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:40:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:40:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:40:55] [Rank 0] Total Loss: 4.6535
+[2025-07-08 01:40:55] [Rank 0] Total FTA: 0.3902
+[2025-07-08 01:40:55] [Rank 0] Group 0 Loss: 4.9103
+[2025-07-08 01:40:55] [Rank 0] Group 1 Loss: 4.6839
+[2025-07-08 01:40:55] [Rank 0] Group 2 Loss: 4.2597
+[2025-07-08 01:40:55] [Rank 0] Group 3 Loss: 4.6167
+[2025-07-08 01:40:55] [Rank 0] Group 4 Loss: 4.5269
+[2025-07-08 01:40:55] [Rank 0] Group 5 Loss: 4.6587
+[2025-07-08 01:40:55] [Rank 0] Group 6 Loss: 4.5755
+[2025-07-08 01:40:55] [Rank 0] Group 7 Loss: 4.6617
+[2025-07-08 01:40:55] [Rank 0] Group 8 Loss: 4.6585
+[2025-07-08 01:40:55] [Rank 0] Group 9 Loss: 4.6751
+[2025-07-08 01:40:55] [Rank 0] Group 10 Loss: 4.6744
+[2025-07-08 01:40:55] [Rank 0] Group 11 Loss: 4.6645
+[2025-07-08 01:40:55] [Rank 0] Group 0 FTA: 0.5150
+[2025-07-08 01:40:55] [Rank 0] Group 1 FTA: 0.5078
+[2025-07-08 01:40:55] [Rank 0] Group 2 FTA: 0.3151
+[2025-07-08 01:40:55] [Rank 0] Group 3 FTA: 0.3385
+[2025-07-08 01:40:55] [Rank 0] Group 4 FTA: 0.2917
+[2025-07-08 01:40:55] [Rank 0] Group 5 FTA: 0.3646
+[2025-07-08 01:40:55] [Rank 0] Group 6 FTA: 0.3333
+[2025-07-08 01:40:55] [Rank 0] Group 7 FTA: 0.3828
+[2025-07-08 01:40:55] [Rank 0] Group 8 FTA: 0.3828
+[2025-07-08 01:40:55] [Rank 0] Group 9 FTA: 0.3945
+[2025-07-08 01:40:55] [Rank 0] Group 10 FTA: 0.4121
+[2025-07-08 01:40:55] [Rank 0] Group 11 FTA: 0.3613
+[2025-07-08 01:40:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:40:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:40:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:40:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:40:56] [Rank 0] step:3001/10000 train_time:243458ms step_avg:81.13ms
+[2025-07-08 01:40:58] [Rank 0] step:3021/10000 train_time:244951ms step_avg:81.08ms
+[2025-07-08 01:40:59] [Rank 0] step:3041/10000 train_time:246441ms step_avg:81.04ms
+[2025-07-08 01:41:01] [Rank 0] step:3061/10000 train_time:247989ms step_avg:81.02ms
+[2025-07-08 01:41:03] [Rank 0] step:3081/10000 train_time:250101ms step_avg:81.18ms
+[2025-07-08 01:41:04] [Rank 0] step:3101/10000 train_time:251594ms step_avg:81.13ms
+[2025-07-08 01:41:06] [Rank 0] step:3121/10000 train_time:253088ms step_avg:81.09ms
+[2025-07-08 01:41:07] [Rank 0] step:3141/10000 train_time:254586ms step_avg:81.05ms
+[2025-07-08 01:41:10] [Rank 0] step:3161/10000 train_time:256744ms step_avg:81.22ms
+[2025-07-08 01:41:11] [Rank 0] step:3181/10000 train_time:258238ms step_avg:81.18ms
+[2025-07-08 01:41:13] [Rank 0] step:3201/10000 train_time:259734ms step_avg:81.14ms
+[2025-07-08 01:41:14] [Rank 0] step:3221/10000 train_time:261232ms step_avg:81.10ms
+[2025-07-08 01:41:16] [Rank 0] step:3241/10000 train_time:262729ms step_avg:81.06ms
+[2025-07-08 01:41:17] [Rank 0] step:3261/10000 train_time:264461ms step_avg:81.10ms
+[2025-07-08 01:41:19] [Rank 0] step:3281/10000 train_time:265958ms step_avg:81.06ms
+[2025-07-08 01:41:20] [Rank 0] step:3301/10000 train_time:267455ms step_avg:81.02ms
+[2025-07-08 01:41:22] [Rank 0] step:3321/10000 train_time:268954ms step_avg:80.99ms
+[2025-07-08 01:41:24] [Rank 0] step:3341/10000 train_time:271111ms step_avg:81.15ms
+[2025-07-08 01:41:25] [Rank 0] step:3361/10000 train_time:272608ms step_avg:81.11ms
+[2025-07-08 01:41:27] [Rank 0] step:3381/10000 train_time:274106ms step_avg:81.07ms
+[2025-07-08 01:41:28] [Rank 0] step:3401/10000 train_time:275605ms step_avg:81.04ms
+[2025-07-08 01:41:31] [Rank 0] step:3421/10000 train_time:277360ms step_avg:81.08ms
+[2025-07-08 01:41:32] [Rank 0] step:3441/10000 train_time:279254ms step_avg:81.16ms
+[2025-07-08 01:41:34] [Rank 0] step:3461/10000 train_time:280752ms step_avg:81.12ms
+[2025-07-08 01:41:35] [Rank 0] step:3481/10000 train_time:282254ms step_avg:81.08ms
+[2025-07-08 01:41:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:41:37] [Rank 0] PRINT: step:3500/10000 train_loss:1.1910 val_loss:1.1569 train_time:283750ms step_avg:81.07ms
+[2025-07-08 01:41:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:41:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:41:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:47:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:47:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:47:01] [Rank 0] Total Loss: 4.9294
+[2025-07-08 01:47:01] [Rank 0] Total FTA: 0.3964
+[2025-07-08 01:47:01] [Rank 0] Group 0 Loss: 5.2915
+[2025-07-08 01:47:01] [Rank 0] Group 1 Loss: 5.0947
+[2025-07-08 01:47:01] [Rank 0] Group 2 Loss: 4.4915
+[2025-07-08 01:47:01] [Rank 0] Group 3 Loss: 4.8779
+[2025-07-08 01:47:01] [Rank 0] Group 4 Loss: 4.8082
+[2025-07-08 01:47:01] [Rank 0] Group 5 Loss: 4.8302
+[2025-07-08 01:47:01] [Rank 0] Group 6 Loss: 4.8354
+[2025-07-08 01:47:01] [Rank 0] Group 7 Loss: 4.9432
+[2025-07-08 01:47:01] [Rank 0] Group 8 Loss: 4.8713
+[2025-07-08 01:47:01] [Rank 0] Group 9 Loss: 4.8689
+[2025-07-08 01:47:01] [Rank 0] Group 10 Loss: 4.8845
+[2025-07-08 01:47:01] [Rank 0] Group 11 Loss: 4.9513
+[2025-07-08 01:47:01] [Rank 0] Group 0 FTA: 0.3290
+[2025-07-08 01:47:01] [Rank 0] Group 1 FTA: 0.4766
+[2025-07-08 01:47:01] [Rank 0] Group 2 FTA: 0.3880
+[2025-07-08 01:47:01] [Rank 0] Group 3 FTA: 0.3724
+[2025-07-08 01:47:01] [Rank 0] Group 4 FTA: 0.3125
+[2025-07-08 01:47:01] [Rank 0] Group 5 FTA: 0.4661
+[2025-07-08 01:47:01] [Rank 0] Group 6 FTA: 0.3724
+[2025-07-08 01:47:01] [Rank 0] Group 7 FTA: 0.3906
+[2025-07-08 01:47:01] [Rank 0] Group 8 FTA: 0.4219
+[2025-07-08 01:47:01] [Rank 0] Group 9 FTA: 0.4219
+[2025-07-08 01:47:01] [Rank 0] Group 10 FTA: 0.4199
+[2025-07-08 01:47:01] [Rank 0] Group 11 FTA: 0.4180
+[2025-07-08 01:47:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:47:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:47:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:47:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:47:02] [Rank 0] step:3501/10000 train_time:283770ms step_avg:81.05ms
+[2025-07-08 01:47:05] [Rank 0] step:3521/10000 train_time:286058ms step_avg:81.24ms
+[2025-07-08 01:47:06] [Rank 0] step:3541/10000 train_time:287549ms step_avg:81.21ms
+[2025-07-08 01:47:08] [Rank 0] step:3561/10000 train_time:289043ms step_avg:81.17ms
+[2025-07-08 01:47:09] [Rank 0] step:3581/10000 train_time:290536ms step_avg:81.13ms
+[2025-07-08 01:47:11] [Rank 0] step:3601/10000 train_time:292087ms step_avg:81.11ms
+[2025-07-08 01:47:13] [Rank 0] step:3621/10000 train_time:294182ms step_avg:81.24ms
+[2025-07-08 01:47:14] [Rank 0] step:3641/10000 train_time:295676ms step_avg:81.21ms
+[2025-07-08 01:47:16] [Rank 0] step:3661/10000 train_time:297170ms step_avg:81.17ms
+[2025-07-08 01:47:17] [Rank 0] step:3681/10000 train_time:298665ms step_avg:81.14ms
+[2025-07-08 01:47:19] [Rank 0] step:3701/10000 train_time:300397ms step_avg:81.17ms
+[2025-07-08 01:47:20] [Rank 0] step:3721/10000 train_time:301893ms step_avg:81.13ms
+[2025-07-08 01:47:22] [Rank 0] step:3741/10000 train_time:303389ms step_avg:81.10ms
+[2025-07-08 01:47:23] [Rank 0] step:3761/10000 train_time:304886ms step_avg:81.07ms
+[2025-07-08 01:47:26] [Rank 0] step:3781/10000 train_time:307043ms step_avg:81.21ms
+[2025-07-08 01:47:27] [Rank 0] step:3801/10000 train_time:308521ms step_avg:81.17ms
+[2025-07-08 01:47:27] [Rank 
+[2025-07-08 01:47:29] [Rank 0] step:3821/10000 train_time:310020ms step_avg:81.14ms
+[2025-07-08 01:47:30] [Rank 0] step:3841/10000 train_time:311517ms step_avg:81.10ms
+[2025-07-08 01:47:32] [Rank 0] step:3861/10000 train_time:313015ms step_avg:81.07ms
+[2025-07-08 01:47:34] [Rank 0] step:3881/10000 train_time:315163ms step_avg:81.21ms
+[2025-07-08 01:47:35] [Rank 0] step:3901/10000 train_time:316658ms step_avg:81.17ms
+[2025-07-08 01:47:37] [Rank 0] step:3921/10000 train_time:318158ms step_avg:81.14ms
+[2025-07-08 01:47:38] [Rank 0] step:3941/10000 train_time:319657ms step_avg:81.11ms
+[2025-07-08 01:47:40] [Rank 0] step:3961/10000 train_time:321414ms step_avg:81.14ms
+[2025-07-08 01:47:41] [Rank 0] step:3981/10000 train_time:322894ms step_avg:81.11ms
+[2025-07-08 01:47:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:47:44] [Rank 0] PRINT: step:4000/10000 train_loss:1.1388 val_loss:1.1100 train_time:324394ms step_avg:81.10ms
+[2025-07-08 01:47:44] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:47:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:47:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:53:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:53:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:53:07] [Rank 0] Total Loss: 4.9198
+[2025-07-08 01:53:07] [Rank 0] Total FTA: 0.4404
+[2025-07-08 01:53:07] [Rank 0] Group 0 Loss: 4.9931
+[2025-07-08 01:53:07] [Rank 0] Group 1 Loss: 4.7542
+[2025-07-08 01:53:07] [Rank 0] Group 2 Loss: 4.5496
+[2025-07-08 01:53:07] [Rank 0] Group 3 Loss: 4.8229
+[2025-07-08 01:53:07] [Rank 0] Group 4 Loss: 4.8952
+[2025-07-08 01:53:07] [Rank 0] Group 5 Loss: 5.0478
+[2025-07-08 01:53:07] [Rank 0] Group 6 Loss: 4.8835
+[2025-07-08 01:53:07] [Rank 0] Group 7 Loss: 5.0104
+[2025-07-08 01:53:07] [Rank 0] Group 8 Loss: 4.9767
+[2025-07-08 01:53:07] [Rank 0] Group 9 Loss: 4.9659
+[2025-07-08 01:53:07] [Rank 0] Group 10 Loss: 4.9810
+[2025-07-08 01:53:07] [Rank 0] Group 11 Loss: 4.9791
+[2025-07-08 01:53:07] [Rank 0] Group 0 FTA: 0.3524
+[2025-07-08 01:53:07] [Rank 0] Group 1 FTA: 0.3385
+[2025-07-08 01:53:07] [Rank 0] Group 2 FTA: 0.4974
+[2025-07-08 01:53:07] [Rank 0] Group 3 FTA: 0.2474
+[2025-07-08 01:53:07] [Rank 0] Group 4 FTA: 0.5000
+[2025-07-08 01:53:07] [Rank 0] Group 5 FTA: 0.4818
+[2025-07-08 01:53:07] [Rank 0] Group 6 FTA: 0.4427
+[2025-07-08 01:53:07] [Rank 0] Group 7 FTA: 0.5156
+[2025-07-08 01:53:07] [Rank 0] Group 8 FTA: 0.4583
+[2025-07-08 01:53:07] [Rank 0] Group 9 FTA: 0.4844
+[2025-07-08 01:53:07] [Rank 0] Group 10 FTA: 0.4824
+[2025-07-08 01:53:07] [Rank 0] Group 11 FTA: 0.4902
+[2025-07-08 01:53:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:53:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:53:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:53:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:53:09] [Rank 0] step:4001/10000 train_time:324414ms step_avg:81.08ms
+[2025-07-08 01:53:10] [Rank 0] step:4021/10000 train_time:325904ms step_avg:81.05ms
+[2025-07-08 01:53:12] [Rank 0] step:4041/10000 train_time:327399ms step_avg:81.02ms
+[2025-07-08 01:53:14] [Rank 0] step:4061/10000 train_time:329130ms step_avg:81.05ms
+[2025-07-08 01:53:15] [Rank 0] step:4081/10000 train_time:330623ms step_avg:81.02ms
+[2025-07-08 01:53:17] [Rank 0] step:4101/10000 train_time:332118ms step_avg:80.98ms
+[2025-07-08 01:53:18] [Rank 0] step:4121/10000 train_time:333612ms step_avg:80.95ms
+[2025-07-08 01:53:20] [Rank 0] step:4141/10000 train_time:335784ms step_avg:81.09ms
+[2025-07-08 01:53:22] [Rank 0] step:4161/10000 train_time:337259ms step_avg:81.05ms
+[2025-07-08 01:53:23] [Rank 0] step:4181/10000 train_time:338856ms step_avg:81.05ms
+[2025-07-08 01:53:25] [Rank 0] step:4201/10000 train_time:340472ms step_avg:81.05ms
+[2025-07-08 01:53:26] [Rank 0] step:4221/10000 train_time:341967ms step_avg:81.02ms
+[2025-07-08 01:53:29] [Rank 0] step:4241/10000 train_time:344126ms step_avg:81.14ms
+[2025-07-08 01:53:30] [Rank 0] step:4261/10000 train_time:345621ms step_avg:81.11ms
+[2025-07-08 01:53:32] [Rank 0] step:4281/10000 train_time:347118ms step_avg:81.08ms
+[2025-07-08 01:53:33] [Rank 0] step:4301/10000 train_time:348615ms step_avg:81.05ms
+[2025-07-08 01:53:35] [Rank 0] step:4321/10000 train_time:350371ms step_avg:81.09ms
+[2025-07-08 01:53:36] [Rank 0] step:4341/10000 train_time:351948ms step_avg:81.08ms
+[2025-07-08 01:53:38] [Rank 0] step:4361/10000 train_time:353447ms step_avg:81.05ms
+[2025-07-08 01:53:39] [Rank 0] step:4381/10000 train_time:354944ms step_avg:81.02ms
+[2025-07-08 01:53:41] [Rank 0] step:4401/10000 train_time:356442ms step_avg:80.99ms
+[2025-07-08 01:53:42] [Rank 0] step:4421/10000 train_time:358075ms step_avg:80.99ms
+[2025-07-08 01:53:44] [Rank 0] step:4441/10000 train_time:359571ms step_avg:80.97ms
+[2025-07-08 01:53:45] [Rank 0] step:4461/10000 train_time:361069ms step_avg:80.94ms
+[2025-07-08 01:53:47] [Rank 0] step:4481/10000 train_time:362568ms step_avg:80.91ms
+[2025-07-08 01:53:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:53:49] [Rank 0] PRINT: step:4500/10000 train_loss:1.0961 val_loss:1.0733 train_time:364067ms step_avg:80.90ms
+[2025-07-08 01:53:49] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:53:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:53:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:59:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:59:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:59:16] [Rank 0] Total Loss: 5.1304
+[2025-07-08 01:59:16] [Rank 0] Total FTA: 0.5246
+[2025-07-08 01:59:16] [Rank 0] Group 0 Loss: 5.4585
+[2025-07-08 01:59:16] [Rank 0] Group 1 Loss: 4.9222
+[2025-07-08 01:59:16] [Rank 0] Group 2 Loss: 4.8095
+[2025-07-08 01:59:16] [Rank 0] Group 3 Loss: 5.2676
+[2025-07-08 01:59:16] [Rank 0] Group 4 Loss: 4.9695
+[2025-07-08 01:59:16] [Rank 0] Group 5 Loss: 5.1695
+[2025-07-08 01:59:16] [Rank 0] Group 6 Loss: 4.9814
+[2025-07-08 01:59:16] [Rank 0] Group 7 Loss: 5.1770
+[2025-07-08 01:59:16] [Rank 0] Group 8 Loss: 5.1047
+[2025-07-08 01:59:16] [Rank 0] Group 9 Loss: 5.0349
+[2025-07-08 01:59:16] [Rank 0] Group 10 Loss: 5.0910
+[2025-07-08 01:59:16] [Rank 0] Group 11 Loss: 5.1681
+[2025-07-08 01:59:16] [Rank 0] Group 0 FTA: 0.5540
+[2025-07-08 01:59:16] [Rank 0] Group 1 FTA: 0.4844
+[2025-07-08 01:59:16] [Rank 0] Group 2 FTA: 0.6068
+[2025-07-08 01:59:16] [Rank 0] Group 3 FTA: 0.4870
+[2025-07-08 01:59:16] [Rank 0] Group 4 FTA: 0.4427
+[2025-07-08 01:59:16] [Rank 0] Group 5 FTA: 0.5443
+[2025-07-08 01:59:16] [Rank 0] Group 6 FTA: 0.4479
+[2025-07-08 01:59:16] [Rank 0] Group 7 FTA: 0.5234
+[2025-07-08 01:59:16] [Rank 0] Group 8 FTA: 0.5234
+[2025-07-08 01:59:16] [Rank 0] Group 9 FTA: 0.5430
+[2025-07-08 01:59:16] [Rank 0] Group 10 FTA: 0.5625
+[2025-07-08 01:59:16] [Rank 0] Group 11 FTA: 0.5303
+[2025-07-08 01:59:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 01:59:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 01:59:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 01:59:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 01:59:18] [Rank 0] step:4501/10000 train_time:364093ms step_avg:80.89ms
+[2025-07-08 01:59:19] [Rank 0] step:4521/10000 train_time:366285ms step_avg:81.02ms
+[2025-07-08 01:59:21] [Rank 0] step:4541/10000 train_time:367778ms step_avg:80.99ms
+[2025-07-08 01:59:22] [Rank 0] step:4561/10000 train_time:369269ms step_avg:80.96ms
+[2025-07-08 01:59:24] [Rank 0] step:4581/10000 train_time:370762ms step_avg:80.93ms
+[2025-07-08 01:59:26] [Rank 0] step:4601/10000 train_time:372928ms step_avg:81.05ms
+[2025-07-08 01:59:28] [Rank 0] step:4621/10000 train_time:374423ms step_avg:81.03ms
+[2025-07-08 01:59:29] [Rank 0] step:4641/10000 train_time:375919ms step_avg:81.00ms
+[2025-07-08 01:59:31] [Rank 0] step:4661/10000 train_time:377413ms step_avg:80.97ms
+[2025-07-08 01:59:33] [Rank 0] step:4681/10000 train_time:378962ms step_avg:80.96ms
+[2025-07-08 01:59:34] [Rank 0] step:4701/10000 train_time:381076ms step_avg:81.06ms
+[2025-07-08 01:59:36] [Rank 0] step:4721/10000 train_time:382571ms step_avg:81.04ms
+[2025-07-08 01:59:37] [Rank 0] step:4741/10000 train_time:384069ms step_avg:81.01ms
+[2025-07-08 01:59:39] [Rank 0] step:4761/10000 train_time:385568ms step_avg:80.98ms
+[2025-07-08 01:59:40] [Rank 0] step:4781/10000 train_time:387302ms step_avg:81.01ms
+[2025-07-08 01:59:42] [Rank 0] step:4801/10000 train_time:388799ms step_avg:80.98ms
+[2025-07-08 01:59:44] [Rank 0] step:4821/10000 train_time:390430ms step_avg:80.99ms
+[2025-07-08 01:59:45] [Rank 0] step:4841/10000 train_time:391930ms step_avg:80.96ms
+[2025-07-08 01:59:47] [Rank 0] step:4861/10000 train_time:393481ms step_avg:80.95ms
+[2025-07-08 01:59:49] [Rank 0] step:4881/10000 train_time:395570ms step_avg:81.04ms
+[2025-07-08 01:59:50] [Rank 0] step:4901/10000 train_time:397070ms step_avg:81.02ms
+[2025-07-08 01:59:52] [Rank 0] step:4921/10000 train_time:398570ms step_avg:80.99ms
+[2025-07-08 01:59:53] [Rank 0] step:4941/10000 train_time:400069ms step_avg:80.97ms
+[2025-07-08 01:59:55] [Rank 0] step:4961/10000 train_time:401802ms step_avg:80.99ms
+[2025-07-08 01:59:56] [Rank 0] step:4981/10000 train_time:403301ms step_avg:80.97ms
+[2025-07-08 01:59:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:59:59] [Rank 0] PRINT: step:5000/10000 train_loss:1.0621 val_loss:1.0435 train_time:404802ms step_avg:80.96ms
+[2025-07-08 01:59:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:59:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:59:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:05:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:05:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:05:22] [Rank 0] Total Loss: 5.1863
+[2025-07-08 02:05:22] [Rank 0] Total FTA: 0.5216
+[2025-07-08 02:05:22] [Rank 0] Group 0 Loss: 5.7650
+[2025-07-08 02:05:22] [Rank 0] Group 1 Loss: 4.9425
+[2025-07-08 02:05:22] [Rank 0] Group 2 Loss: 4.8128
+[2025-07-08 02:05:22] [Rank 0] Group 3 Loss: 5.3243
+[2025-07-08 02:05:22] [Rank 0] Group 4 Loss: 4.9859
+[2025-07-08 02:05:22] [Rank 0] Group 5 Loss: 5.1494
+[2025-07-08 02:05:22] [Rank 0] Group 6 Loss: 5.0591
+[2025-07-08 02:05:23] [Rank 0] Group 7 Loss: 5.1597
+[2025-07-08 02:05:23] [Rank 0] Group 8 Loss: 5.1106
+[2025-07-08 02:05:23] [Rank 0] Group 9 Loss: 5.0572
+[2025-07-08 02:05:23] [Rank 0] Group 10 Loss: 5.2086
+[2025-07-08 02:05:23] [Rank 0] Group 11 Loss: 5.1276
+[2025-07-08 02:05:23] [Rank 0] Group 0 FTA: 0.3355
+[2025-07-08 02:05:23] [Rank 0] Group 1 FTA: 0.4948
+[2025-07-08 02:05:23] [Rank 0] Group 2 FTA: 0.4922
+[2025-07-08 02:05:23] [Rank 0] Group 3 FTA: 0.5495
+[2025-07-08 02:05:23] [Rank 0] Group 4 FTA: 0.4844
+[2025-07-08 02:05:23] [Rank 0] Group 5 FTA: 0.6094
+[2025-07-08 02:05:23] [Rank 0] Group 6 FTA: 0.4974
+[2025-07-08 02:05:23] [Rank 0] Group 7 FTA: 0.5911
+[2025-07-08 02:05:23] [Rank 0] Group 8 FTA: 0.5417
+[2025-07-08 02:05:23] [Rank 0] Group 9 FTA: 0.5625
+[2025-07-08 02:05:23] [Rank 0] Group 10 FTA: 0.5625
+[2025-07-08 02:05:23] [Rank 0] Group 11 FTA: 0.5977
+[2025-07-08 02:05:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:05:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:05:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:05:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:05:24] [Rank 0] step:5001/10000 train_time:404822ms step_avg:80.95ms
+[2025-07-08 02:05:25] [Rank 0] step:5021/10000 train_time:406315ms step_avg:80.92ms
+[2025-07-08 02:05:28] [Rank 0] step:5041/10000 train_time:407858ms step_avg:80.91ms
+[2025-07-08 02:05:29] [Rank 0] step:5061/10000 train_time:409950ms step_avg:81.00ms
+[2025-07-08 02:05:31] [Rank 0] step:5081/10000 train_time:411448ms step_avg:80.98ms
+[2025-07-08 02:05:32] [Rank 0] step:5101/10000 train_time:412943ms step_avg:80.95ms
+[2025-07-08 02:05:34] [Rank 0] step:5121/10000 train_time:414439ms step_avg:80.93ms
+[2025-07-08 02:05:36] [Rank 0] step:5141/10000 train_time:416594ms step_avg:81.03ms
+[2025-07-08 02:05:37] [Rank 0] step:5161/10000 train_time:418090ms step_avg:81.01ms
+[2025-07-08 02:05:39] [Rank 0] step:5181/10000 train_time:419589ms step_avg:80.99ms
+[2025-07-08 02:05:40] [Rank 0] step:5201/10000 train_time:421085ms step_avg:80.96ms
+[2025-07-08 02:05:42] [Rank 0] step:5221/10000 train_time:422838ms step_avg:80.99ms
+[2025-07-08 02:05:44] [Rank 0] step:5241/10000 train_time:424740ms step_avg:81.04ms
+[2025-07-08 02:05:45] [Rank 0] step:5261/10000 train_time:426240ms step_avg:81.02ms
+[2025-07-08 02:05:47] [Rank 0] step:5281/10000 train_time:427738ms step_avg:81.00ms
+[2025-07-08 02:05:48] [Rank 0] step:5301/10000 train_time:429238ms step_avg:80.97ms
+[2025-07-08 02:05:50] [Rank 0] step:5321/10000 train_time:430976ms step_avg:81.00ms
+[2025-07-08 02:05:52] [Rank 0] step:5341/10000 train_time:432474ms step_avg:80.97ms
+[2025-07-08 02:05:53] [Rank 0] step:5361/10000 train_time:433975ms step_avg:80.95ms
+[2025-07-08 02:05:55] [Rank 0] step:5381/10000 train_time:435476ms step_avg:80.93ms
+[2025-07-08 02:05:56] [Rank 0] step:5401/10000 train_time:436977ms step_avg:80.91ms
+[2025-07-08 02:05:58] [Rank 0] step:5421/10000 train_time:438712ms step_avg:80.93ms
+[2025-07-08 02:05:59] [Rank 0] step:5441/10000 train_time:440213ms step_avg:80.91ms
+[2025-07-08 02:06:01] [Rank 0] step:5461/10000 train_time:441715ms step_avg:80.89ms
+[2025-07-08 02:06:02] [Rank 0] step:5481/10000 train_time:443348ms step_avg:80.89ms
+[2025-07-08 02:06:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:06:06] [Rank 0] PRINT: step:5500/10000 train_loss:1.0340 val_loss:1.0174 train_time:445558ms step_avg:81.01ms
+[2025-07-08 02:06:06] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:06:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 02:06:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:11:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:11:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:11:31] [Rank 0] Total Loss: 5.3401
+[2025-07-08 02:11:31] [Rank 0] Total FTA: 0.5842
+[2025-07-08 02:11:31] [Rank 0] Group 0 Loss: 5.7646
+[2025-07-08 02:11:31] [Rank 0] Group 1 Loss: 5.6039
+[2025-07-08 02:11:31] [Rank 0] Group 2 Loss: 5.0024
+[2025-07-08 02:11:31] [Rank 0] Group 3 Loss: 5.3807
+[2025-07-08 02:11:31] [Rank 0] Group 4 Loss: 5.2616
+[2025-07-08 02:11:31] [Rank 0] Group 5 Loss: 5.2097
+[2025-07-08 02:11:31] [Rank 0] Group 6 Loss: 5.1826
+[2025-07-08 02:11:31] [Rank 0] Group 7 Loss: 5.2747
+[2025-07-08 02:11:31] [Rank 0] Group 8 Loss: 5.2258
+[2025-07-08 02:11:31] [Rank 0] Group 9 Loss: 5.2711
+[2025-07-08 02:11:31] [Rank 0] Group 10 Loss: 5.2957
+[2025-07-08 02:11:31] [Rank 0] Group 11 Loss: 5.2779
+[2025-07-08 02:11:31] [Rank 0] Group 0 FTA: 0.4967
+[2025-07-08 02:11:31] [Rank 0] Group 1 FTA: 0.3151
+[2025-07-08 02:11:31] [Rank 0] Group 2 FTA: 0.7604
+[2025-07-08 02:11:31] [Rank 0] Group 3 FTA: 0.5859
+[2025-07-08 02:11:31] [Rank 0] Group 4 FTA: 0.4870
+[2025-07-08 02:11:31] [Rank 0] Group 5 FTA: 0.6302
+[2025-07-08 02:11:31] [Rank 0] Group 6 FTA: 0.5729
+[2025-07-08 02:11:31] [Rank 0] Group 7 FTA: 0.6198
+[2025-07-08 02:11:31] [Rank 0] Group 8 FTA: 0.5964
+[2025-07-08 02:11:31] [Rank 0] Group 9 FTA: 0.6719
+[2025-07-08 02:11:31] [Rank 0] Group 10 FTA: 0.6738
+[2025-07-08 02:11:31] [Rank 0] Group 11 FTA: 0.6230
+[2025-07-08 02:11:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:11:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:11:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:11:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:11:33] [Rank 0] step:5501/10000 train_time:445577ms step_avg:81.00ms
+[2025-07-08 02:11:34] [Rank 0] step:5521/10000 train_time:447078ms step_avg:80.98ms
+[2025-07-08 02:11:36] [Rank 0] step:5541/10000 train_time:448571ms step_avg:80.95ms
+[2025-07-08 02:11:37] [Rank 0] step:5561/10000 train_time:450069ms step_avg:80.93ms
+[2025-07-08 02:11:39] [Rank 0] step:5581/10000 train_time:451563ms step_avg:80.91ms
+[2025-07-08 02:11:41] [Rank 0] step:5601/10000 train_time:453720ms step_avg:81.01ms
+[2025-07-08 02:11:42] [Rank 0] step:5621/10000 train_time:455213ms step_avg:80.98ms
+[2025-07-08 02:11:44] [Rank 0] step:5641/10000 train_time:456710ms step_avg:80.96ms
+[2025-07-08 02:11:45] [Rank 0] step:5661/10000 train_time:458207ms step_avg:80.94ms
+[2025-07-08 02:11:47] [Rank 0] step:5681/10000 train_time:460365ms step_avg:81.04ms
+[2025-07-08 02:11:49] [Rank 0] step:5701/10000 train_time:461864ms step_avg:81.01ms
+[2025-07-08 02:11:50] [Rank 0] step:5721/10000 train_time:463361ms step_avg:80.99ms
+[2025-07-08 02:11:52] [Rank 0] step:5741/10000 train_time:464859ms step_avg:80.97ms
+[2025-07-08 02:11:54] [Rank 0] step:5761/10000 train_time:466407ms step_avg:80.96ms
+[2025-07-08 02:11:55] [Rank 0] step:5781/10000 train_time:468095ms step_avg:80.97ms
+[2025-07-08 02:11:57] [Rank 0] step:5801/10000 train_time:469595ms step_avg:80.95ms
+[2025-07-08 02:11:58] [Rank 0] step:5821/10000 train_time:471095ms step_avg:80.93ms
+[2025-07-08 02:12:00] [Rank 0] step:5841/10000 train_time:472595ms step_avg:80.91ms
+[2025-07-08 02:12:02] [Rank 0] step:5861/10000 train_time:474765ms step_avg:81.00ms
+[2025-07-08 02:12:03] [Rank 0] step:5881/10000 train_time:476267ms step_avg:80.98ms
+[2025-07-08 02:12:05] [Rank 0] step:5901/10000 train_time:477770ms step_avg:80.96ms
+[2025-07-08 02:12:06] [Rank 0] step:5921/10000 train_time:479273ms step_avg:80.94ms
+[2025-07-08 02:12:08] [Rank 0] step:5941/10000 train_time:480829ms step_avg:80.93ms
+[2025-07-08 02:12:10] [Rank 0] step:5961/10000 train_time:482941ms step_avg:81.02ms
+[2025-07-08 02:12:11] [Rank 0] step:5981/10000 train_time:484444ms step_avg:81.00ms
+[2025-07-08 02:12:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:12:14] [Rank 0] PRINT: step:6000/10000 train_loss:1.0094 val_loss:0.9967 train_time:485947ms step_avg:80.99ms
+[2025-07-08 02:12:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:12:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 02:12:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:17:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:17:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:17:39] [Rank 0] Total Loss: 5.4445
+[2025-07-08 02:17:39] [Rank 0] Total FTA: 0.6732
+[2025-07-08 02:17:39] [Rank 0] Group 0 Loss: 6.0097
+[2025-07-08 02:17:39] [Rank 0] Group 1 Loss: 5.1540
+[2025-07-08 02:17:39] [Rank 0] Group 2 Loss: 5.0257
+[2025-07-08 02:17:39] [Rank 0] Group 3 Loss: 5.4596
+[2025-07-08 02:17:39] [Rank 0] Group 4 Loss: 5.2536
+[2025-07-08 02:17:39] [Rank 0] Group 5 Loss: 5.3765
+[2025-07-08 02:17:39] [Rank 0] Group 6 Loss: 5.2697
+[2025-07-08 02:17:39] [Rank 0] Group 7 Loss: 5.4806
+[2025-07-08 02:17:39] [Rank 0] Group 8 Loss: 5.4502
+[2025-07-08 02:17:39] [Rank 0] Group 9 Loss: 5.4688
+[2025-07-08 02:17:39] [Rank 0] Group 10 Loss: 5.4252
+[2025-07-08 02:17:39] [Rank 0] Group 11 Loss: 5.4310
+[2025-07-08 02:17:39] [Rank 0] Group 0 FTA: 0.6892
+[2025-07-08 02:17:39] [Rank 0] Group 1 FTA: 0.6589
+[2025-07-08 02:17:39] [Rank 0] Group 2 FTA: 0.7292
+[2025-07-08 02:17:39] [Rank 0] Group 3 FTA: 0.6875
+[2025-07-08 02:17:39] [Rank 0] Group 4 FTA: 0.6380
+[2025-07-08 02:17:39] [Rank 0] Group 5 FTA: 0.6589
+[2025-07-08 02:17:39] [Rank 0] Group 6 FTA: 0.6510
+[2025-07-08 02:17:39] [Rank 0] Group 7 FTA: 0.6875
+[2025-07-08 02:17:39] [Rank 0] Group 8 FTA: 0.6276
+[2025-07-08 02:17:39] [Rank 0] Group 9 FTA: 0.7188
+[2025-07-08 02:17:39] [Rank 0] Group 10 FTA: 0.6875
+[2025-07-08 02:17:39] [Rank 0] Group 11 FTA: 0.6602
+[2025-07-08 02:17:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:17:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:17:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:17:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:17:41] [Rank 0] step:6001/10000 train_time:485967ms step_avg:80.98ms
+[2025-07-08 02:17:42] [Rank 0] step:6021/10000 train_time:487467ms step_avg:80.96ms
+[2025-07-08 02:17:44] [Rank 0] step:6041/10000 train_time:489623ms step_avg:81.05ms
+[2025-07-08 02:17:46] [Rank 0] step:6061/10000 train_time:491117ms step_avg:81.03ms
+[2025-07-08 02:17:47] [Rank 0] step:6081/10000 train_time:492613ms step_avg:81.01ms
+[2025-07-08 02:17:49] [Rank 0] step:6101/10000 train_time:494109ms step_avg:80.99ms
+[2025-07-08 02:17:51] [Rank 0] step:6121/10000 train_time:495867ms step_avg:81.01ms
+[2025-07-08 02:17:52] [Rank 0] step:6141/10000 train_time:497765ms step_avg:81.06ms
+[2025-07-08 02:17:54] [Rank 0] step:6161/10000 train_time:499261ms step_avg:81.04ms
+[2025-07-08 02:17:55] [Rank 0] step:6181/10000 train_time:500759ms step_avg:81.02ms
+[2025-07-08 02:17:57] [Rank 0] step:6201/10000 train_time:502257ms step_avg:81.00ms
+[2025-07-08 02:17:59] [Rank 0] step:6221/10000 train_time:503992ms step_avg:81.01ms
+[2025-07-08 02:18:00] [Rank 0] step:6241/10000 train_time:505490ms step_avg:81.00ms
+[2025-07-08 02:18:02] [Rank 0] step:6261/10000 train_time:506989ms step_avg:80.98ms
+[2025-07-08 02:18:03] [Rank 0] step:6281/10000 train_time:508488ms step_avg:80.96ms
+[2025-07-08 02:18:05] [Rank 0] step:6301/10000 train_time:510656ms step_avg:81.04ms
0] step:6301/10000 train_time:510656ms step_avg:81.04ms +[2025-07-08 02:18:07] [Rank 0] step:6321/10000 train_time:512136ms step_avg:81.02ms +[2025-07-08 02:18:07] [Rank 0] step:6321/10000 train_time:512136ms step_avg:81.02ms +[2025-07-08 02:18:08] [Rank 0] step:6341/10000 train_time:513634ms step_avg:81.00ms +[2025-07-08 02:18:08] [Rank 0] step:6341/10000 train_time:513634ms step_avg:81.00ms +[2025-07-08 02:18:10] [Rank 0] step:6361/10000 train_time:515135ms step_avg:80.98ms +[2025-07-08 02:18:10] [Rank 0] step:6361/10000 train_time:515135ms step_avg:80.98ms +[2025-07-08 02:18:11] [Rank 0] step:6381/10000 train_time:516635ms step_avg:80.96ms +[2025-07-08 02:18:11] [Rank 0] step:6381/10000 train_time:516635ms step_avg:80.96ms +[2025-07-08 02:18:13] [Rank 0] step:6401/10000 train_time:518373ms step_avg:80.98ms +[2025-07-08 02:18:13] [Rank 0] step:6401/10000 train_time:518373ms step_avg:80.98ms +[2025-07-08 02:18:15] [Rank 0] step:6421/10000 train_time:519873ms step_avg:80.96ms +[2025-07-08 02:18:15] [Rank 0] step:6421/10000 train_time:519873ms step_avg:80.96ms +[2025-07-08 02:18:16] [Rank 0] step:6441/10000 train_time:521373ms step_avg:80.95ms +[2025-07-08 02:18:16] [Rank 0] step:6441/10000 train_time:521373ms step_avg:80.95ms +[2025-07-08 02:18:18] [Rank 0] step:6461/10000 train_time:522874ms step_avg:80.93ms +[2025-07-08 02:18:18] [Rank 0] step:6461/10000 train_time:522874ms step_avg:80.93ms +[2025-07-08 02:18:19] [Rank 0] step:6481/10000 train_time:524374ms step_avg:80.91ms +[2025-07-08 02:18:19] [Rank 0] step:6481/10000 train_time:524374ms step_avg:80.91ms +[2025-07-08 02:18:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:18:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:18:22] [Rank 0] PRINT: step:6500/10000 train_loss:0.9903 val_loss:0.9812 train_time:526109ms step_avg:80.94ms +[2025-07-08 02:18:22] [Rank 0] PRINT: step:6500/10000 train_loss:0.9903 val_loss:0.9812 train_time:526109ms step_avg:80.94ms +[2025-07-08 02:18:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:18:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:18:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 02:18:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 02:18:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:23:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:23:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:23:46] [Rank 0] Total Loss: 5.5712
+[2025-07-08 02:23:46] [Rank 0] Total FTA: 0.6402
+[2025-07-08 02:23:46] [Rank 0] Group 0 Loss: 6.1003
+[2025-07-08 02:23:46] [Rank 0] Group 1 Loss: 5.3273
+[2025-07-08 02:23:46] [Rank 0] Group 2 Loss: 5.2183
+[2025-07-08 02:23:46] [Rank 0] Group 3 Loss: 5.5897
+[2025-07-08 02:23:46] [Rank 0] Group 4 Loss: 5.4104
+[2025-07-08 02:23:46] [Rank 0] Group 5 Loss: 5.5500
+[2025-07-08 02:23:46] [Rank 0] Group 6 Loss: 5.4661
+[2025-07-08 02:23:46] [Rank 0] Group 7 Loss: 5.5685
+[2025-07-08 02:23:46] [Rank 0] Group 8 Loss: 5.5240
+[2025-07-08 02:23:46] [Rank 0] Group 9 Loss: 5.5242
+[2025-07-08 02:23:46] [Rank 0] Group 10 Loss: 5.5302
+[2025-07-08 02:23:46] [Rank 0] Group 11 Loss: 5.5493
+[2025-07-08 02:23:46] [Rank 0] Group 0 FTA: 0.3589
+[2025-07-08 02:23:46] [Rank 0] Group 1 FTA: 0.6719
+[2025-07-08 02:23:46] [Rank 0] Group 2 FTA: 0.7812
+[2025-07-08 02:23:46] [Rank 0] Group 3 FTA: 0.5990
+[2025-07-08 02:23:46] [Rank 0] Group 4 FTA: 0.7057
+[2025-07-08 02:23:46] [Rank 0] Group 5 FTA: 0.7057
+[2025-07-08 02:23:46] [Rank 0] Group 6 FTA: 0.6719
+[2025-07-08 02:23:46] [Rank 0] Group 7 FTA: 0.6693
+[2025-07-08 02:23:46] [Rank 0] Group 8 FTA: 0.6484
+[2025-07-08 02:23:46] [Rank 0] Group 9 FTA: 0.6797
+[2025-07-08 02:23:46] [Rank 0] Group 10 FTA: 0.6934
+[2025-07-08 02:23:46] [Rank 0] Group 11 FTA: 0.6904
+[2025-07-08 02:23:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:23:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:23:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:23:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:23:48] [Rank 0] step:6501/10000 train_time:526130ms step_avg:80.93ms
+[2025-07-08 02:23:49] [Rank 0] step:6521/10000 train_time:527646ms step_avg:80.91ms
+[2025-07-08 02:23:51] [Rank 0] step:6541/10000 train_time:529142ms step_avg:80.90ms
+[2025-07-08 02:23:52] [Rank 0] step:6561/10000 train_time:530635ms step_avg:80.88ms
+[2025-07-08 02:23:55] [Rank 0] step:6581/10000 train_time:532787ms step_avg:80.96ms
+[2025-07-08 02:23:56] [Rank 0] step:6601/10000 train_time:534282ms step_avg:80.94ms
+[2025-07-08 02:23:58] [Rank 0] step:6621/10000 train_time:536021ms step_avg:80.96ms
+[2025-07-08 02:23:59] [Rank 0] step:6641/10000 train_time:537518ms step_avg:80.94ms
+[2025-07-08 02:24:01] [Rank 0] step:6661/10000 train_time:539678ms step_avg:81.02ms
+[2025-07-08 02:24:03] [Rank 0] step:6681/10000 train_time:541155ms step_avg:81.00ms
+[2025-07-08 02:24:04] [Rank 0] step:6701/10000 train_time:542652ms step_avg:80.98ms
+[2025-07-08 02:24:06] [Rank 0] step:6721/10000 train_time:544150ms step_avg:80.96ms
+[2025-07-08 02:24:07] [Rank 0] step:6741/10000 train_time:545649ms step_avg:80.94ms
+[2025-07-08 02:24:10] [Rank 0] step:6761/10000 train_time:547798ms step_avg:81.02ms
+[2025-07-08 02:24:11] [Rank 0] step:6781/10000 train_time:549298ms step_avg:81.01ms
+[2025-07-08 02:24:13] [Rank 0] step:6801/10000 train_time:550798ms step_avg:80.99ms
+[2025-07-08 02:24:14] [Rank 0] step:6821/10000 train_time:552298ms step_avg:80.97ms
+[2025-07-08 02:24:16] [Rank 0] step:6841/10000 train_time:553850ms step_avg:80.96ms
+[2025-07-08 02:24:18] [Rank 0] step:6861/10000 train_time:555940ms step_avg:81.03ms
+[2025-07-08 02:24:19] [Rank 0] step:6881/10000 train_time:557440ms step_avg:81.01ms
+[2025-07-08 02:24:21] [Rank 0] step:6901/10000 train_time:558940ms step_avg:80.99ms
+[2025-07-08 02:24:22] [Rank 0] step:6921/10000 train_time:560441ms step_avg:80.98ms
+[2025-07-08 02:24:24] [Rank 0] step:6941/10000 train_time:562583ms step_avg:81.05ms
+[2025-07-08 02:24:26] [Rank 0] step:6961/10000 train_time:564082ms step_avg:81.03ms
+[2025-07-08 02:24:27] [Rank 0] step:6981/10000 train_time:565583ms step_avg:81.02ms
+[2025-07-08 02:24:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:24:30] [Rank 0] PRINT: step:7000/10000 train_loss:0.9757 val_loss:0.9680 train_time:567083ms step_avg:81.01ms
+[2025-07-08 02:24:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:24:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
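
Each detailed evaluation asks for ~5000 samples but consistently ends up with 5633. One plausible mechanism is per-class stratified sampling in which quotas are computed per group and rounded up, so the union overshoots the nominal target whenever group sizes are uneven. A sketch of that mechanism (a hypothetical helper, not the script's actual sampler):

    import random
    from collections import defaultdict

    def stratified_sample(examples, labels, target=5000, seed=0):
        """Draw a per-class quota; rounding up per class can overshoot `target`."""
        rng = random.Random(seed)
        by_class = defaultdict(list)
        for ex, y in zip(examples, labels):
            by_class[y].append(ex)
        quota = -(-target // len(by_class))  # ceiling division
        picked = []
        for pool in by_class.values():
            rng.shuffle(pool)
            picked.extend(pool[:quota])  # small classes contribute all they have
        return picked
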
+[2025-07-08 02:24:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:29:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:29:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:29:57] [Rank 0] Total Loss: 5.5924
+[2025-07-08 02:29:57] [Rank 0] Total FTA: 0.6359
+[2025-07-08 02:29:57] [Rank 0] Group 0 Loss: 5.8208
+[2025-07-08 02:29:57] [Rank 0] Group 1 Loss: 5.4807
+[2025-07-08 02:29:57] [Rank 0] Group 2 Loss: 5.3718
+[2025-07-08 02:29:57] [Rank 0] Group 3 Loss: 5.6213
+[2025-07-08 02:29:57] [Rank 0] Group 4 Loss: 5.5062
+[2025-07-08 02:29:57] [Rank 0] Group 5 Loss: 5.5183
+[2025-07-08 02:29:57] [Rank 0] Group 6 Loss: 5.4942
+[2025-07-08 02:29:57] [Rank 0] Group 7 Loss: 5.6841
+[2025-07-08 02:29:57] [Rank 0] Group 8 Loss: 5.6311
+[2025-07-08 02:29:57] [Rank 0] Group 9 Loss: 5.5785
+[2025-07-08 02:29:57] [Rank 0] Group 10 Loss: 5.5873
+[2025-07-08 02:29:57] [Rank 0] Group 11 Loss: 5.5887
+[2025-07-08 02:29:57] [Rank 0] Group 0 FTA: 0.3095
+[2025-07-08 02:29:57] [Rank 0] Group 1 FTA: 0.8151
+[2025-07-08 02:29:57] [Rank 0] Group 2 FTA: 0.7266
+[2025-07-08 02:29:57] [Rank 0] Group 3 FTA: 0.5130
+[2025-07-08 02:29:57] [Rank 0] Group 4 FTA: 0.6380
+[2025-07-08 02:29:57] [Rank 0] Group 5 FTA: 0.7604
+[2025-07-08 02:29:57] [Rank 0] Group 6 FTA: 0.5938
+[2025-07-08 02:29:57] [Rank 0] Group 7 FTA: 0.6927
+[2025-07-08 02:29:57] [Rank 0] Group 8 FTA: 0.6719
+[2025-07-08 02:29:57] [Rank 0] Group 9 FTA: 0.7734
+[2025-07-08 02:29:57] [Rank 0] Group 10 FTA: 0.6855
+[2025-07-08 02:29:57] [Rank 0] Group 11 FTA: 0.7002
+[2025-07-08 02:29:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:29:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:29:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:29:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:29:58] [Rank 0] step:7001/10000 train_time:567103ms step_avg:81.00ms
+[2025-07-08 02:30:01] [Rank 0] step:7021/10000 train_time:568615ms step_avg:80.99ms
+[2025-07-08 02:30:02] [Rank 0] step:7041/10000 train_time:570777ms step_avg:81.06ms
+[2025-07-08 02:30:04] [Rank 0] step:7061/10000 train_time:572272ms step_avg:81.05ms
+[2025-07-08 02:30:05] [Rank 0] step:7081/10000 train_time:573765ms step_avg:81.03ms
+[2025-07-08 02:30:07] [Rank 0] step:7101/10000 train_time:575262ms step_avg:81.01ms
+[2025-07-08 02:30:09] [Rank 0] step:7121/10000 train_time:577421ms step_avg:81.09ms
+[2025-07-08 02:30:10] [Rank 0] step:7141/10000 train_time:578913ms step_avg:81.07ms
+[2025-07-08 02:30:12] [Rank 0] step:7161/10000 train_time:580409ms step_avg:81.05ms
+[2025-07-08 02:30:13] [Rank 0] step:7181/10000 train_time:581905ms step_avg:81.03ms
+[2025-07-08 02:30:15] [Rank 0] step:7201/10000 train_time:583661ms step_avg:81.05ms
+[2025-07-08 02:30:17] [Rank 0] step:7221/10000 train_time:585781ms step_avg:81.12ms
+[2025-07-08 02:30:19] [Rank 0] step:7241/10000 train_time:587279ms step_avg:81.10ms
+[2025-07-08 02:30:20] [Rank 0] step:7261/10000 train_time:588819ms step_avg:81.09ms
+[2025-07-08 02:30:22] [Rank 0] step:7281/10000 train_time:590320ms step_avg:81.08ms
+[2025-07-08 02:30:24] [Rank 0] step:7301/10000 train_time:592462ms step_avg:81.15ms
+[2025-07-08 02:30:25] [Rank 0] step:7321/10000 train_time:593961ms step_avg:81.13ms
+[2025-07-08 02:30:27] [Rank 0] step:7341/10000 train_time:595460ms step_avg:81.11ms
+[2025-07-08 02:30:28] [Rank 0] step:7361/10000 train_time:596960ms step_avg:81.10ms
+[2025-07-08 02:30:30] [Rank 0] step:7381/10000 train_time:599125ms step_avg:81.17ms
+[2025-07-08 02:30:32] [Rank 0] step:7401/10000 train_time:600606ms step_avg:81.15ms
+[2025-07-08 02:30:33] [Rank 0] step:7421/10000 train_time:602104ms step_avg:81.14ms
+[2025-07-08 02:30:35] [Rank 0] step:7441/10000 train_time:603603ms step_avg:81.12ms
+[2025-07-08 02:30:36] [Rank 0] step:7461/10000 train_time:605102ms step_avg:81.10ms
+[2025-07-08 02:30:38] [Rank 0] step:7481/10000 train_time:606837ms step_avg:81.12ms
+[2025-07-08 02:30:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:30:40] [Rank 0] PRINT: step:7500/10000 train_loss:0.9629 val_loss:0.9566 train_time:608337ms step_avg:81.11ms
+[2025-07-08 02:30:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:30:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
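
The step_avg column is simply cumulative wall-clock training time divided by the step index, which is why it creeps up after a slow step and drifts back down during smooth stretches. Reproducing one line from above:

    # step:7301 logged train_time:592462ms -> 592462 / 7301 = 81.15ms
    train_time_ms, step = 592462, 7301
    print(f"step_avg:{train_time_ms / step:.2f}ms")  # prints step_avg:81.15ms
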
+[2025-07-08 02:30:41] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:36:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:36:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:36:04] [Rank 0] Total Loss: 5.5321
+[2025-07-08 02:36:04] [Rank 0] Total FTA: 0.7163
+[2025-07-08 02:36:04] [Rank 0] Group 0 Loss: 5.7743
+[2025-07-08 02:36:04] [Rank 0] Group 1 Loss: 5.3631
+[2025-07-08 02:36:04] [Rank 0] Group 2 Loss: 5.2757
+[2025-07-08 02:36:04] [Rank 0] Group 3 Loss: 5.5635
+[2025-07-08 02:36:04] [Rank 0] Group 4 Loss: 5.4681
+[2025-07-08 02:36:04] [Rank 0] Group 5 Loss: 5.4848
+[2025-07-08 02:36:04] [Rank 0] Group 6 Loss: 5.4425
+[2025-07-08 02:36:04] [Rank 0] Group 7 Loss: 5.5759
+[2025-07-08 02:36:04] [Rank 0] Group 8 Loss: 5.5809
+[2025-07-08 02:36:04] [Rank 0] Group 9 Loss: 5.4632
+[2025-07-08 02:36:04] [Rank 0] Group 10 Loss: 5.5716
+[2025-07-08 02:36:04] [Rank 0] Group 11 Loss: 5.5360
+[2025-07-08 02:36:04] [Rank 0] Group 0 FTA: 0.6801
+[2025-07-08 02:36:05] [Rank 0] Group 1 FTA: 0.6354
+[2025-07-08 02:36:05] [Rank 0] Group 2 FTA: 0.6615
+[2025-07-08 02:36:05] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-08 02:36:05] [Rank 0] Group 4 FTA: 0.7266
+[2025-07-08 02:36:05] [Rank 0] Group 5 FTA: 0.8073
+[2025-07-08 02:36:05] [Rank 0] Group 6 FTA: 0.6328
+[2025-07-08 02:36:05] [Rank 0] Group 7 FTA: 0.6953
+[2025-07-08 02:36:05] [Rank 0] Group 8 FTA: 0.7214
+[2025-07-08 02:36:05] [Rank 0] Group 9 FTA: 0.7461
+[2025-07-08 02:36:05] [Rank 0] Group 10 FTA: 0.7285
+[2025-07-08 02:36:05] [Rank 0] Group 11 FTA: 0.7246
+[2025-07-08 02:36:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:36:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:36:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:36:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:36:06] [Rank 0] step:7501/10000 train_time:608358ms step_avg:81.10ms
+[2025-07-08 02:36:08] [Rank 0] step:7521/10000 train_time:609866ms step_avg:81.09ms
+[2025-07-08 02:36:09] [Rank 0] step:7541/10000 train_time:611357ms step_avg:81.07ms
+[2025-07-08 02:36:11] [Rank 0] step:7561/10000 train_time:612851ms step_avg:81.05ms
+[2025-07-08 02:36:12] [Rank 0] step:7581/10000 train_time:614582ms step_avg:81.07ms
+[2025-07-08 02:36:14] [Rank 0] step:7601/10000 train_time:616079ms step_avg:81.05ms
+[2025-07-08 02:36:15] [Rank 0] step:7621/10000 train_time:617575ms step_avg:81.04ms
+[2025-07-08 02:36:17] [Rank 0] step:7641/10000 train_time:619073ms step_avg:81.02ms
+[2025-07-08 02:36:18] [Rank 0] step:7661/10000 train_time:620806ms step_avg:81.03ms
+[2025-07-08 02:36:20] [Rank 0] step:7681/10000 train_time:622303ms step_avg:81.02ms
+[2025-07-08 02:36:21] [Rank 0] step:7701/10000 train_time:623801ms step_avg:81.00ms
+[2025-07-08 02:36:23] [Rank 0] step:7721/10000 train_time:625300ms step_avg:80.99ms
+[2025-07-08 02:36:25] [Rank 0] step:7741/10000 train_time:626853ms step_avg:80.98ms
+[2025-07-08 02:36:27] [Rank 0] step:7761/10000 train_time:628951ms step_avg:81.04ms
+[2025-07-08 02:36:28] [Rank 0] step:7781/10000 train_time:630451ms step_avg:81.02ms
+[2025-07-08 02:36:30] [Rank 0] step:7801/10000 train_time:631950ms step_avg:81.01ms
+[2025-07-08 02:36:31] [Rank 0] step:7821/10000 train_time:633449ms step_avg:80.99ms
+[2025-07-08 02:36:33] [Rank 0] step:7841/10000 train_time:635592ms step_avg:81.06ms
+[2025-07-08 02:36:35] [Rank 0] step:7861/10000 train_time:637203ms step_avg:81.06ms
+[2025-07-08 02:36:36] [Rank 0] step:7881/10000 train_time:638858ms step_avg:81.06ms
+[2025-07-08 02:36:38] [Rank 0] step:7901/10000 train_time:640359ms step_avg:81.05ms
+[2025-07-08 02:36:40] [Rank 0] step:7921/10000 train_time:641860ms step_avg:81.03ms
+[2025-07-08 02:36:41] [Rank 0] step:7941/10000 train_time:643594ms step_avg:81.05ms
+[2025-07-08 02:36:43] [Rank 0] step:7961/10000 train_time:645093ms step_avg:81.03ms
+[2025-07-08 02:36:44] [Rank 0] step:7981/10000 train_time:646595ms step_avg:81.02ms
+[2025-07-08 02:36:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:36:47] [Rank 0] PRINT: step:8000/10000 train_loss:0.9516 val_loss:0.9466 train_time:648096ms step_avg:81.01ms
+[2025-07-08 02:36:47] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:36:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
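
Since every progress line follows the same `step:<n>/10000 train_time:<t>ms step_avg:<a>ms` pattern, the curves saved above can also be rebuilt offline from the raw log with a few lines of parsing (a standalone sketch; the training script produces its PNGs through its own plotting path):

    import re

    STEP_RE = re.compile(r"step:(\d+)/10000 train_time:(\d+)ms step_avg:([\d.]+)ms")

    def parse_progress(log_text):
        """Return (step, train_time_ms, step_avg_ms) tuples from a rank-0 log."""
        return [(int(s), int(t), float(a)) for s, t, a in STEP_RE.findall(log_text)]
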
+[2025-07-08 02:36:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:42:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:42:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:42:11] [Rank 0] Total Loss: 5.6824
+[2025-07-08 02:42:11] [Rank 0] Total FTA: 0.7286
+[2025-07-08 02:42:11] [Rank 0] Group 0 Loss: 6.3042
+[2025-07-08 02:42:11] [Rank 0] Group 1 Loss: 5.7252
+[2025-07-08 02:42:11] [Rank 0] Group 2 Loss: 5.4445
+[2025-07-08 02:42:11] [Rank 0] Group 3 Loss: 5.6733
+[2025-07-08 02:42:11] [Rank 0] Group 4 Loss: 5.5112
+[2025-07-08 02:42:11] [Rank 0] Group 5 Loss: 5.5450
+[2025-07-08 02:42:11] [Rank 0] Group 6 Loss: 5.5669
+[2025-07-08 02:42:11] [Rank 0] Group 7 Loss: 5.6317
+[2025-07-08 02:42:11] [Rank 0] Group 8 Loss: 5.5912
+[2025-07-08 02:42:11] [Rank 0] Group 9 Loss: 5.5588
+[2025-07-08 02:42:11] [Rank 0] Group 10 Loss: 5.5797
+[2025-07-08 02:42:11] [Rank 0] Group 11 Loss: 5.5863
+[2025-07-08 02:42:11] [Rank 0] Group 0 FTA: 0.5267
+[2025-07-08 02:42:11] [Rank 0] Group 1 FTA: 0.8151
+[2025-07-08 02:42:11] [Rank 0] Group 2 FTA: 0.7891
+[2025-07-08 02:42:11] [Rank 0] Group 3 FTA: 0.7083
+[2025-07-08 02:42:11] [Rank 0] Group 4 FTA: 0.7760
+[2025-07-08 02:42:11] [Rank 0] Group 5 FTA: 0.7917
+[2025-07-08 02:42:11] [Rank 0] Group 6 FTA: 0.7578
+[2025-07-08 02:42:11] [Rank 0] Group 7 FTA: 0.7318
+[2025-07-08 02:42:11] [Rank 0] Group 8 FTA: 0.7656
+[2025-07-08 02:42:11] [Rank 0] Group 9 FTA: 0.7891
+[2025-07-08 02:42:11] [Rank 0] Group 10 FTA: 0.7266
+[2025-07-08 02:42:11] [Rank 0] Group 11 FTA: 0.7510
+[2025-07-08 02:42:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:42:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:42:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:42:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:42:14] [Rank 0] step:8001/10000 train_time:648117ms step_avg:81.00ms
+[2025-07-08 02:42:16] [Rank 0] step:8021/10000 train_time:650276ms step_avg:81.07ms
+[2025-07-08 02:42:18] [Rank 0] step:8041/10000 train_time:651769ms step_avg:81.06ms
+[2025-07-08 02:42:19] [Rank 0] step:8061/10000 train_time:653265ms step_avg:81.04ms
+[2025-07-08 02:42:21] [Rank 0] step:8081/10000 train_time:654759ms step_avg:81.02ms
+[2025-07-08 02:42:23] [Rank 0] step:8101/10000 train_time:656941ms step_avg:81.09ms
+[2025-07-08 02:42:24] [Rank 0] step:8121/10000 train_time:658418ms step_avg:81.08ms
+[2025-07-08 02:42:26] [Rank 0] step:8141/10000 train_time:659915ms step_avg:81.06ms
+[2025-07-08 02:42:27] [Rank 0] step:8161/10000 train_time:661413ms step_avg:81.05ms
+[2025-07-08 02:42:29] [Rank 0] step:8181/10000 train_time:662912ms step_avg:81.03ms
+[2025-07-08 02:42:31] [Rank 0] step:8201/10000 train_time:665061ms step_avg:81.10ms
+[2025-07-08 02:42:33] [Rank 0] step:8221/10000 train_time:666558ms step_avg:81.08ms
+[2025-07-08 02:42:34] [Rank 0] step:8241/10000 train_time:668057ms step_avg:81.07ms
+[2025-07-08 02:42:36] [Rank 0] step:8261/10000 train_time:669557ms step_avg:81.05ms
+[2025-07-08 02:42:38] [Rank 0] step:8281/10000 train_time:671312ms step_avg:81.07ms
+[2025-07-08 02:42:39] [Rank 0] step:8301/10000 train_time:673204ms step_avg:81.10ms
+[2025-07-08 02:42:41] [Rank 0] step:8321/10000 train_time:674703ms step_avg:81.08ms
+[2025-07-08 02:42:42] [Rank 0] step:8341/10000 train_time:676202ms step_avg:81.07ms
+[2025-07-08 02:42:44] [Rank 0] step:8361/10000 train_time:677704ms step_avg:81.06ms
+[2025-07-08 02:42:46] [Rank 0] step:8381/10000 train_time:679845ms step_avg:81.12ms
+[2025-07-08 02:42:47] [Rank 0] step:8401/10000 train_time:681345ms step_avg:81.10ms
+[2025-07-08 02:42:49] [Rank 0] step:8421/10000 train_time:682848ms step_avg:81.09ms
+[2025-07-08 02:42:50] [Rank 0] step:8441/10000 train_time:684349ms step_avg:81.07ms
+[2025-07-08 02:42:52] [Rank 0] step:8461/10000 train_time:685850ms step_avg:81.06ms
+[2025-07-08 02:42:54] [Rank 0] step:8481/10000 train_time:687588ms step_avg:81.07ms
+[2025-07-08 02:42:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:42:56] [Rank 0] PRINT: step:8500/10000 train_loss:0.9412 val_loss:0.9367 train_time:689332ms step_avg:81.10ms
+[2025-07-08 02:42:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:42:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
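
Note that the reported totals are not a plain mean of the twelve groups: at step 8000 the unweighted group-FTA mean works out to 0.7441 while Total FTA is 0.7286, which is only consistent if the total is computed over all samples and the groups contribute unequal sample counts. A quick check, with the group values copied from the step-8000 block above:

    group_fta_8000 = [0.5267, 0.8151, 0.7891, 0.7083, 0.7760, 0.7917,
                      0.7578, 0.7318, 0.7656, 0.7891, 0.7266, 0.7510]
    macro = sum(group_fta_8000) / len(group_fta_8000)
    print(f"macro average: {macro:.4f}")  # 0.7441, vs. reported Total FTA 0.7286
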
+[2025-07-08 02:42:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:48:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:48:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:48:19] [Rank 0] Total Loss: 5.8031
+[2025-07-08 02:48:19] [Rank 0] Total FTA: 0.7179
+[2025-07-08 02:48:19] [Rank 0] Group 0 Loss: 6.4050
+[2025-07-08 02:48:19] [Rank 0] Group 1 Loss: 5.8630
+[2025-07-08 02:48:19] [Rank 0] Group 2 Loss: 5.4778
+[2025-07-08 02:48:19] [Rank 0] Group 3 Loss: 5.8048
+[2025-07-08 02:48:19] [Rank 0] Group 4 Loss: 5.6446
+[2025-07-08 02:48:19] [Rank 0] Group 5 Loss: 5.6704
+[2025-07-08 02:48:19] [Rank 0] Group 6 Loss: 5.6683
+[2025-07-08 02:48:19] [Rank 0] Group 7 Loss: 5.7205
+[2025-07-08 02:48:19] [Rank 0] Group 8 Loss: 5.6624
+[2025-07-08 02:48:19] [Rank 0] Group 9 Loss: 5.6737
+[2025-07-08 02:48:19] [Rank 0] Group 10 Loss: 5.7109
+[2025-07-08 02:48:19] [Rank 0] Group 11 Loss: 5.7720
+[2025-07-08 02:48:19] [Rank 0] Group 0 FTA: 0.4707
+[2025-07-08 02:48:19] [Rank 0] Group 1 FTA: 0.6562
+[2025-07-08 02:48:19] [Rank 0] Group 2 FTA: 0.7474
+[2025-07-08 02:48:19] [Rank 0] Group 3 FTA: 0.8724
+[2025-07-08 02:48:19] [Rank 0] Group 4 FTA: 0.8047
+[2025-07-08 02:48:19] [Rank 0] Group 5 FTA: 0.7214
+[2025-07-08 02:48:19] [Rank 0] Group 6 FTA: 0.7656
+[2025-07-08 02:48:19] [Rank 0] Group 7 FTA: 0.7396
+[2025-07-08 02:48:19] [Rank 0] Group 8 FTA: 0.7500
+[2025-07-08 02:48:19] [Rank 0] Group 9 FTA: 0.7773
+[2025-07-08 02:48:19] [Rank 0] Group 10 FTA: 0.7441
+[2025-07-08 02:48:19] [Rank 0] Group 11 FTA: 0.7578
+[2025-07-08 02:48:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:48:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:48:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:48:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:48:20] [Rank 0] step:8501/10000 train_time:689353ms step_avg:81.09ms
+[2025-07-08 02:48:22] [Rank 0] step:8521/10000 train_time:690851ms step_avg:81.08ms
+[2025-07-08 02:48:23] [Rank 0] step:8541/10000 train_time:692342ms step_avg:81.06ms
+[2025-07-08 02:48:25] [Rank 0] step:8561/10000 train_time:694486ms step_avg:81.12ms
+[2025-07-08 02:48:27] [Rank 0] step:8581/10000 train_time:695977ms step_avg:81.11ms
+[2025-07-08 02:48:28] [Rank 0] step:8601/10000 train_time:697474ms step_avg:81.09ms
+[2025-07-08 02:48:30] [Rank 0] step:8621/10000 train_time:698969ms step_avg:81.08ms
+[2025-07-08 02:48:32] [Rank 0] step:8641/10000 train_time:701147ms step_avg:81.14ms
+[2025-07-08 02:48:34] [Rank 0] step:8661/10000 train_time:702626ms step_avg:81.13ms
+[2025-07-08 02:48:35] [Rank 0] step:8681/10000 train_time:704122ms step_avg:81.11ms
+[2025-07-08 02:48:37] [Rank 0] step:8701/10000 train_time:705620ms step_avg:81.10ms
+[2025-07-08 02:48:38] [Rank 0] step:8721/10000 train_time:707119ms step_avg:81.08ms
+[2025-07-08 02:48:40] [Rank 0] step:8741/10000 train_time:709267ms step_avg:81.14ms
+[2025-07-08 02:48:42] [Rank 0] step:8761/10000 train_time:710765ms step_avg:81.13ms
+[2025-07-08 02:48:43] [Rank 0] step:8781/10000 train_time:712266ms step_avg:81.11ms
+[2025-07-08 02:48:45] [Rank 0] step:8801/10000 train_time:713765ms step_avg:81.10ms
+[2025-07-08 02:48:46] [Rank 0] step:8821/10000 train_time:715266ms step_avg:81.09ms
+[2025-07-08 02:48:48] [Rank 0] step:8841/10000 train_time:717004ms step_avg:81.10ms
+[2025-07-08 02:48:49] [Rank 0] step:8861/10000 train_time:718502ms step_avg:81.09ms
+[2025-07-08 02:48:51] [Rank 0] step:8881/10000 train_time:720002ms step_avg:81.07ms
+[2025-07-08 02:48:52] [Rank 0] step:8901/10000 train_time:721503ms step_avg:81.06ms
+[2025-07-08 02:48:55] [Rank 0] step:8921/10000 train_time:723669ms step_avg:81.12ms
+[2025-07-08 02:48:56] [Rank 0] step:8941/10000 train_time:725167ms step_avg:81.11ms
+[2025-07-08 02:48:58] [Rank 0] step:8961/10000 train_time:726669ms step_avg:81.09ms
+[2025-07-08 02:48:59] [Rank 0] step:8981/10000 train_time:728170ms step_avg:81.08ms
+[2025-07-08 02:49:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:49:01] [Rank 0] PRINT: step:9000/10000 train_loss:0.9325 val_loss:0.9298 train_time:729671ms step_avg:81.07ms
+[2025-07-08 02:49:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:49:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 02:49:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:54:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:54:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:54:31] [Rank 0] Total Loss: 5.8195
+[2025-07-08 02:54:31] [Rank 0] Total FTA: 0.7204
+[2025-07-08 02:54:31] [Rank 0] Group 0 Loss: 6.3433
+[2025-07-08 02:54:31] [Rank 0] Group 1 Loss: 5.8768
+[2025-07-08 02:54:31] [Rank 0] Group 2 Loss: 5.6256
+[2025-07-08 02:54:31] [Rank 0] Group 3 Loss: 5.6857
+[2025-07-08 02:54:31] [Rank 0] Group 4 Loss: 5.7511
+[2025-07-08 02:54:31] [Rank 0] Group 5 Loss: 5.7207
+[2025-07-08 02:54:31] [Rank 0] Group 6 Loss: 5.6985
+[2025-07-08 02:54:31] [Rank 0] Group 7 Loss: 5.7514
+[2025-07-08 02:54:31] [Rank 0] Group 8 Loss: 5.7704
+[2025-07-08 02:54:31] [Rank 0] Group 9 Loss: 5.7851
+[2025-07-08 02:54:31] [Rank 0] Group 10 Loss: 5.7387
+[2025-07-08 02:54:31] [Rank 0] Group 11 Loss: 5.7287
+[2025-07-08 02:54:31] [Rank 0] Group 0 FTA: 0.5007
+[2025-07-08 02:54:31] [Rank 0] Group 1 FTA: 0.5182
+[2025-07-08 02:54:31] [Rank 0] Group 2 FTA: 0.8281
+[2025-07-08 02:54:31] [Rank 0] Group 3 FTA: 0.8047
+[2025-07-08 02:54:31] [Rank 0] Group 4 FTA: 0.6484
+[2025-07-08 02:54:31] [Rank 0] Group 5 FTA: 0.8490
+[2025-07-08 02:54:31] [Rank 0] Group 6 FTA: 0.7630
+[2025-07-08 02:54:31] [Rank 0] Group 7 FTA: 0.7292
+[2025-07-08 02:54:31] [Rank 0] Group 8 FTA: 0.7891
+[2025-07-08 02:54:31] [Rank 0] Group 9 FTA: 0.7930
+[2025-07-08 02:54:31] [Rank 0] Group 10 FTA: 0.7676
+[2025-07-08 02:54:31] [Rank 0] Group 11 FTA: 0.7812
+[2025-07-08 02:54:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 02:54:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 02:54:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 02:54:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 02:54:33] [Rank 0] step:9001/10000 train_time:729698ms step_avg:81.07ms
+[2025-07-08 02:54:34] [Rank 0] step:9021/10000 train_time:731477ms step_avg:81.09ms
+[2025-07-08 02:54:36] [Rank 0] step:9041/10000 train_time:732970ms step_avg:81.07ms
+[2025-07-08 02:54:37] [Rank 0] step:9061/10000 train_time:734464ms step_avg:81.06ms
+[2025-07-08 02:54:39] [Rank 0] step:9081/10000 train_time:735960ms step_avg:81.04ms
+[2025-07-08 02:54:41] [Rank 0] step:9101/10000 train_time:738128ms step_avg:81.10ms
+[2025-07-08 02:54:42] [Rank 0] step:9121/10000 train_time:739622ms step_avg:81.09ms
+[2025-07-08 02:54:44] [Rank 0] step:9141/10000 train_time:741120ms step_avg:81.08ms
+[2025-07-08 02:54:45] [Rank 0] step:9161/10000 train_time:742618ms step_avg:81.06ms
+[2025-07-08 02:54:47] [Rank 0] step:9181/10000 train_time:744792ms step_avg:81.12ms
+[2025-07-08 02:54:49] [Rank 0] step:9201/10000 train_time:746272ms step_avg:81.11ms
+[2025-07-08 02:54:50] [Rank 0] step:9221/10000 train_time:747772ms step_avg:81.09ms
+[2025-07-08 02:54:52] [Rank 0] step:9241/10000 train_time:749271ms step_avg:81.08ms
+[2025-07-08 02:54:53] [Rank 0] step:9261/10000 train_time:750771ms step_avg:81.07ms
+[2025-07-08 02:54:55] [Rank 0] step:9281/10000 train_time:752504ms step_avg:81.08ms
+[2025-07-08 02:54:57] [Rank 0] step:9301/10000 train_time:754003ms step_avg:81.07ms
+[2025-07-08 02:54:58] [Rank 0] step:9321/10000 train_time:755503ms step_avg:81.05ms
+[2025-07-08 02:55:00] [Rank 0] step:9341/10000 train_time:757002ms step_avg:81.04ms
+[2025-07-08 02:55:01] [Rank 0] step:9361/10000 train_time:758505ms step_avg:81.03ms
+[2025-07-08 02:55:03] [Rank 0] step:9381/10000 train_time:760046ms step_avg:81.02ms
+[2025-07-08 02:55:04] [Rank 0] step:9401/10000 train_time:761548ms step_avg:81.01ms
+[2025-07-08 02:55:06] [Rank 0] step:9421/10000 train_time:763051ms step_avg:80.99ms
+[2025-07-08 02:55:07] [Rank 0] step:9441/10000 train_time:764550ms step_avg:80.98ms
+[2025-07-08 02:55:09] [Rank 0] step:9461/10000 train_time:766290ms step_avg:80.99ms
+[2025-07-08 02:55:10] [Rank 0] step:9481/10000 train_time:767791ms step_avg:80.98ms
+[2025-07-08 02:55:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:55:13] [Rank 0] PRINT: step:9500/10000 train_loss:0.9258 val_loss:0.9253 train_time:769293ms step_avg:80.98ms
+[2025-07-08 02:55:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:55:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
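
Across the six detailed evaluations completed so far in this stretch, teacher-forced val_loss falls monotonically while the detailed-eval Total Loss trends upward after step 7500; the two are computed over different data and scoring positions, so the divergence is worth tracking but not contradictory. The values, collected from the log above:

    # step: (val_loss, detailed Total Loss, Total FTA) — copied from this log
    checkpoints = {
        6500: (0.9812, 5.5712, 0.6402),
        7000: (0.9680, 5.5924, 0.6359),
        7500: (0.9566, 5.5321, 0.7163),
        8000: (0.9466, 5.6824, 0.7286),
        8500: (0.9367, 5.8031, 0.7179),
        9000: (0.9298, 5.8195, 0.7204),
    }
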
+[2025-07-08 02:55:13] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:55:13] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 03:00:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 03:00:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 03:00:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 03:00:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 03:00:40] [Rank 0] Total Loss: 5.8698 +[2025-07-08 03:00:40] [Rank 0] Total Loss: 5.8698 +[2025-07-08 03:00:40] [Rank 0] Total FTA: 0.7563 +[2025-07-08 03:00:40] [Rank 0] Total FTA: 0.7563 +[2025-07-08 03:00:40] [Rank 0] Group 0 Loss: 6.4813 +[2025-07-08 03:00:40] [Rank 0] Group 0 Loss: 6.4813 +[2025-07-08 03:00:40] [Rank 0] Group 1 Loss: 6.0237 +[2025-07-08 03:00:40] [Rank 0] Group 1 Loss: 6.0237 +[2025-07-08 03:00:40] [Rank 0] Group 2 Loss: 5.5058 +[2025-07-08 03:00:40] [Rank 0] Group 2 Loss: 5.5058 +[2025-07-08 03:00:40] [Rank 0] Group 3 Loss: 5.7244 +[2025-07-08 03:00:40] [Rank 0] Group 3 Loss: 5.7244 +[2025-07-08 03:00:40] [Rank 0] Group 4 Loss: 5.6874 +[2025-07-08 03:00:40] [Rank 0] Group 4 Loss: 5.6874 +[2025-07-08 03:00:40] [Rank 0] Group 5 Loss: 5.7181 +[2025-07-08 03:00:40] [Rank 0] Group 5 Loss: 5.7181 +[2025-07-08 03:00:40] [Rank 0] Group 6 Loss: 5.7127 +[2025-07-08 03:00:40] [Rank 0] Group 6 Loss: 5.7127 +[2025-07-08 03:00:40] [Rank 0] Group 7 Loss: 5.8596 +[2025-07-08 03:00:40] [Rank 0] Group 7 Loss: 5.8596 +[2025-07-08 03:00:40] [Rank 0] Group 8 Loss: 5.7234 +[2025-07-08 03:00:40] [Rank 0] Group 8 Loss: 5.7234 +[2025-07-08 03:00:40] [Rank 0] Group 9 Loss: 5.8716 +[2025-07-08 03:00:40] [Rank 0] Group 9 Loss: 5.8716 +[2025-07-08 03:00:40] [Rank 0] Group 10 Loss: 5.8231 +[2025-07-08 03:00:40] [Rank 0] Group 10 Loss: 5.8231 +[2025-07-08 03:00:40] [Rank 0] Group 11 Loss: 5.8097 +[2025-07-08 03:00:40] [Rank 0] Group 11 Loss: 5.8097 +[2025-07-08 03:00:40] [Rank 0] Group 0 FTA: 0.6723 +[2025-07-08 03:00:40] [Rank 0] Group 0 FTA: 0.6723 +[2025-07-08 03:00:40] [Rank 0] Group 1 FTA: 0.4688 +[2025-07-08 03:00:40] [Rank 0] Group 1 FTA: 0.4688 +[2025-07-08 03:00:40] [Rank 0] Group 2 FTA: 0.8490 +[2025-07-08 03:00:40] [Rank 0] Group 2 FTA: 0.8490 +[2025-07-08 03:00:40] [Rank 0] Group 3 FTA: 0.8281 +[2025-07-08 03:00:40] [Rank 0] Group 3 FTA: 0.8281 +[2025-07-08 03:00:40] [Rank 0] Group 4 FTA: 0.7630 +[2025-07-08 03:00:40] [Rank 0] Group 4 FTA: 0.7630 +[2025-07-08 03:00:40] [Rank 0] Group 5 FTA: 0.8073 +[2025-07-08 03:00:40] [Rank 0] Group 5 FTA: 0.8073 +[2025-07-08 03:00:40] [Rank 0] Group 6 FTA: 0.8229 +[2025-07-08 03:00:40] [Rank 0] Group 6 FTA: 0.8229 +[2025-07-08 03:00:40] [Rank 0] Group 7 FTA: 0.7370 +[2025-07-08 03:00:40] [Rank 0] Group 7 FTA: 0.7370 +[2025-07-08 03:00:40] [Rank 0] Group 8 FTA: 0.8255 +[2025-07-08 03:00:40] [Rank 0] Group 8 FTA: 0.8255 +[2025-07-08 03:00:40] [Rank 0] Group 9 FTA: 0.8359 +[2025-07-08 03:00:40] [Rank 0] Group 9 FTA: 0.8359 +[2025-07-08 03:00:40] [Rank 0] Group 10 FTA: 0.7754 +[2025-07-08 03:00:40] [Rank 0] Group 10 FTA: 0.7754 +[2025-07-08 03:00:40] [Rank 0] Group 11 FTA: 0.7705 +[2025-07-08 03:00:40] [Rank 0] Group 11 FTA: 0.7705 +[2025-07-08 03:00:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png +[2025-07-08 03:00:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png +[2025-07-08 03:00:41] [Rank 0] [✓] Per-Class FTA curve 
+[2025-07-08 03:00:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 03:00:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 03:00:42] [Rank 0] step:9501/10000 train_time:769315ms step_avg:80.97ms
+[2025-07-08 03:00:43] [Rank 0] step:9521/10000 train_time:770818ms step_avg:80.96ms
+[2025-07-08 03:00:45] [Rank 0] step:9541/10000 train_time:772313ms step_avg:80.95ms
+[2025-07-08 03:00:47] [Rank 0] step:9561/10000 train_time:774463ms step_avg:81.00ms
+[2025-07-08 03:00:48] [Rank 0] step:9581/10000 train_time:775954ms step_avg:80.99ms
+[2025-07-08 03:00:50] [Rank 0] step:9601/10000 train_time:777652ms step_avg:81.00ms
+[2025-07-08 03:00:51] [Rank 0] step:9621/10000 train_time:779148ms step_avg:80.98ms
+[2025-07-08 03:00:53] [Rank 0] step:9641/10000 train_time:780884ms step_avg:81.00ms
+[2025-07-08 03:00:55] [Rank 0] step:9661/10000 train_time:782382ms step_avg:80.98ms
+[2025-07-08 03:00:56] [Rank 0] step:9681/10000 train_time:783879ms step_avg:80.97ms
+[2025-07-08 03:00:58] [Rank 0] step:9701/10000 train_time:785377ms step_avg:80.96ms
+[2025-07-08 03:01:00] [Rank 0] step:9721/10000 train_time:787548ms step_avg:81.02ms
+[2025-07-08 03:01:01] [Rank 0] step:9741/10000 train_time:789029ms step_avg:81.00ms
+[2025-07-08 03:01:03] [Rank 0] step:9761/10000 train_time:790528ms step_avg:80.99ms
+[2025-07-08 03:01:04] [Rank 0] step:9781/10000 train_time:792028ms step_avg:80.98ms
+[2025-07-08 03:01:06] [Rank 0] step:9801/10000 train_time:793527ms step_avg:80.96ms
+[2025-07-08 03:01:08] [Rank 0] step:9821/10000 train_time:795260ms step_avg:80.98ms
+[2025-07-08 03:01:09] [Rank 0] step:9841/10000 train_time:796762ms step_avg:80.96ms
+[2025-07-08 03:01:11] [Rank 0] step:9861/10000 train_time:798264ms step_avg:80.95ms
+[2025-07-08 03:01:12] [Rank 0] step:9881/10000 train_time:799765ms step_avg:80.94ms
+[2025-07-08 03:01:14] [Rank 0] step:9901/10000 train_time:801522ms step_avg:80.95ms
+[2025-07-08 03:01:15] [Rank 0] step:9921/10000 train_time:803004ms step_avg:80.94ms
+[2025-07-08 03:01:17] [Rank 0] step:9941/10000 train_time:804505ms step_avg:80.93ms
+[2025-07-08 03:01:18] [Rank 0] step:9961/10000 train_time:806006ms step_avg:80.92ms
+[2025-07-08 03:01:20] [Rank 0] step:9981/10000 train_time:807508ms step_avg:80.90ms
+[2025-07-08 03:01:22] [Rank 0] step:10000/10000 train_time:809575ms step_avg:80.96ms
+[2025-07-08 03:01:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:01:23] [Rank 0] PRINT: step:10000/10000 train_loss:0.9206 val_loss:0.9209 train_time:809655ms step_avg:80.97ms
+[2025-07-08 03:01:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:01:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:01:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:06:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:06:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:06:57] [Rank 0] Total Loss: 5.8934
+[2025-07-08 03:06:57] [Rank 0] Total FTA: 0.7396
+[2025-07-08 03:06:57] [Rank 0] Group 0 Loss: 6.4153
+[2025-07-08 03:06:57] [Rank 0] Group 1 Loss: 6.0882
+[2025-07-08 03:06:57] [Rank 0] Group 2 Loss: 5.5708
+[2025-07-08 03:06:57] [Rank 0] Group 3 Loss: 5.7998
+[2025-07-08 03:06:57] [Rank 0] Group 4 Loss: 5.7622
+[2025-07-08 03:06:57] [Rank 0] Group 5 Loss: 5.8161
+[2025-07-08 03:06:57] [Rank 0] Group 6 Loss: 5.6659
+[2025-07-08 03:06:57] [Rank 0] Group 7 Loss: 5.8813
+[2025-07-08 03:06:57] [Rank 0] Group 8 Loss: 5.7972
+[2025-07-08 03:06:57] [Rank 0] Group 9 Loss: 5.7892
+[2025-07-08 03:06:57] [Rank 0] Group 10 Loss: 5.7950
+[2025-07-08 03:06:57] [Rank 0] Group 11 Loss: 5.8636
+[2025-07-08 03:06:57] [Rank 0] Group 0 FTA: 0.4837
+[2025-07-08 03:06:57] [Rank 0] Group 1 FTA: 0.4818
+[2025-07-08 03:06:57] [Rank 0] Group 2 FTA: 0.9089
+[2025-07-08 03:06:57] [Rank 0] Group 3 FTA: 0.8698
+[2025-07-08 03:06:57] [Rank 0] Group 4 FTA: 0.7682
+[2025-07-08 03:06:57] [Rank 0] Group 5 FTA: 0.7839
+[2025-07-08 03:06:57] [Rank 0] Group 6 FTA: 0.8021
+[2025-07-08 03:06:57] [Rank 0] Group 7 FTA: 0.7656
+[2025-07-08 03:06:57] [Rank 0] Group 8 FTA: 0.7917
+[2025-07-08 03:06:57] [Rank 0] Group 9 FTA: 0.8047
+[2025-07-08 03:06:57] [Rank 0] Group 10 FTA: 0.7988
+[2025-07-08 03:06:57] [Rank 0] Group 11 FTA: 0.7900
+[2025-07-08 03:06:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_loss_curves.png
+[2025-07-08 03:06:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/per_class_acc_curves.png
+[2025-07-08 03:06:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_loss_curve.png
+[2025-07-08 03:06:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_43/total_acc_curve.png
+[2025-07-08 03:06:59] [Rank 0] step:10001/10000 train_time:809677ms step_avg:80.96ms
+[2025-07-08 03:06:59] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 03:06:59 2025 ---
+[2025-07-08 03:06:59] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9936 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..5c90e66c0fdf953a107442ca8e992b2d38e4c440
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 45,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "746216f1-f2dd-400c-90a0-de24808702cd",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..e33eff261b78ef7aa135664b4cad26af394d6666
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a61851c2356bf6f247fe5f82cdac2f3f6a5284c0505e5bc60b8ebb27fcae82f1
+size 474629
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..b07609a920efc00b827d5f5f014cb2d0db536a6b
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c8c38d7cb770dee3f9b34e36a5fe1f0bcd614a98f68c0531c6274d7a74d124eb
+size 410825
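The step/train_time pairs in the log above are enough to recompute the logged
step_avg values; below is a minimal standalone sketch in the script's own
language (the log path is a placeholder, and the regex assumes exactly the
"step:N/10000 train_time:Tms" line format shown above):

import re

# Recompute average ms/step from a training log of the format above.
pat = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms")
with open("training_log.txt") as f:  # placeholder path
    pairs = [(int(s), int(t)) for s, t in pat.findall(f.read())]
for step, t in pairs[-3:]:
    if step:  # guard against a hypothetical step:0 entry
        print(f"step {step}: avg {t / step:.2f} ms/step")  # e.g. 809575/10000 -> 80.96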
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..c442b9856852d0c626168ee867c73f2de1bb41e5
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ffecccd5a50b94e7d1e36479f21afc7f90fd8adda387ded3795877c82655270
+size 114020
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..3d59f4ab3e80dea092b4eaba2595c68039954a29
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f2a75692eebf18fef50779818bfb041a4c4721f95324328b3475a1464a18558
+size 128128
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/training_log_746216f1-f2dd-400c-90a0-de24808702cd.txt b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/training_log_746216f1-f2dd-400c-90a0-de24808702cd.txt
new file mode 100644
index 0000000000000000000000000000000000000000..93a71028e80b7c3569a59e3ef68a47bb8b593f95
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/training_log_746216f1-f2dd-400c-90a0-de24808702cd.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 23:53:44] [Rank 0] PRINT: --- Script Start: Sun Jul 6 23:53:44 2025 ---
+[2025-07-06 23:53:44] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005)
+[2025-07-06 23:53:44] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 23:53:44] [Rank 0] PRINT: Using fixed seed: 45
+[2025-07-06 23:53:44] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45
+[2025-07-06 23:53:44] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op,
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
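+# Hedged usage note: the launch command below is an assumption (the script
+# filename is a placeholder), but RANK/LOCAL_RANK/WORLD_SIZE read above are the
+# variables that torchrun exports, so a typical multi-GPU invocation would be
+#   torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0005 --seed 45
+# A plain `python` launch also works: the env lookups default to rank 0 of a
+# world of size 1.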
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
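+        # Illustrative (hypothetical) example, not taken from the dataset:
+        #   raw text : "What city was Alice born in? Answer: Paris"
+        #   cleaned  : "What city was Alice born in? Paris"
+        # so the split below yields prompt "What city was Alice born in?" and
+        # answer "Paris"; the expected first token is GPT-2's token for " Paris".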
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
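+# Worked example for the two schedules defined above (a sketch of the arithmetic,
+# derived directly from get_lr/get_window_size_blocks with num_iterations=10000
+# and cooldown_frac=0.8):
+#   get_lr: the multiplier is 1.0 for the first 2000 steps, then decays linearly
+#   toward 0.1; at step 9000, x=0.9, w=(1-0.9)/0.8=0.125, so the multiplier is
+#   0.125*1.0 + 0.875*0.1 = 0.2125.
+#   get_window_size_blocks: the attention window grows from 128 tokens (1 block)
+#   at step 0 to 1792 tokens (14 blocks) at step 10000; at step 5000,
+#   1728*0.5 = 864 -> next multiple of 128 is 896 -> 7 blocks.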
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 23:53:44] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycling through the shards allows multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            # Append the message to the run's logfile exactly once per call
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
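+# Note on the Muon momentum warmup in the training loop below (worked values,
+# assuming the hard-coded 300-step ramp): frac = min(step / 300, 1) gives
+# momentum = (1 - frac) * 0.85 + frac * 0.95, i.e.
+#   step 0    -> 0.85
+#   step 150  -> 0.90
+#   step 300+ -> 0.95 (held for the remainder of training)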
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 23:53:44] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 23:53:44] [Rank 0] PRINT: Constructing model...
+[2025-07-06 23:53:46] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-06 23:53:46] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-06 23:53:46] [Rank 0] PRINT: Testing model forward function:
+[2025-07-06 23:53:47] [Rank 0] PRINT: Model test - Result type:
+[2025-07-06 23:53:47] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-06 23:53:47] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-06 23:53:47] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-06 23:53:47] [Rank 0] PRINT: Model returns:
+[2025-07-06 23:53:47] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-06 23:53:47] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-06 23:53:47] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-06 23:53:47] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-06 23:53:47] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-06 23:53:47] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-06 23:53:47] [Rank 0] PRINT: Model compilation complete.
+[2025-07-06 23:53:47] [Rank 0] PRINT: Starting warmup...
+[2025-07-06 23:55:02] [Rank 0] PRINT: Warmup complete.
+[2025-07-06 23:55:02] [Rank 0] PRINT: Starting training...
+[2025-07-06 23:55:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:55:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-06 23:55:11] [Rank 0] step:21/10000 train_time:1651ms step_avg:78.64ms
+[2025-07-06 23:55:12] [Rank 0] step:41/10000 train_time:3107ms step_avg:75.77ms
+[2025-07-06 23:55:14] [Rank 0] step:61/10000 train_time:4558ms step_avg:74.73ms
+[2025-07-06 23:55:15] [Rank 0] step:81/10000 train_time:6011ms step_avg:74.21ms
+[2025-07-06 23:55:17] [Rank 0] step:101/10000 train_time:8137ms step_avg:80.56ms
+[2025-07-06 23:55:19] [Rank 0] step:121/10000 train_time:9596ms step_avg:79.30ms
+[2025-07-06 23:55:20] [Rank 0] step:141/10000 train_time:11051ms step_avg:78.37ms
+[2025-07-06 23:55:22] [Rank 0] step:161/10000 train_time:12509ms step_avg:77.70ms
+[2025-07-06 23:55:24] [Rank 0] step:181/10000 train_time:14628ms step_avg:80.82ms
+[2025-07-06 23:55:25] [Rank 0] step:201/10000 train_time:16068ms step_avg:79.94ms
+[2025-07-06 23:55:27] [Rank 0] step:221/10000 train_time:17527ms step_avg:79.31ms
+[2025-07-06 23:55:28] [Rank 0] step:241/10000 train_time:18990ms step_avg:78.80ms
+[2025-07-06 23:55:30] [Rank 0] step:261/10000 train_time:20546ms step_avg:78.72ms
+[2025-07-06 23:55:32] [Rank 0] step:281/10000 train_time:22667ms step_avg:80.67ms
+[2025-07-06 23:55:33] [Rank 0] step:301/10000 train_time:24130ms step_avg:80.17ms
+[2025-07-06 23:55:35] [Rank 0] step:321/10000 train_time:25594ms step_avg:79.73ms
+[2025-07-06 23:55:36] [Rank 0] step:341/10000 train_time:27061ms step_avg:79.36ms
+[2025-07-06 23:55:39] [Rank 0] step:361/10000 train_time:29189ms step_avg:80.86ms
+[2025-07-06 23:55:40] [Rank 0] step:381/10000 train_time:30634ms step_avg:80.41ms
+[2025-07-06 23:55:41] [Rank 0] step:401/10000 train_time:32103ms step_avg:80.06ms
+[2025-07-06 23:55:43] [Rank 0] step:421/10000 train_time:33572ms step_avg:79.74ms
+[2025-07-06 23:55:44] [Rank 0] step:441/10000 train_time:35039ms step_avg:79.45ms
+[2025-07-06 23:55:46] [Rank 0] step:461/10000 train_time:37163ms step_avg:80.61ms
+[2025-07-06 23:55:48] [Rank 0] step:481/10000 train_time:38630ms step_avg:80.31ms
+[2025-07-06 23:55:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:55:50] [Rank 0] PRINT: step:500/10000 train_loss:6.8670 val_loss:4.3925 train_time:40101ms step_avg:80.20ms
+[2025-07-06 23:55:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:55:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 23:55:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:01:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:01:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:01:13] [Rank 0] Total Loss: 5.6065
+[2025-07-07 00:01:13] [Rank 0] Total FTA: 0.0650
+[2025-07-07 00:01:13] [Rank 0] Group 0 Loss: 5.5679
+[2025-07-07 00:01:14] [Rank 0] Group 1 Loss: 5.6973
+[2025-07-07 00:01:14] [Rank 0] Group 2 Loss: 5.6501
+[2025-07-07 00:01:14] [Rank 0] Group 3 Loss: 5.5612
+[2025-07-07 00:01:14] [Rank 0] Group 4 Loss: 5.6400
+[2025-07-07 00:01:14] [Rank 0] Group 5 Loss: 5.5871
+[2025-07-07 00:01:14] [Rank 0] Group 6 Loss: 5.6109
+[2025-07-07 00:01:14] [Rank 0] Group 7 Loss: 5.5913
+[2025-07-07 00:01:14] [Rank 0] Group 8 Loss: 5.5755
+[2025-07-07 00:01:14] [Rank 0] Group 9 Loss: 5.6083
+[2025-07-07 00:01:14] [Rank 0] Group 10 Loss: 5.6205
+[2025-07-07 00:01:14] [Rank 0] Group 11 Loss: 5.6049
+[2025-07-07 00:01:14] [Rank 0] Group 0 FTA: 0.1795
+[2025-07-07 00:01:14] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 00:01:14] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 00:01:14] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 00:01:14] [Rank 0] Group 4 FTA: 0.0130
+[2025-07-07 00:01:14] [Rank 0] Group 5 FTA: 0.0469
+[2025-07-07 00:01:14] [Rank 0] Group 6 FTA: 0.0807
+[2025-07-07 00:01:14] [Rank 0] Group 7 FTA: 0.0729
+[2025-07-07 00:01:14] [Rank 0] Group 8 FTA: 0.0625
+[2025-07-07 00:01:14] [Rank 0] Group 9 FTA: 0.0586
+[2025-07-07 00:01:14] [Rank 0] Group 10 FTA: 0.0625
+[2025-07-07 00:01:14] [Rank 0] Group 11 FTA: 0.0547
+[2025-07-07 00:01:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:01:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:01:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:01:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:01:15] [Rank 0] step:501/10000 train_time:40122ms step_avg:80.08ms
+[2025-07-07 00:01:17] [Rank 0] step:521/10000 train_time:41581ms step_avg:79.81ms
+[2025-07-07 00:01:19] [Rank 0] step:541/10000 train_time:43298ms step_avg:80.03ms
+[2025-07-07 00:01:20] [Rank 0] step:561/10000 train_time:45151ms step_avg:80.48ms
+[2025-07-07 00:01:22] [Rank 0] step:581/10000 train_time:46609ms step_avg:80.22ms
+[2025-07-07 00:01:23] [Rank 0] step:601/10000 train_time:48072ms step_avg:79.99ms
+[2025-07-07 00:01:25] [Rank 0] step:621/10000 train_time:49531ms step_avg:79.76ms
+[2025-07-07 00:01:27] [Rank 0] step:641/10000 train_time:51652ms step_avg:80.58ms
+[2025-07-07 00:01:28] [Rank 0] step:661/10000 train_time:53114ms step_avg:80.35ms
+[2025-07-07 00:01:30] [Rank 0] step:681/10000 train_time:54578ms step_avg:80.14ms
+[2025-07-07 00:01:31] [Rank 0] step:701/10000 train_time:56044ms step_avg:79.95ms
+[2025-07-07 00:01:33] [Rank 0] step:721/10000 train_time:58177ms step_avg:80.69ms
+[2025-07-07 00:01:35] [Rank 0] step:741/10000 train_time:59621ms step_avg:80.46ms
+[2025-07-07 00:01:36] [Rank 0] step:761/10000 train_time:61095ms step_avg:80.28ms
+[2025-07-07 00:01:38] [Rank 0] step:781/10000 train_time:62572ms step_avg:80.12ms
+[2025-07-07 00:01:39] [Rank 0] step:801/10000 train_time:64045ms step_avg:79.96ms
+[2025-07-07 00:01:41] [Rank 0] step:821/10000 train_time:66186ms step_avg:80.62ms
+[2025-07-07 00:01:43] [Rank 0] step:841/10000 train_time:67661ms step_avg:80.45ms
+[2025-07-07 00:01:44] [Rank 0] step:861/10000 train_time:69137ms step_avg:80.30ms
+[2025-07-07 00:01:46] [Rank 0] step:881/10000 train_time:70615ms step_avg:80.15ms
+[2025-07-07 00:01:48] [Rank 0] step:901/10000 train_time:72144ms step_avg:80.07ms
+[2025-07-07 00:01:49] [Rank 0] step:921/10000 train_time:74363ms step_avg:80.74ms
+[2025-07-07 00:01:51] [Rank 0] step:941/10000 train_time:75840ms step_avg:80.60ms
+[2025-07-07 00:01:52] [Rank 0] step:961/10000 train_time:77316ms step_avg:80.45ms
+[2025-07-07 00:01:54] [Rank 0] step:981/10000 train_time:78792ms step_avg:80.32ms
+[2025-07-07 00:01:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:01:57] [Rank 0] PRINT: step:1000/10000 train_loss:2.9607 val_loss:2.0176 train_time:80938ms step_avg:80.94ms
+[2025-07-07 00:01:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:01:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:01:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:07:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:07:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:07:21] [Rank 0] Total Loss: 4.1285
+[2025-07-07 00:07:21] [Rank 0] Total FTA: 0.0996
+[2025-07-07 00:07:21] [Rank 0] Group 0 Loss: 4.3973
+[2025-07-07 00:07:21] [Rank 0] Group 1 Loss: 4.1763
+[2025-07-07 00:07:21] [Rank 0] Group 2 Loss: 3.9755
+[2025-07-07 00:07:21] [Rank 0] Group 3 Loss: 4.1102
+[2025-07-07 00:07:21] [Rank 0] Group 4 Loss: 4.1032
+[2025-07-07 00:07:21] [Rank 0] Group 5 Loss: 4.0441
+[2025-07-07 00:07:21] [Rank 0] Group 6 Loss: 4.0029
+[2025-07-07 00:07:21] [Rank 0] Group 7 Loss: 4.1453
+[2025-07-07 00:07:21] [Rank 0] Group 8 Loss: 4.0734
+[2025-07-07 00:07:21] [Rank 0] Group 9 Loss: 4.0544
+[2025-07-07 00:07:21] [Rank 0] Group 10 Loss: 4.0900
+[2025-07-07 00:07:21] [Rank 0] Group 11 Loss: 4.1132
+[2025-07-07 00:07:21] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-07 00:07:21] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 00:07:21] [Rank 0] Group 2 FTA: 0.1901
+[2025-07-07 00:07:21] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-07 00:07:21] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-07 00:07:21] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 00:07:21] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-07 00:07:21] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-07 00:07:21] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 00:07:21] [Rank 0] Group 9 FTA: 0.1211
+[2025-07-07 00:07:21] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-07 00:07:21] [Rank 0] Group 11 FTA: 0.1162
+[2025-07-07 00:07:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:07:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:07:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:07:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:07:23] [Rank 0] step:1001/10000 train_time:80959ms step_avg:80.88ms
+[2025-07-07 00:07:24] [Rank 0] step:1021/10000 train_time:82432ms step_avg:80.74ms
+[2025-07-07 00:07:25] [Rank 0] step:1041/10000 train_time:83900ms step_avg:80.60ms
+[2025-07-07 00:07:27] [Rank 0] step:1061/10000 train_time:85366ms step_avg:80.46ms
+[2025-07-07 00:07:29] [Rank 0] step:1081/10000 train_time:87515ms step_avg:80.96ms
+[2025-07-07 00:07:31] [Rank 0] step:1101/10000 train_time:88961ms step_avg:80.80ms
+[2025-07-07 00:07:32] [Rank 0] step:1121/10000 train_time:90431ms step_avg:80.67ms
+[2025-07-07 00:07:33] [Rank 0] step:1141/10000 train_time:91901ms step_avg:80.54ms
+[2025-07-07 00:07:35] [Rank 0] step:1161/10000 train_time:93373ms step_avg:80.43ms
+[2025-07-07 00:07:37] [Rank 0] step:1181/10000 train_time:95491ms step_avg:80.86ms
+[2025-07-07 00:07:39] [Rank 0] step:1201/10000 train_time:96966ms step_avg:80.74ms
+[2025-07-07 00:07:40] [Rank 0] step:1221/10000 train_time:98438ms step_avg:80.62ms
+[2025-07-07 00:07:41] [Rank 0] step:1241/10000 train_time:99912ms step_avg:80.51ms
+[2025-07-07 00:07:44] [Rank 0] step:1261/10000 train_time:102076ms step_avg:80.95ms
+[2025-07-07 00:07:45] [Rank 0] step:1281/10000 train_time:103531ms step_avg:80.82ms
+[2025-07-07 00:07:47] [Rank 0] step:1301/10000 train_time:105004ms step_avg:80.71ms
+[2025-07-07 00:07:48] [Rank 0] step:1321/10000 train_time:106479ms step_avg:80.61ms
+[2025-07-07 00:07:50] [Rank 0] step:1341/10000 train_time:107956ms step_avg:80.50ms
+[2025-07-07 00:07:52] [Rank 0] step:1361/10000 train_time:110086ms step_avg:80.89ms
+[2025-07-07 00:07:53] [Rank 0] step:1381/10000 train_time:111560ms step_avg:80.78ms
+[2025-07-07 00:07:55] [Rank 0] step:1401/10000 train_time:113035ms step_avg:80.68ms
+[2025-07-07 00:07:56] [Rank 0] step:1421/10000 train_time:114509ms step_avg:80.58ms
+[2025-07-07 00:07:58] [Rank 0] step:1441/10000 train_time:116038ms step_avg:80.53ms
+[2025-07-07 00:07:59] [Rank 0] step:1461/10000 train_time:117696ms step_avg:80.56ms
+[2025-07-07 00:08:01] [Rank 0] step:1481/10000 train_time:119171ms step_avg:80.47ms
+[2025-07-07 00:08:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:08:03] [Rank 0] PRINT: step:1500/10000 train_loss:1.7479 val_loss:1.5907 train_time:120647ms step_avg:80.43ms
+[2025-07-07 00:08:03] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:08:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:08:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:13:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:13:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:13:25] [Rank 0] Total Loss: 4.1806
+[2025-07-07 00:13:25] [Rank 0] Total FTA: 0.1484
+[2025-07-07 00:13:25] [Rank 0] Group 0 Loss: 4.4582
+[2025-07-07 00:13:25] [Rank 0] Group 1 Loss: 4.2706
+[2025-07-07 00:13:25] [Rank 0] Group 2 Loss: 4.0102
+[2025-07-07 00:13:25] [Rank 0] Group 3 Loss: 4.0912
+[2025-07-07 00:13:25] [Rank 0] Group 4 Loss: 4.1212
+[2025-07-07 00:13:25] [Rank 0] Group 5 Loss: 4.1259
+[2025-07-07 00:13:25] [Rank 0] Group 6 Loss: 4.0721
+[2025-07-07 00:13:25] [Rank 0] Group 7 Loss: 4.2038
+[2025-07-07 00:13:25] [Rank 0] Group 8 Loss: 4.1184
+[2025-07-07 00:13:25] [Rank 0] Group 9 Loss: 4.0774
+[2025-07-07 00:13:25] [Rank 0] Group 10 Loss: 4.1532
+[2025-07-07 00:13:25] [Rank 0] Group 11 Loss: 4.1736
+[2025-07-07 00:13:25] [Rank 0] Group 0 FTA: 0.3407
+[2025-07-07 00:13:25] [Rank 0] Group 1 FTA: 0.1536
+[2025-07-07 00:13:25] [Rank 0] Group 2 FTA: 0.1745
+[2025-07-07 00:13:25] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 00:13:25] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-07 00:13:25] [Rank 0] Group 5 FTA: 0.1016
+[2025-07-07 00:13:25] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-07 00:13:25] [Rank 0] Group 7 FTA: 0.1276
+[2025-07-07 00:13:25] [Rank 0] Group 8 FTA: 0.1302
+[2025-07-07 00:13:25] [Rank 0] Group 9 FTA: 0.1523
+[2025-07-07 00:13:25] [Rank 0] Group 10 FTA: 0.1309
+[2025-07-07 00:13:25] [Rank 0] Group 11 FTA: 0.1270
+[2025-07-07 00:13:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:13:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:13:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:13:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:13:26] [Rank 0] step:1501/10000 train_time:120668ms step_avg:80.39ms
+[2025-07-07 00:13:28] [Rank 0] step:1521/10000 train_time:122135ms step_avg:80.30ms
+[2025-07-07 00:13:30] [Rank 0] step:1541/10000 train_time:124257ms step_avg:80.63ms
+[2025-07-07 00:13:31] [Rank 0] step:1561/10000 train_time:125725ms step_avg:80.54ms
+[2025-07-07 00:13:33] [Rank 0] step:1581/10000 train_time:127193ms step_avg:80.45ms
+[2025-07-07 00:13:34] [Rank 0] step:1601/10000 train_time:128660ms step_avg:80.36ms
+[2025-07-07 00:13:36] [Rank 0] step:1621/10000 train_time:130383ms step_avg:80.43ms
+[2025-07-07 00:13:38] [Rank 0] step:1641/10000 train_time:132260ms step_avg:80.60ms
+[2025-07-07 00:13:39] [Rank 0] step:1661/10000 train_time:133729ms step_avg:80.51ms
+[2025-07-07 00:13:41] [Rank 0] step:1681/10000 train_time:135199ms step_avg:80.43ms
+[2025-07-07 00:13:42] [Rank 0] step:1701/10000 train_time:136671ms step_avg:80.35ms
+[2025-07-07 00:13:44] [Rank 0] step:1721/10000 train_time:138787ms step_avg:80.64ms
+[2025-07-07 00:13:46] [Rank 0] step:1741/10000 train_time:140258ms step_avg:80.56ms
+[2025-07-07 00:13:47] [Rank 0] step:1761/10000 train_time:141731ms step_avg:80.48ms
+[2025-07-07 00:13:49] [Rank 0] step:1781/10000 train_time:143205ms step_avg:80.41ms
+[2025-07-07 00:13:51] [Rank 0] step:1801/10000 train_time:144678ms step_avg:80.33ms
+[2025-07-07 00:13:52] [Rank 0] step:1821/10000 train_time:146794ms step_avg:80.61ms
+[2025-07-07 00:13:54] [Rank 0] step:1841/10000 train_time:148267ms step_avg:80.54ms
+[2025-07-07 00:13:55] [Rank 0] step:1861/10000 train_time:149740ms step_avg:80.46ms
+[2025-07-07 00:13:57] [Rank 0] step:1881/10000 train_time:151215ms step_avg:80.39ms
+[2025-07-07 00:13:59] [Rank 0] step:1901/10000 train_time:153351ms step_avg:80.67ms
+[2025-07-07 00:14:00] [Rank 0] step:1921/10000 train_time:154823ms step_avg:80.60ms
+[2025-07-07 00:14:02] [Rank 0] step:1941/10000 train_time:156299ms step_avg:80.53ms
+[2025-07-07 00:14:03] [Rank 0] step:1961/10000 train_time:157777ms step_avg:80.46ms
+[2025-07-07 00:14:06] [Rank 0] step:1981/10000 train_time:159251ms step_avg:80.39ms
+[2025-07-07 00:14:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:14:08] [Rank 0] PRINT: step:2000/10000 train_loss:1.4964 val_loss:1.4342 train_time:161386ms step_avg:80.69ms
+[2025-07-07 00:14:08] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:14:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:14:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:19:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:19:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:19:31] [Rank 0] Total Loss: 4.4514
+[2025-07-07 00:19:31] [Rank 0] Total FTA: 0.2008
+[2025-07-07 00:19:31] [Rank 0] Group 0 Loss: 4.6811
+[2025-07-07 00:19:31] [Rank 0] Group 1 Loss: 4.3738
+[2025-07-07 00:19:31] [Rank 0] Group 2 Loss: 4.2214
+[2025-07-07 00:19:31] [Rank 0] Group 3 Loss: 4.4341
+[2025-07-07 00:19:31] [Rank 0] Group 4 Loss: 4.3854
+[2025-07-07 00:19:31] [Rank 0] Group 5 Loss: 4.3919
+[2025-07-07 00:19:31] [Rank 0] Group 6 Loss: 4.4068
+[2025-07-07 00:19:31] [Rank 0] Group 7 Loss: 4.4996
+[2025-07-07 00:19:31] [Rank 0] Group 8 Loss: 4.4230
+[2025-07-07 00:19:31] [Rank 0] Group 9 Loss: 4.5021
+[2025-07-07 00:19:31] [Rank 0] Group 10 Loss: 4.4594
+[2025-07-07 00:19:31] [Rank 0] Group 11 Loss: 4.4406
+[2025-07-07 00:19:31] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 00:19:31] [Rank 0] Group 1 FTA: 0.1693
+[2025-07-07 00:19:31] [Rank 0] Group 2 FTA: 0.2448
+[2025-07-07 00:19:31] [Rank 0] Group 3 FTA: 0.2318
+[2025-07-07 00:19:31] [Rank 0] Group 4 FTA: 0.1302
+[2025-07-07 00:19:31] [Rank 0] Group 5 FTA: 0.2266
+[2025-07-07 00:19:31] [Rank 0] Group 6 FTA: 0.1823
+[2025-07-07 00:19:31] [Rank 0] Group 7 FTA: 0.1875
+[2025-07-07 00:19:31] [Rank 0] Group 8 FTA: 0.1719
+[2025-07-07 00:19:31] [Rank 0] Group 9 FTA: 0.2188
+[2025-07-07 00:19:31] [Rank 0] Group 10 FTA: 0.2324
+[2025-07-07 00:19:31] [Rank 0] Group 11 FTA: 0.2275
+[2025-07-07 00:19:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:19:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:19:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:19:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:19:32] [Rank 0] step:2001/10000 train_time:161407ms step_avg:80.66ms
+[2025-07-07 00:19:34] [Rank 0] step:2021/10000 train_time:162893ms step_avg:80.60ms
+[2025-07-07 00:19:35] [Rank 0] step:2041/10000 train_time:164357ms step_avg:80.53ms
+[2025-07-07 00:19:37] [Rank 0] step:2061/10000 train_time:165824ms step_avg:80.46ms
+[2025-07-07 00:19:39] [Rank 0] step:2081/10000 train_time:167940ms step_avg:80.70ms
+[2025-07-07 00:19:40] [Rank 0] step:2101/10000 train_time:169406ms step_avg:80.63ms
+[2025-07-07 00:19:42] [Rank 0] step:2121/10000 train_time:170873ms step_avg:80.56ms
+[2025-07-07 00:19:44] [Rank 0] step:2141/10000 train_time:172475ms step_avg:80.56ms
+[2025-07-07 00:19:46] [Rank 0] step:2161/10000 train_time:174201ms step_avg:80.61ms
+[2025-07-07 00:19:47] [Rank 0] step:2181/10000 train_time:176070ms step_avg:80.73ms
+[2025-07-07 00:19:49] [Rank 0] step:2201/10000 train_time:177539ms step_avg:80.66ms
+[2025-07-07 00:19:50] [Rank 0] step:2221/10000 train_time:179007ms step_avg:80.60ms
+[2025-07-07 00:19:52] [Rank 0] step:2241/10000 train_time:180496ms step_avg:80.54ms
+[2025-07-07 00:19:54] [Rank 0] step:2261/10000 train_time:182640ms step_avg:80.78ms
+[2025-07-07 00:19:55] [Rank 0] step:2281/10000 train_time:184134ms step_avg:80.73ms
+[2025-07-07 00:19:57] [Rank 0] step:2301/10000 train_time:185627ms step_avg:80.67ms
+[2025-07-07 00:19:58] [Rank 0] step:2321/10000 train_time:187122ms step_avg:80.62ms
+[2025-07-07 00:20:00] [Rank 0] step:2341/10000 train_time:188617ms step_avg:80.57ms
+[2025-07-07 00:20:02] [Rank 0] step:2361/10000 train_time:190750ms step_avg:80.79ms
+[2025-07-07 00:20:03] [Rank 0] step:2381/10000 train_time:192244ms step_avg:80.74ms
+[2025-07-07 00:20:05] [Rank 0] step:2401/10000 train_time:193739ms step_avg:80.69ms
+[2025-07-07 00:20:06] [Rank 0] step:2421/10000 train_time:195232ms step_avg:80.64ms
+[2025-07-07 00:20:08] [Rank 0] step:2441/10000 train_time:197392ms step_avg:80.87ms
+[2025-07-07 00:20:10] [Rank 0] step:2461/10000 train_time:198887ms step_avg:80.82ms
+[2025-07-07 00:20:11] [Rank 0] step:2481/10000 train_time:200382ms step_avg:80.77ms
+[2025-07-07 00:20:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:20:14] [Rank 0] PRINT: step:2500/10000 train_loss:1.3642 val_loss:1.3029 train_time:201879ms step_avg:80.75ms
+[2025-07-07 00:20:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:20:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:20:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:25:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:25:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:25:36] [Rank 0] Total Loss: 4.4927
+[2025-07-07 00:25:36] [Rank 0] Total FTA: 0.3075
+[2025-07-07 00:25:36] [Rank 0] Group 0 Loss: 4.7655
+[2025-07-07 00:25:36] [Rank 0] Group 1 Loss: 4.3022
+[2025-07-07 00:25:36] [Rank 0] Group 2 Loss: 4.4522
+[2025-07-07 00:25:36] [Rank 0] Group 3 Loss: 4.4607
+[2025-07-07 00:25:36] [Rank 0] Group 4 Loss: 4.4821
+[2025-07-07 00:25:36] [Rank 0] Group 5 Loss: 4.3749
+[2025-07-07 00:25:36] [Rank 0] Group 6 Loss: 4.3581
+[2025-07-07 00:25:36] [Rank 0] Group 7 Loss: 4.4838
+[2025-07-07 00:25:36] [Rank 0] Group 8 Loss: 4.4544
+[2025-07-07 00:25:36] [Rank 0] Group 9 Loss: 4.4604
+[2025-07-07 00:25:36] [Rank 0] Group 10 Loss: 4.5205
+[2025-07-07 00:25:36] [Rank 0] Group 11 Loss: 4.4967
+[2025-07-07 00:25:36] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-07 00:25:36] [Rank 0] Group 1 FTA: 0.4766
+[2025-07-07 00:25:36] [Rank 0] Group 2 FTA: 0.4193
+[2025-07-07 00:25:36] [Rank 0] Group 3 FTA: 0.2344
+[2025-07-07 00:25:36] [Rank 0] Group 4 FTA: 0.2083
+[2025-07-07 00:25:36] [Rank 0] Group 5 FTA: 0.3203
+[2025-07-07 00:25:36] [Rank 0] Group 6 FTA: 0.2969
+[2025-07-07 00:25:36] [Rank 0] Group 7 FTA: 0.3359
+[2025-07-07 00:25:36] [Rank 0] Group 8 FTA: 0.2917
+[2025-07-07 00:25:36] [Rank 0] Group 9 FTA: 0.3516
+[2025-07-07 00:25:36] [Rank 0] Group 10 FTA: 0.3574
+[2025-07-07 00:25:36] [Rank 0] Group 11 FTA: 0.3262
+[2025-07-07 00:25:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:25:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:25:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:25:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:25:38] [Rank 0] step:2501/10000 train_time:201899ms step_avg:80.73ms
+[2025-07-07 00:25:40] [Rank 0] step:2521/10000 train_time:203387ms step_avg:80.68ms
+[2025-07-07 00:25:42] [Rank 0] step:2541/10000 train_time:205531ms step_avg:80.89ms
+[2025-07-07 00:25:43] [Rank 0] step:2561/10000 train_time:207021ms step_avg:80.84ms
+[2025-07-07 00:25:45] [Rank 0] step:2581/10000 train_time:208510ms step_avg:80.79ms
+[2025-07-07 00:25:46] [Rank 0] step:2601/10000 train_time:209999ms step_avg:80.74ms
+[2025-07-07 00:25:48] [Rank 0] step:2621/10000 train_time:212146ms step_avg:80.94ms
+[2025-07-07 00:25:50] [Rank 0] step:2641/10000 train_time:213637ms step_avg:80.89ms
+[2025-07-07 00:25:51] [Rank 0] step:2661/10000 train_time:215128ms step_avg:80.84ms
+[2025-07-07 00:25:53] [Rank 0] step:2681/10000 train_time:216628ms step_avg:80.80ms
+[2025-07-07 00:25:55] [Rank 0] step:2701/10000 train_time:218782ms step_avg:81.00ms
+[2025-07-07 00:25:57] [Rank 0] step:2721/10000 train_time:220256ms step_avg:80.95ms
+[2025-07-07 00:25:58] [Rank 0] step:2741/10000 train_time:221749ms step_avg:80.90ms
+[2025-07-07 00:25:59] [Rank 0] step:2761/10000 train_time:223243ms step_avg:80.86ms
+[2025-07-07 00:26:01] [Rank 0] step:2781/10000 train_time:224739ms step_avg:80.81ms
+[2025-07-07 00:26:03] [Rank 0] step:2801/10000 train_time:226992ms step_avg:81.04ms
+[2025-07-07 00:26:05] [Rank 0] step:2821/10000 train_time:228526ms step_avg:81.01ms
+[2025-07-07 00:26:06] [Rank 0] step:2841/10000 train_time:230019ms step_avg:80.96ms
+[2025-07-07 00:26:08] [Rank 0] step:2861/10000 train_time:231512ms step_avg:80.92ms
+[2025-07-07 00:26:10] [Rank 0] step:2881/10000 train_time:233058ms step_avg:80.89ms
+[2025-07-07 00:26:11] [Rank 0] step:2901/10000 train_time:235169ms step_avg:81.06ms
+[2025-07-07 00:26:13] [Rank 0] step:2921/10000 train_time:236665ms step_avg:81.02ms
+[2025-07-07 00:26:14] [Rank 0] step:2941/10000 train_time:238161ms step_avg:80.98ms
+[2025-07-07 00:26:16] [Rank 0] step:2961/10000 train_time:239657ms step_avg:80.94ms
+[2025-07-07 00:26:18] [Rank 0] step:2981/10000 train_time:241810ms step_avg:81.12ms
+[2025-07-07 00:26:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:26:20] [Rank 0] PRINT: step:3000/10000 train_loss:1.2626 val_loss:1.2160 train_time:243305ms step_avg:81.10ms
+[2025-07-07 00:26:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:26:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:26:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:31:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:31:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:31:43] [Rank 0] Total Loss: 4.6786
+[2025-07-07 00:31:43] [Rank 0] Total FTA: 0.3613
+[2025-07-07 00:31:43] [Rank 0] Group 0 Loss: 4.8850
+[2025-07-07 00:31:43] [Rank 0] Group 1 Loss: 4.6201
+[2025-07-07 00:31:43] [Rank 0] Group 2 Loss: 4.5406
+[2025-07-07 00:31:43] [Rank 0] Group 3 Loss: 4.6957
+[2025-07-07 00:31:43] [Rank 0] Group 4 Loss: 4.5625
+[2025-07-07 00:31:43] [Rank 0] Group 5 Loss: 4.6023
+[2025-07-07 00:31:43] [Rank 0] Group 6 Loss: 4.5833
+[2025-07-07 00:31:43] [Rank 0] Group 7 Loss: 4.7879
+[2025-07-07 00:31:43] [Rank 0] Group 8 Loss: 4.6761
+[2025-07-07 00:31:43] [Rank 0] Group 9 Loss: 4.6405
+[2025-07-07 00:31:43] [Rank 0] Group 10 Loss: 4.7076
+[2025-07-07 00:31:43] [Rank 0] Group 11 Loss: 4.6540
+[2025-07-07 00:31:43] [Rank 0] Group 0 FTA: 0.3225
+[2025-07-07 00:31:43] [Rank 0] Group 1 FTA: 0.3490
+[2025-07-07 00:31:43] [Rank 0] Group 2 FTA: 0.3776
+[2025-07-07 00:31:43] [Rank 0] Group 3 FTA: 0.3047
+[2025-07-07 00:31:43] [Rank 0] Group 4 FTA: 0.3047
+[2025-07-07 00:31:43] [Rank 0] Group 5 FTA: 0.3984
+[2025-07-07 00:31:43] [Rank 0] Group 6 FTA: 0.3203
+[2025-07-07 00:31:43] [Rank 0] Group 7 FTA: 0.3828
+[2025-07-07 00:31:43] [Rank 0] Group 8 FTA: 0.4297
+[2025-07-07 00:31:43] [Rank 0] Group 9 FTA: 0.3867
+[2025-07-07 00:31:43] [Rank 0] Group 10 FTA: 0.3867
+[2025-07-07 00:31:43] [Rank 0] Group 11 FTA: 0.3799
+[2025-07-07 00:31:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:31:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:31:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:31:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:31:44] [Rank 0] step:3001/10000 train_time:243327ms step_avg:81.08ms
+[2025-07-07 00:31:46] [Rank 0] step:3021/10000 train_time:244843ms step_avg:81.05ms
+[2025-07-07 00:31:47] [Rank 0] step:3041/10000 train_time:246330ms step_avg:81.00ms
+[2025-07-07 00:31:49] [Rank 0] step:3061/10000 train_time:247819ms step_avg:80.96ms
+[2025-07-07 00:31:51] [Rank 0] step:3081/10000 train_time:249971ms step_avg:81.13ms
+[2025-07-07 00:31:52] [Rank 0] step:3101/10000 train_time:251460ms step_avg:81.09ms
+[2025-07-07 00:31:54] [Rank 0] step:3121/10000 train_time:252952ms step_avg:81.05ms
+[2025-07-07 00:31:55] [Rank 0] step:3141/10000 train_time:254443ms step_avg:81.01ms
+[2025-07-07 00:31:57] [Rank 0] step:3161/10000 train_time:256171ms step_avg:81.04ms
+[2025-07-07 00:31:59] [Rank 0] step:3181/10000 train_time:257664ms step_avg:81.00ms
+[2025-07-07 00:32:00] [Rank 0] step:3201/10000 train_time:259157ms step_avg:80.96ms
+[2025-07-07 00:32:02] [Rank 0] step:3221/10000 train_time:260652ms step_avg:80.92ms
+[2025-07-07 00:32:04] [Rank 0] step:3241/10000 train_time:262805ms step_avg:81.09ms
+[2025-07-07 00:32:05] [Rank 0] step:3261/10000 train_time:264279ms step_avg:81.04ms
+[2025-07-07 00:32:07] [Rank 0] step:3281/10000 train_time:265773ms step_avg:81.00ms
+[2025-07-07 00:32:08] [Rank 0] step:3301/10000 train_time:267267ms step_avg:80.97ms
+[2025-07-07 00:32:10] [Rank 0] step:3321/10000 train_time:268762ms step_avg:80.93ms
+[2025-07-07 00:32:12] [Rank 0] step:3341/10000 train_time:270921ms step_avg:81.09ms
+[2025-07-07 00:32:13] [Rank 0] step:3361/10000 train_time:272416ms step_avg:81.05ms
+[2025-07-07 00:32:15] [Rank 0] step:3381/10000 train_time:273910ms step_avg:81.01ms
+[2025-07-07 00:32:16] [Rank 0] step:3401/10000 train_time:275406ms step_avg:80.98ms
+[2025-07-07 00:32:19] [Rank 0] step:3421/10000 train_time:277156ms step_avg:81.02ms
+[2025-07-07 00:32:20] [Rank 0] step:3441/10000 train_time:279066ms step_avg:81.10ms
+[2025-07-07 00:32:22] [Rank 0] step:3461/10000 train_time:280562ms step_avg:81.06ms
+[2025-07-07 00:32:23] [Rank 0] step:3481/10000 train_time:282196ms step_avg:81.07ms
+[2025-07-07 00:32:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:32:26] [Rank 0] PRINT: step:3500/10000 train_loss:1.1903 val_loss:1.1548 train_time:283692ms step_avg:81.05ms
+[2025-07-07 00:32:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:32:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:32:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:37:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:37:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:37:47] [Rank 0] Total Loss: 4.8862
+[2025-07-07 00:37:47] [Rank 0] Total FTA: 0.4261
+[2025-07-07 00:37:47] [Rank 0] Group 0 Loss: 5.2139
+[2025-07-07 00:37:47] [Rank 0] Group 1 Loss: 4.7193
+[2025-07-07 00:37:47] [Rank 0] Group 2 Loss: 4.6966
+[2025-07-07 00:37:47] [Rank 0] Group 3 Loss: 4.9735
+[2025-07-07 00:37:47] [Rank 0] Group 4 Loss: 4.9208
+[2025-07-07 00:37:47] [Rank 0] Group 5 Loss: 4.8525
+[2025-07-07 00:37:47] [Rank 0] Group 6 Loss: 4.7360
+[2025-07-07 00:37:48] [Rank 0] Group 7 Loss: 4.8708
+[2025-07-07 00:37:48] [Rank 0] Group 8 Loss: 4.8505
+[2025-07-07 00:37:48] [Rank 0] Group 9 Loss: 4.8105
+[2025-07-07 00:37:48] [Rank 0] Group 10 Loss: 4.8315
+[2025-07-07 00:37:48] [Rank 0] Group 11 Loss: 4.8625
+[2025-07-07 00:37:48] [Rank 0] Group 0 FTA: 0.3238
+[2025-07-07 00:37:48] [Rank 0] Group 1 FTA: 0.5000
+[2025-07-07 00:37:48] [Rank 0] Group 2 FTA: 0.4792
+[2025-07-07 00:37:48] [Rank 0] Group 3 FTA: 0.4688
+[2025-07-07 00:37:48] [Rank 0] Group 4 FTA: 0.3047
+[2025-07-07 00:37:48] [Rank 0] Group 5 FTA: 0.4427
+[2025-07-07 00:37:48] [Rank 0] Group 6 FTA: 0.3828
+[2025-07-07 00:37:48] [Rank 0] Group 7 FTA: 0.4818
+[2025-07-07 00:37:48] [Rank 0] Group 8 FTA: 0.4714
+[2025-07-07 00:37:48] [Rank 0] Group 9 FTA: 0.4805
+[2025-07-07 00:37:48] [Rank 0] Group 10 FTA: 0.4512
+[2025-07-07 00:37:48] [Rank 0] Group 11 FTA: 0.4307
+[2025-07-07 00:37:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:37:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:37:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:37:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:37:49] [Rank 0] step:3501/10000 train_time:283714ms step_avg:81.04ms
+[2025-07-07 00:37:51] [Rank 0] step:3521/10000 train_time:285874ms step_avg:81.19ms
+[2025-07-07 00:37:53] [Rank 0] step:3541/10000 train_time:287362ms step_avg:81.15ms
+[2025-07-07 00:37:54] [Rank 0] step:3561/10000 train_time:288851ms step_avg:81.12ms
+[2025-07-07 00:37:56] [Rank 0] step:3581/10000 train_time:290341ms step_avg:81.08ms
+[2025-07-07 00:37:58] [Rank 0] step:3601/10000 train_time:292087ms step_avg:81.11ms
+[2025-07-07 00:37:59] [Rank 0] step:3621/10000 train_time:293982ms step_avg:81.19ms
+[2025-07-07 00:38:01] [Rank 0] step:3641/10000 train_time:295471ms step_avg:81.15ms
+[2025-07-07 00:38:02] [Rank 0] step:3661/10000 train_time:296962ms step_avg:81.11ms
+[2025-07-07 00:38:04] [Rank 0] step:3681/10000 train_time:298453ms step_avg:81.08ms
+[2025-07-07 00:38:06] [Rank 0] step:3701/10000 train_time:300187ms step_avg:81.11ms
+[2025-07-07 00:38:07] [Rank 0] step:3721/10000 train_time:301680ms step_avg:81.07ms
+[2025-07-07 00:38:09] [Rank 0] step:3741/10000 train_time:303173ms step_avg:81.04ms
+[2025-07-07 00:38:10] [Rank 0] step:3761/10000 train_time:304666ms step_avg:81.01ms
+[2025-07-07 00:38:12] [Rank 0] step:3781/10000 train_time:306163ms step_avg:80.97ms
+[2025-07-07 00:38:14] [Rank 0] step:3801/10000 train_time:308322ms step_avg:81.12ms
+[2025-07-07 00:38:15] [Rank 0] step:3821/10000 train_time:309816ms step_avg:81.08ms
+[2025-07-07 00:38:17] [Rank 0] step:3841/10000 train_time:311311ms step_avg:81.05ms
+[2025-07-07 00:38:18] [Rank 0] step:3861/10000 train_time:312805ms step_avg:81.02ms
+[2025-07-07 00:38:20] [Rank 0] step:3881/10000 train_time:314534ms step_avg:81.04ms
+[2025-07-07 00:38:21] [Rank 0] step:3901/10000 train_time:316029ms step_avg:81.01ms
+[2025-07-07 00:38:23] [Rank 0] step:3921/10000 train_time:317524ms step_avg:80.98ms
+[2025-07-07 00:38:24] [Rank 0] step:3941/10000 train_time:319019ms step_avg:80.95ms
+[2025-07-07 00:38:27] [Rank 0] step:3961/10000 train_time:320771ms step_avg:80.98ms
+[2025-07-07 00:38:28] [Rank 0] step:3981/10000 train_time:322657ms step_avg:81.05ms
+[2025-07-07 00:38:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:38:30] [Rank 0] PRINT: step:4000/10000 train_loss:1.1399 val_loss:1.1159 train_time:324153ms step_avg:81.04ms
+[2025-07-07 00:38:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:38:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:38:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:43:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:43:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:43:53] [Rank 0] Total Loss: 4.9166
+[2025-07-07 00:43:53] [Rank 0] Total FTA: 0.5070
+[2025-07-07 00:43:53] [Rank 0] Group 0 Loss: 5.2369
+[2025-07-07 00:43:53] [Rank 0] Group 1 Loss: 4.6870
+[2025-07-07 00:43:53] [Rank 0] Group 2 Loss: 4.8291
+[2025-07-07 00:43:53] [Rank 0] Group 3 Loss: 4.9292
+[2025-07-07 00:43:53] [Rank 0] Group 4 Loss: 4.8482
+[2025-07-07 00:43:53] [Rank 0] Group 5 Loss: 4.8421
+[2025-07-07 00:43:53] [Rank 0] Group 6 Loss: 4.8147
+[2025-07-07 00:43:53] [Rank 0] Group 7 Loss: 4.9052
+[2025-07-07 00:43:53] [Rank 0] Group 8 Loss: 4.9137
+[2025-07-07 00:43:53] [Rank 0] Group 9 Loss: 4.8583
+[2025-07-07 00:43:53] [Rank 0] Group 10 Loss: 4.9185
+[2025-07-07 00:43:53] [Rank 0] Group 11 Loss: 4.9007
+[2025-07-07 00:43:53] [Rank 0] Group 0 FTA: 0.6723
+[2025-07-07 00:43:53] [Rank 0] Group 1 FTA: 0.4896
+[2025-07-07 00:43:53] [Rank 0] Group 2 FTA: 0.3672
+[2025-07-07 00:43:53] [Rank 0] Group 3 FTA: 0.4740
+[2025-07-07 00:43:53] [Rank 0] Group 4 FTA: 0.4219
+[2025-07-07 00:43:53] [Rank 0] Group 5 FTA: 0.5651
+[2025-07-07 00:43:53] [Rank 0] Group 6 FTA: 0.4453
+[2025-07-07 00:43:53] [Rank 0] Group 7 FTA: 0.4870
+[2025-07-07 00:43:53] [Rank 0] Group 8 FTA: 0.4818
+[2025-07-07 00:43:53] [Rank 0] Group 9 FTA: 0.4727
+[2025-07-07 00:43:54] [Rank 0] Group 10 FTA: 0.5039
+[2025-07-07 00:43:54] [Rank 0] Group 11 FTA: 0.5146
+[2025-07-07 00:43:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:43:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:43:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:43:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:43:55] [Rank 0] step:4001/10000 train_time:324174ms step_avg:81.02ms
+[2025-07-07 00:43:57] [Rank 0] step:4021/10000 train_time:325658ms step_avg:80.99ms
+[2025-07-07 00:43:58] [Rank 0] step:4041/10000 train_time:327304ms step_avg:81.00ms
+[2025-07-07 00:44:00] [Rank 0] step:4061/10000 train_time:329443ms step_avg:81.12ms
+[2025-07-07 00:44:02] [Rank 0] step:4081/10000 train_time:330931ms step_avg:81.09ms
+[2025-07-07 00:44:03] [Rank 0] step:4101/10000 train_time:332421ms step_avg:81.06ms
+[2025-07-07 00:44:05] [Rank 0] step:4121/10000 train_time:333912ms step_avg:81.03ms
+[2025-07-07 00:44:07] [Rank 0] step:4141/10000 train_time:335659ms step_avg:81.06ms
+[2025-07-07 00:44:08] [Rank 0] step:4161/10000 train_time:337552ms step_avg:81.12ms
+[2025-07-07 00:44:10] [Rank 0] step:4181/10000 train_time:339043ms step_avg:81.09ms
+[2025-07-07 00:44:11] [Rank 0] step:4201/10000 train_time:340536ms step_avg:81.06ms
+[2025-07-07 00:44:13] [Rank 0] step:4221/10000 train_time:342029ms step_avg:81.03ms
+[2025-07-07 00:44:15] [Rank 0] step:4241/10000 train_time:344165ms step_avg:81.15ms
+[2025-07-07 00:44:17] [Rank 0] step:4261/10000 train_time:345656ms step_avg:81.12ms
+[2025-07-07 00:44:18] [Rank 0] step:4281/10000 train_time:347150ms step_avg:81.09ms
+[2025-07-07 00:44:20] [Rank 0] step:4301/10000 train_time:348645ms step_avg:81.06ms
+[2025-07-07 00:44:22] [Rank 0] step:4321/10000 train_time:350391ms step_avg:81.09ms
+[2025-07-07 00:44:23] [Rank 0] step:4341/10000 train_time:352274ms step_avg:81.15ms
+[2025-07-07 00:44:25] [Rank 0] step:4361/10000 train_time:353767ms step_avg:81.12ms
+[2025-07-07 00:44:26] [Rank 0] step:4381/10000 train_time:355262ms step_avg:81.09ms
+[2025-07-07 00:44:28] [Rank 0] step:4401/10000 train_time:356758ms step_avg:81.06ms
+[2025-07-07 00:44:29] [Rank 0] step:4421/10000 train_time:358489ms step_avg:81.09ms
+[2025-07-07 00:44:31] [Rank 0] step:4441/10000 train_time:359985ms step_avg:81.06ms
+[2025-07-07 00:44:32] [Rank 0] step:4461/10000 train_time:361479ms step_avg:81.03ms
+[2025-07-07 00:44:34] [Rank 0] step:4481/10000 train_time:362974ms step_avg:81.00ms
+[2025-07-07 00:44:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:44:36] [Rank 0] PRINT: step:4500/10000 train_loss:1.1016 val_loss:1.0777 train_time:364469ms step_avg:80.99ms
+[2025-07-07 00:44:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:44:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:44:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:50:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:50:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:50:02] [Rank 0] Total Loss: 5.0706
+[2025-07-07 00:50:02] [Rank 0] Total FTA: 0.5880
+[2025-07-07 00:50:02] [Rank 0] Group 0 Loss: 5.2227
+[2025-07-07 00:50:02] [Rank 0] Group 1 Loss: 4.9859
+[2025-07-07 00:50:02] [Rank 0] Group 2 Loss: 5.0563
+[2025-07-07 00:50:02] [Rank 0] Group 3 Loss: 5.1816
+[2025-07-07 00:50:02] [Rank 0] Group 4 Loss: 5.0895
+[2025-07-07 00:50:02] [Rank 0] Group 5 Loss: 4.9962
+[2025-07-07 00:50:02] [Rank 0] Group 6 Loss: 4.9714
+[2025-07-07 00:50:02] [Rank 0] Group 7 Loss: 5.0166
+[2025-07-07 00:50:02] [Rank 0] Group 8 Loss: 5.0529
+[2025-07-07 00:50:02] [Rank 0] Group 9 Loss: 5.0222
+[2025-07-07 00:50:02] [Rank 0] Group 10 Loss: 5.0356
+[2025-07-07 00:50:02] [Rank 0] Group 11 Loss: 5.0665
+[2025-07-07 00:50:02] [Rank 0] Group 0 FTA: 0.6645
+[2025-07-07 00:50:02] [Rank 0] Group 1 FTA: 0.4922
+[2025-07-07 00:50:02] [Rank 0] Group 2 FTA: 0.7760
+[2025-07-07 00:50:02] [Rank 0] Group 3 FTA: 0.4766
+[2025-07-07 00:50:02] [Rank 0] Group 4 FTA: 0.5182
+[2025-07-07 00:50:02] [Rank 0] Group 5 FTA: 0.6927
+[2025-07-07 00:50:02] [Rank 0] Group 6 FTA: 0.5599
+[2025-07-07 00:50:02] [Rank 0] Group 7 FTA: 0.5807
+[2025-07-07 00:50:02] [Rank 0] Group 8 FTA: 0.5677
+[2025-07-07 00:50:02] [Rank 0] Group 9 FTA: 0.5078
+[2025-07-07 00:50:02] [Rank 0] Group 10 FTA: 0.5449
+[2025-07-07 00:50:02] [Rank 0] Group 11 FTA: 0.5869
+[2025-07-07 00:50:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:50:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:50:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:50:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:50:04] [Rank 0] step:4501/10000 train_time:364599ms step_avg:81.00ms
+[2025-07-07 00:50:06] [Rank 0] step:4521/10000 train_time:366693ms step_avg:81.11ms
+[2025-07-07 00:50:07] [Rank 0] step:4541/10000 train_time:368180ms step_avg:81.08ms
+[2025-07-07 00:50:09] [Rank 0] step:4561/10000 train_time:369669ms step_avg:81.05ms
+[2025-07-07 00:50:10] [Rank 0] step:4581/10000 train_time:371160ms step_avg:81.02ms
+[2025-07-07 00:50:12] [Rank 0] step:4601/10000 train_time:373306ms step_avg:81.14ms
+[2025-07-07 00:50:14] [Rank 0] step:4621/10000 train_time:374796ms step_avg:81.11ms
+[2025-07-07 00:50:15] [Rank 0] step:4641/10000 train_time:376290ms step_avg:81.08ms
+[2025-07-07 00:50:17] [Rank 0] step:4661/10000 train_time:377783ms step_avg:81.05ms
+[2025-07-07 00:50:19] [Rank 0] step:4681/10000 train_time:379447ms step_avg:81.06ms
+[2025-07-07 00:50:20] [Rank 0] step:4701/10000 train_time:381132ms step_avg:81.07ms
+[2025-07-07 00:50:22] [Rank 0] step:4721/10000 train_time:382624ms step_avg:81.05ms
+[2025-07-07 00:50:23] [Rank 0] step:4741/10000 train_time:384119ms step_avg:81.02ms
+[2025-07-07 00:50:25] [Rank 0] step:4761/10000 train_time:385615ms step_avg:80.99ms
+[2025-07-07 00:50:27] [Rank 0] step:4781/10000 train_time:387772ms step_avg:81.11ms
+[2025-07-07 00:50:28] [Rank 0] step:4801/10000 train_time:389266ms step_avg:81.08ms
+[2025-07-07 00:50:30] [Rank 0] step:4821/10000 train_time:390762ms step_avg:81.05ms
+[2025-07-07 00:50:31] [Rank 0] step:4841/10000 train_time:392257ms step_avg:81.03ms
+[2025-07-07 00:50:33] [Rank 0] step:4861/10000 train_time:393804ms step_avg:81.01ms
+[2025-07-07 00:50:35] [Rank 0] step:4881/10000 train_time:395915ms step_avg:81.11ms
+[2025-07-07 00:50:36] [Rank 0] step:4901/10000 train_time:397412ms step_avg:81.09ms
+[2025-07-07 00:50:38] [Rank 0] step:4921/10000 train_time:398907ms step_avg:81.06ms
+[2025-07-07 00:50:39] [Rank 0] step:4941/10000 train_time:400404ms step_avg:81.04ms
+[2025-07-07 00:50:41] [Rank 0] step:4961/10000 train_time:402559ms step_avg:81.14ms
+[2025-07-07 00:50:43] [Rank 0] step:4981/10000 train_time:404056ms step_avg:81.12ms
+[2025-07-07 00:50:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:50:45] [Rank 0] PRINT: step:5000/10000 train_loss:1.0675 val_loss:1.0494 train_time:405554ms step_avg:81.11ms
+[2025-07-07 00:50:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:50:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:50:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:56:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:56:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:56:11] [Rank 0] Total Loss: 4.9592
+[2025-07-07 00:56:11] [Rank 0] Total FTA: 0.5743
+[2025-07-07 00:56:11] [Rank 0] Group 0 Loss: 5.1517
+[2025-07-07 00:56:11] [Rank 0] Group 1 Loss: 4.6729
+[2025-07-07 00:56:11] [Rank 0] Group 2 Loss: 4.9007
+[2025-07-07 00:56:11] [Rank 0] Group 3 Loss: 5.1370
+[2025-07-07 00:56:11] [Rank 0] Group 4 Loss: 4.8716
+[2025-07-07 00:56:11] [Rank 0] Group 5 Loss: 4.9467
+[2025-07-07 00:56:11] [Rank 0] Group 6 Loss: 4.9005
+[2025-07-07 00:56:11] [Rank 0] Group 7 Loss: 4.9243
+[2025-07-07 00:56:11] [Rank 0] Group 8 Loss: 4.9547
+[2025-07-07 00:56:11] [Rank 0] Group 9 Loss: 4.9847
+[2025-07-07 00:56:11] [Rank 0] Group 10 Loss: 4.9504
+[2025-07-07 00:56:11] [Rank 0] Group 11 Loss: 4.9494
+[2025-07-07 00:56:11] [Rank 0] Group 0 FTA: 0.4824
+[2025-07-07 00:56:11] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 00:56:11] [Rank 0] Group 2 FTA: 0.6198
+[2025-07-07 00:56:11] [Rank 0] Group 3 FTA: 0.6250
+[2025-07-07 00:56:11] [Rank 0] Group 4 FTA: 0.5365
+[2025-07-07 00:56:11] [Rank 0] Group 5 FTA: 0.6797
+[2025-07-07 00:56:11] [Rank 0] Group 6 FTA: 0.5938
+[2025-07-07 00:56:11] [Rank 0] Group 7 FTA: 0.6224
+[2025-07-07 00:56:11] [Rank 0] Group 8 FTA: 0.6250
+[2025-07-07 00:56:11] [Rank 0] Group 9 FTA: 0.6211
+[2025-07-07 00:56:11] [Rank 0] Group 10 FTA: 0.6484
+[2025-07-07 00:56:11] [Rank 0] Group 11 FTA: 0.6367
+[2025-07-07 00:56:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 00:56:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 00:56:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 00:56:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 00:56:13] [Rank 0] step:5001/10000 train_time:405576ms step_avg:81.10ms
+[2025-07-07 00:56:14] [Rank 0] step:5021/10000 train_time:407064ms step_avg:81.07ms
+[2025-07-07 00:56:17] [Rank 0] step:5041/10000 train_time:408814ms step_avg:81.10ms
+[2025-07-07 00:56:18] [Rank 0] step:5061/10000 train_time:410691ms step_avg:81.15ms
+[2025-07-07 00:56:20] [Rank 0] step:5081/10000 train_time:412181ms step_avg:81.12ms
+[2025-07-07 00:56:21] [Rank 0] step:5101/10000 train_time:413673ms step_avg:81.10ms
+[2025-07-07 00:56:22] [Rank 0] step:5121/10000 train_time:415165ms step_avg:81.07ms
+[2025-07-07 00:56:25] [Rank 0] step:5141/10000 train_time:417303ms step_avg:81.17ms
+[2025-07-07 00:56:26] [Rank 0] step:5161/10000 train_time:418796ms step_avg:81.15ms
+[2025-07-07 00:56:28] [Rank 0] step:5181/10000 train_time:420289ms step_avg:81.12ms
+[2025-07-07 00:56:29] [Rank 0] step:5201/10000 train_time:421784ms step_avg:81.10ms
+[2025-07-07 00:56:31] [Rank 0] step:5221/10000 train_time:423329ms step_avg:81.08ms
+[2025-07-07 00:56:32] [Rank 0] step:5241/10000 train_time:425013ms step_avg:81.09ms
+[2025-07-07 00:56:34] [Rank 0] step:5261/10000 train_time:426507ms step_avg:81.07ms
+[2025-07-07 00:56:35] [Rank 0] step:5281/10000 train_time:428003ms step_avg:81.05ms
+[2025-07-07 00:56:37] [Rank 0] step:5301/10000 train_time:429595ms step_avg:81.04ms
+[2025-07-07 00:56:39] [Rank 0] step:5321/10000 train_time:431387ms step_avg:81.07ms
+[2025-07-07 00:56:40] [Rank 0] step:5341/10000 train_time:432885ms step_avg:81.05ms
+[2025-07-07 00:56:42] [Rank 0] step:5361/10000 train_time:434384ms step_avg:81.03ms
+[2025-07-07 00:56:43] [Rank 0] step:5381/10000 train_time:435882ms step_avg:81.00ms
+[2025-07-07 00:56:45] [Rank 0] step:5401/10000 train_time:437380ms step_avg:80.98ms
+[2025-07-07 00:56:46] [Rank 0] step:5421/10000 train_time:439111ms step_avg:81.00ms
+[2025-07-07 00:56:48] [Rank 0] step:5441/10000 train_time:440609ms step_avg:80.98ms
+[2025-07-07 00:56:49] [Rank 0] step:5461/10000 train_time:442107ms step_avg:80.96ms
+[2025-07-07 00:56:51] [Rank 0] step:5481/10000 train_time:443605ms step_avg:80.94ms
+[2025-07-07 00:56:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:56:54] [Rank 0] PRINT: step:5500/10000 train_loss:1.0398 val_loss:1.0245 train_time:445755ms step_avg:81.05ms
+[2025-07-07 00:56:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:56:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:56:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:02:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:02:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:02:19] [Rank 0] Total Loss: 5.1750
+[2025-07-07 01:02:19] [Rank 0] Total FTA: 0.6789
+[2025-07-07 01:02:19] [Rank 0] Group 0 Loss: 5.3743
+[2025-07-07 01:02:19] [Rank 0] Group 1 Loss: 4.9335
+[2025-07-07 01:02:19] [Rank 0] Group 2 Loss: 5.1786
+[2025-07-07 01:02:19] [Rank 0] Group 3 Loss: 5.2722
+[2025-07-07 01:02:19] [Rank 0] Group 4 Loss: 5.1151
+[2025-07-07 01:02:19] [Rank 0] Group 5 Loss: 5.1339
+[2025-07-07 01:02:19] [Rank 0] Group 6 Loss: 5.0865
+[2025-07-07 01:02:19] [Rank 0] Group 7 Loss: 5.1438
+[2025-07-07 01:02:19] [Rank 0] Group 8 Loss: 5.2053
+[2025-07-07 01:02:19] [Rank 0] Group 9 Loss: 5.1307
+[2025-07-07 01:02:19] [Rank 0] Group 10 Loss: 5.1762
+[2025-07-07 01:02:19] [Rank 0] Group 11 Loss: 5.1599
+[2025-07-07 01:02:19] [Rank 0] Group 0 FTA: 0.6723
+[2025-07-07 01:02:19] [Rank 0] Group 1 FTA: 0.6328
+[2025-07-07 01:02:19] [Rank 0] Group 2 FTA: 0.7500
+[2025-07-07 01:02:19] [Rank 0] Group 3 FTA: 0.6484
+[2025-07-07 01:02:19] [Rank 0] Group 4 FTA: 0.6693
+[2025-07-07 01:02:19] [Rank 0] Group 5 FTA: 0.7396
+[2025-07-07 01:02:19] [Rank 0] Group 6 FTA: 0.6380
+[2025-07-07 01:02:19] [Rank 0] Group 7 FTA: 0.6797
+[2025-07-07 01:02:19] [Rank 0] Group 8 FTA: 0.6719
+[2025-07-07 01:02:19] [Rank 0] Group 9 FTA: 0.7188
+[2025-07-07 01:02:19] [Rank 0] Group 10 FTA: 0.6738
+[2025-07-07 01:02:19] [Rank 0] Group 11 FTA: 0.6768
+[2025-07-07 01:02:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 01:02:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 01:02:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 01:02:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 01:02:21] [Rank 0] step:5501/10000 train_time:445775ms step_avg:81.04ms
+[2025-07-07 01:02:23] [Rank 0] step:5521/10000 train_time:447278ms step_avg:81.01ms
+[2025-07-07 01:02:24] [Rank 0] step:5541/10000 train_time:448767ms step_avg:80.99ms
+[2025-07-07 01:02:26] [Rank 0] step:5561/10000 train_time:450257ms step_avg:80.97ms
+[2025-07-07 01:02:28] [Rank 0] step:5581/10000 train_time:451749ms step_avg:80.94ms
+[2025-07-07 01:02:29] [Rank 0] step:5601/10000 train_time:453912ms step_avg:81.04ms
+[2025-07-07 01:02:31] [Rank 0] step:5621/10000 train_time:455402ms step_avg:81.02ms
+[2025-07-07 01:02:32] [Rank 0] step:5641/10000 train_time:456894ms step_avg:81.00ms
+[2025-07-07 01:02:34] [Rank 0] step:5661/10000 train_time:458389ms step_avg:80.97ms
+[2025-07-07 01:02:36] [Rank 0] step:5681/10000 train_time:460524ms step_avg:81.06ms
+[2025-07-07 01:02:37] [Rank 0] step:5701/10000 train_time:462018ms step_avg:81.04ms
+[2025-07-07 01:02:39] [Rank 0] step:5721/10000 train_time:463513ms step_avg:81.02ms
+[2025-07-07 01:02:40] [Rank 0] step:5741/10000 train_time:465010ms step_avg:81.00ms
+[2025-07-07 01:02:43] [Rank 0] step:5761/10000 train_time:466507ms step_avg:80.98ms
+[2025-07-07 01:02:44] [Rank 0] step:5781/10000 train_time:468667ms step_avg:81.07ms
+[2025-07-07 01:02:45] [Rank 0] step:5801/10000 train_time:470162ms step_avg:81.05ms
+[2025-07-07 01:02:47] [Rank 0] step:5821/10000 train_time:471659ms step_avg:81.03ms
+[2025-07-07 01:02:48] [Rank 0] step:5841/10000 train_time:473155ms step_avg:81.01ms
+[2025-07-07 01:02:50] [Rank 0] step:5861/10000 train_time:474886ms step_avg:81.02ms
+[2025-07-07 01:02:52] [Rank 0] step:5881/10000 train_time:476386ms step_avg:81.00ms
+[2025-07-07 01:02:53] [Rank 0] step:5901/10000 train_time:477886ms step_avg:80.98ms
+[2025-07-07 01:02:55] [Rank 0] step:5921/10000 train_time:479385ms step_avg:80.96ms
+[2025-07-07 01:02:56] [Rank 0] step:5941/10000 train_time:480992ms step_avg:80.96ms
+[2025-07-07 01:02:58] [Rank 0] step:5961/10000 train_time:482540ms step_avg:80.95ms
+[2025-07-07 01:02:59] [Rank 0] step:5981/10000 train_time:484038ms step_avg:80.93ms
+[2025-07-07 01:03:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
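The warning is plain integer arithmetic: 1966080 / 262144 = 7.5, so each validation pass runs only 7 full batches and the trailing 131072 tokens are skipped. A sketch of the kind of check that would emit such a message (variable names assumed, not taken from the script):

```python
val_tokens = 1_966_080
val_batch_size = 262_144  # 2**18

full_batches, remainder = divmod(val_tokens, val_batch_size)  # 7, 131072
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
# Only full_batches * val_batch_size = 1835008 tokens are consumed per eval.
```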
+[2025-07-07 01:03:02] [Rank 0] PRINT: step:6000/10000 train_loss:1.0166 val_loss:1.0043 train_time:485538ms step_avg:80.92ms
+[2025-07-07 01:03:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:03:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:03:02] [Rank 0] Evaluation set size after sampling: 5633
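A request for ~5000 samples repeatedly yielding exactly 5633 is characteristic of stratified sampling with rounded-up per-class quotas: every class keeps at least its share, so the union overshoots the target by a fixed amount. The sampler itself is not shown in the log; the sketch below is one common scheme, with all names hypothetical:

```python
import math
import random

def stratified_sample(by_class: dict, target: int = 5000, seed: int = 0):
    """Draw a proportional, rounded-up quota from every class so no class
    is left out; the per-class ceilings make the total overshoot target."""
    rng = random.Random(seed)
    total = sum(len(items) for items in by_class.values())
    picked = []
    for items in by_class.values():
        quota = min(len(items), math.ceil(target * len(items) / total))
        picked.extend(rng.sample(items, quota))
    return picked  # ~5000 requested can come out as e.g. 5633 drawn
```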
+[2025-07-07 01:08:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:08:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:08:27] [Rank 0] Total Loss: 5.3009
+[2025-07-07 01:08:27] [Rank 0] Total FTA: 0.7232
+[2025-07-07 01:08:27] [Rank 0] Group 0 Loss: 5.6281
+[2025-07-07 01:08:27] [Rank 0] Group 1 Loss: 5.0782
+[2025-07-07 01:08:27] [Rank 0] Group 2 Loss: 5.2639
+[2025-07-07 01:08:27] [Rank 0] Group 3 Loss: 5.4487
+[2025-07-07 01:08:27] [Rank 0] Group 4 Loss: 5.1760
+[2025-07-07 01:08:27] [Rank 0] Group 5 Loss: 5.2171
+[2025-07-07 01:08:27] [Rank 0] Group 6 Loss: 5.1959
+[2025-07-07 01:08:27] [Rank 0] Group 7 Loss: 5.2795
+[2025-07-07 01:08:27] [Rank 0] Group 8 Loss: 5.2058
+[2025-07-07 01:08:27] [Rank 0] Group 9 Loss: 5.2782
+[2025-07-07 01:08:27] [Rank 0] Group 10 Loss: 5.2513
+[2025-07-07 01:08:27] [Rank 0] Group 11 Loss: 5.2892
+[2025-07-07 01:08:27] [Rank 0] Group 0 FTA: 0.8635
+[2025-07-07 01:08:27] [Rank 0] Group 1 FTA: 0.6953
+[2025-07-07 01:08:27] [Rank 0] Group 2 FTA: 0.8073
+[2025-07-07 01:08:27] [Rank 0] Group 3 FTA: 0.4583
+[2025-07-07 01:08:28] [Rank 0] Group 4 FTA: 0.6536
+[2025-07-07 01:08:28] [Rank 0] Group 5 FTA: 0.7344
+[2025-07-07 01:08:28] [Rank 0] Group 6 FTA: 0.6458
+[2025-07-07 01:08:28] [Rank 0] Group 7 FTA: 0.7786
+[2025-07-07 01:08:28] [Rank 0] Group 8 FTA: 0.6927
+[2025-07-07 01:08:28] [Rank 0] Group 9 FTA: 0.7305
+[2025-07-07 01:08:28] [Rank 0] Group 10 FTA: 0.7344
+[2025-07-07 01:08:28] [Rank 0] Group 11 FTA: 0.7305
+[2025-07-07 01:08:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 01:08:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 01:08:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 01:08:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
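Each detailed-evaluation block reports a loss and an FTA per group plus overall totals. Reading FTA as first-token accuracy (the log never expands the acronym), the report pattern amounts to per-group means over the sampled evaluation set, roughly as sketched below (all names hypothetical):

```python
from collections import defaultdict

def report(records):
    """records: iterable of (group_id, loss, first_token_correct) triples."""
    loss, hits, n = defaultdict(float), defaultdict(int), defaultdict(int)
    for g, l, ok in records:
        loss[g] += l
        hits[g] += int(ok)
        n[g] += 1
    total = sum(n.values())
    print(f"Total Loss: {sum(loss.values()) / total:.4f}")
    print(f"Total FTA: {sum(hits.values()) / total:.4f}")
    for g in sorted(n):
        print(f"Group {g} Loss: {loss[g] / n[g]:.4f}")
    for g in sorted(n):
        print(f"Group {g} FTA: {hits[g] / n[g]:.4f}")
```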
+[2025-07-07 01:08:29] [Rank 0] step:6001/10000 train_time:485559ms step_avg:80.91ms
+[2025-07-07 01:08:31] [Rank 0] step:6021/10000 train_time:487046ms step_avg:80.89ms
+[2025-07-07 01:08:33] [Rank 0] step:6041/10000 train_time:489207ms step_avg:80.98ms
+[2025-07-07 01:08:34] [Rank 0] step:6061/10000 train_time:490697ms step_avg:80.96ms
+[2025-07-07 01:08:36] [Rank 0] step:6081/10000 train_time:492190ms step_avg:80.94ms
+[2025-07-07 01:08:37] [Rank 0] step:6101/10000 train_time:493681ms step_avg:80.92ms
+[2025-07-07 01:08:39] [Rank 0] step:6121/10000 train_time:495224ms step_avg:80.91ms
+[2025-07-07 01:08:41] [Rank 0] step:6141/10000 train_time:497317ms step_avg:80.98ms
+[2025-07-07 01:08:42] [Rank 0] step:6161/10000 train_time:498810ms step_avg:80.96ms
+[2025-07-07 01:08:44] [Rank 0] step:6181/10000 train_time:500304ms step_avg:80.94ms
+[2025-07-07 01:08:45] [Rank 0] step:6201/10000 train_time:501799ms step_avg:80.92ms
+[2025-07-07 01:08:48] [Rank 0] step:6221/10000 train_time:503961ms step_avg:81.01ms
+[2025-07-07 01:08:49] [Rank 0] step:6241/10000 train_time:505456ms step_avg:80.99ms
+[2025-07-07 01:08:51] [Rank 0] step:6261/10000 train_time:506951ms step_avg:80.97ms
+[2025-07-07 01:08:52] [Rank 0] step:6281/10000 train_time:508448ms step_avg:80.95ms
+[2025-07-07 01:08:54] [Rank 0] step:6301/10000 train_time:510198ms step_avg:80.97ms
+[2025-07-07 01:08:56] [Rank 0] step:6321/10000 train_time:512105ms step_avg:81.02ms
+[2025-07-07 01:08:57] [Rank 0] step:6341/10000 train_time:513602ms step_avg:81.00ms
+[2025-07-07 01:08:59] [Rank 0] step:6361/10000 train_time:515098ms step_avg:80.98ms
+[2025-07-07 01:09:00] [Rank 0] step:6381/10000 train_time:516596ms step_avg:80.96ms
+[2025-07-07 01:09:02] [Rank 0] step:6401/10000 train_time:518747ms step_avg:81.04ms
+[2025-07-07 01:09:04] [Rank 0] step:6421/10000 train_time:520243ms step_avg:81.02ms
+[2025-07-07 01:09:05] [Rank 0] step:6441/10000 train_time:521745ms step_avg:81.00ms
+[2025-07-07 01:09:07] [Rank 0] step:6461/10000 train_time:523243ms step_avg:80.98ms
+[2025-07-07 01:09:09] [Rank 0] step:6481/10000 train_time:524791ms step_avg:80.97ms
+[2025-07-07 01:09:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:09:11] [Rank 0] PRINT: step:6500/10000 train_loss:0.9990 val_loss:0.9902 train_time:526890ms step_avg:81.06ms
+[2025-07-07 01:09:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:09:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:09:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:14:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:14:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:14:38] [Rank 0] Total Loss: 5.2042
+[2025-07-07 01:14:38] [Rank 0] Total FTA: 0.7138
+[2025-07-07 01:14:38] [Rank 0] Group 0 Loss: 5.3876
+[2025-07-07 01:14:38] [Rank 0] Group 1 Loss: 5.0820
+[2025-07-07 01:14:38] [Rank 0] Group 2 Loss: 5.0597
+[2025-07-07 01:14:38] [Rank 0] Group 3 Loss: 5.4291
+[2025-07-07 01:14:38] [Rank 0] Group 4 Loss: 5.1800
+[2025-07-07 01:14:38] [Rank 0] Group 5 Loss: 5.1636
+[2025-07-07 01:14:38] [Rank 0] Group 6 Loss: 5.0553
+[2025-07-07 01:14:38] [Rank 0] Group 7 Loss: 5.2408
+[2025-07-07 01:14:39] [Rank 0] Group 8 Loss: 5.1389
+[2025-07-07 01:14:39] [Rank 0] Group 9 Loss: 5.1376
+[2025-07-07 01:14:39] [Rank 0] Group 10 Loss: 5.2080
+[2025-07-07 01:14:39] [Rank 0] Group 11 Loss: 5.1876
+[2025-07-07 01:14:39] [Rank 0] Group 0 FTA: 0.6593
+[2025-07-07 01:14:39] [Rank 0] Group 1 FTA: 0.4427
+[2025-07-07 01:14:39] [Rank 0] Group 2 FTA: 0.8516
+[2025-07-07 01:14:39] [Rank 0] Group 3 FTA: 0.6042
+[2025-07-07 01:14:39] [Rank 0] Group 4 FTA: 0.6693
+[2025-07-07 01:14:39] [Rank 0] Group 5 FTA: 0.7917
+[2025-07-07 01:14:39] [Rank 0] Group 6 FTA: 0.7292
+[2025-07-07 01:14:39] [Rank 0] Group 7 FTA: 0.7865
+[2025-07-07 01:14:39] [Rank 0] Group 8 FTA: 0.7318
+[2025-07-07 01:14:39] [Rank 0] Group 9 FTA: 0.7812
+[2025-07-07 01:14:39] [Rank 0] Group 10 FTA: 0.7520
+[2025-07-07 01:14:39] [Rank 0] Group 11 FTA: 0.7578
+[2025-07-07 01:14:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 01:14:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 01:14:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 01:14:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 01:14:40] [Rank 0] step:6501/10000 train_time:526910ms step_avg:81.05ms
+[2025-07-07 01:14:42] [Rank 0] step:6521/10000 train_time:528406ms step_avg:81.03ms
+[2025-07-07 01:14:43] [Rank 0] step:6541/10000 train_time:529895ms step_avg:81.01ms
+[2025-07-07 01:14:45] [Rank 0] step:6561/10000 train_time:531384ms step_avg:80.99ms
+[2025-07-07 01:14:47] [Rank 0] step:6581/10000 train_time:533544ms step_avg:81.07ms
+[2025-07-07 01:14:48] [Rank 0] step:6601/10000 train_time:535034ms step_avg:81.05ms
+[2025-07-07 01:14:50] [Rank 0] step:6621/10000 train_time:536527ms step_avg:81.03ms
+[2025-07-07 01:14:51] [Rank 0] step:6641/10000 train_time:538021ms step_avg:81.01ms
+[2025-07-07 01:14:53] [Rank 0] step:6661/10000 train_time:539567ms step_avg:81.00ms
+[2025-07-07 01:14:55] [Rank 0] step:6681/10000 train_time:541653ms step_avg:81.07ms
+[2025-07-07 01:14:56] [Rank 0] step:6701/10000 train_time:543147ms step_avg:81.05ms
+[2025-07-07 01:14:58] [Rank 0] step:6721/10000 train_time:544642ms step_avg:81.04ms
+[2025-07-07 01:14:59] [Rank 0] step:6741/10000 train_time:546138ms step_avg:81.02ms
+[2025-07-07 01:15:02] [Rank 0] step:6761/10000 train_time:548301ms step_avg:81.10ms
+[2025-07-07 01:15:03] [Rank 0] step:6781/10000 train_time:549797ms step_avg:81.08ms
+[2025-07-07 01:15:05] [Rank 0] step:6801/10000 train_time:551293ms step_avg:81.06ms
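Comparing wall-clock timestamps against the train_time counter shows the detailed evaluation is excluded from the training timer: between the step:6500 PRINT at 01:09:11 and step:6501 at 01:14:40, train_time advances only 20 ms while about five and a half minutes of wall clock elapse. A quick cross-check using values copied from the log:

```python
from datetime import datetime

FMT = "%Y-%m-%d %H:%M:%S"
eval_start = datetime.strptime("2025-07-07 01:09:11", FMT)
eval_end = datetime.strptime("2025-07-07 01:14:40", FMT)

wall_s = (eval_end - eval_start).total_seconds()  # 329 s of wall clock
timer_ms = 526910 - 526890                        # 20 ms of train_time
print(wall_s, timer_ms)  # the detailed eval does not count toward train_time
```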
+[2025-07-07 01:15:06] [Rank 0] step:6821/10000 train_time:552791ms step_avg:81.04ms
+[2025-07-07 01:15:08] [Rank 0] step:6841/10000 train_time:554544ms step_avg:81.06ms
+[2025-07-07 01:15:10] [Rank 0] step:6861/10000 train_time:556441ms step_avg:81.10ms
+[2025-07-07 01:15:11] [Rank 0] step:6881/10000 train_time:557979ms step_avg:81.09ms
+[2025-07-07 01:15:13] [Rank 0] step:6901/10000 train_time:559477ms step_avg:81.07ms
+[2025-07-07 01:15:14] [Rank 0] step:6921/10000 train_time:560973ms step_avg:81.05ms
+[2025-07-07 01:15:16] [Rank 0] step:6941/10000 train_time:563113ms step_avg:81.13ms
+[2025-07-07 01:15:18] [Rank 0] step:6961/10000 train_time:564610ms step_avg:81.11ms
+[2025-07-07 01:15:19] [Rank 0] step:6981/10000 train_time:566108ms step_avg:81.09ms
+[2025-07-07 01:15:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:15:22] [Rank 0] PRINT: step:7000/10000 train_loss:0.9841 val_loss:0.9759 train_time:567606ms step_avg:81.09ms
+[2025-07-07 01:15:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:15:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:15:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:20:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:20:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:20:48] [Rank 0] Total Loss: 5.3303
+[2025-07-07 01:20:48] [Rank 0] Total FTA: 0.7392
+[2025-07-07 01:20:48] [Rank 0] Group 0 Loss: 5.5374
+[2025-07-07 01:20:48] [Rank 0] Group 1 Loss: 5.1968
+[2025-07-07 01:20:48] [Rank 0] Group 2 Loss: 5.4486
+[2025-07-07 01:20:48] [Rank 0] Group 3 Loss: 5.4141
+[2025-07-07 01:20:48] [Rank 0] Group 4 Loss: 5.1965
+[2025-07-07 01:20:48] [Rank 0] Group 5 Loss: 5.1891
+[2025-07-07 01:20:48] [Rank 0] Group 6 Loss: 5.2267
+[2025-07-07 01:20:48] [Rank 0] Group 7 Loss: 5.2959
+[2025-07-07 01:20:48] [Rank 0] Group 8 Loss: 5.3410
+[2025-07-07 01:20:48] [Rank 0] Group 9 Loss: 5.3050
+[2025-07-07 01:20:48] [Rank 0] Group 10 Loss: 5.3181
+[2025-07-07 01:20:48] [Rank 0] Group 11 Loss: 5.3121
+[2025-07-07 01:20:48] [Rank 0] Group 0 FTA: 0.6905
+[2025-07-07 01:20:48] [Rank 0] Group 1 FTA: 0.6224
+[2025-07-07 01:20:48] [Rank 0] Group 2 FTA: 0.7370
+[2025-07-07 01:20:48] [Rank 0] Group 3 FTA: 0.7214
+[2025-07-07 01:20:48] [Rank 0] Group 4 FTA: 0.6589
+[2025-07-07 01:20:48] [Rank 0] Group 5 FTA: 0.8255
+[2025-07-07 01:20:48] [Rank 0] Group 6 FTA: 0.7292
+[2025-07-07 01:20:48] [Rank 0] Group 7 FTA: 0.7786
+[2025-07-07 01:20:48] [Rank 0] Group 8 FTA: 0.7786
+[2025-07-07 01:20:48] [Rank 0] Group 9 FTA: 0.7656
+[2025-07-07 01:20:48] [Rank 0] Group 10 FTA: 0.7676
+[2025-07-07 01:20:48] [Rank 0] Group 11 FTA: 0.7783
+[2025-07-07 01:20:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 01:20:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 01:20:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 01:20:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 01:20:50] [Rank 0] step:7001/10000 train_time:567628ms step_avg:81.08ms
+[2025-07-07 01:20:52] [Rank 0] step:7021/10000 train_time:569220ms step_avg:81.07ms
+[2025-07-07 01:20:54] [Rank 0] step:7041/10000 train_time:571380ms step_avg:81.15ms
+[2025-07-07 01:20:55] [Rank 0] step:7061/10000 train_time:572871ms step_avg:81.13ms
+[2025-07-07 01:20:57] [Rank 0] step:7081/10000 train_time:574361ms step_avg:81.11ms
+[2025-07-07 01:20:58] [Rank 0] step:7101/10000 train_time:575854ms step_avg:81.09ms
+[2025-07-07 01:21:00] [Rank 0] step:7121/10000 train_time:577993ms step_avg:81.17ms
+[2025-07-07 01:21:02] [Rank 0] step:7141/10000 train_time:579486ms step_avg:81.15ms
+[2025-07-07 01:21:03] [Rank 0] step:7161/10000 train_time:580978ms step_avg:81.13ms
+[2025-07-07 01:21:05] [Rank 0] step:7181/10000 train_time:582474ms step_avg:81.11ms
+[2025-07-07 01:21:07] [Rank 0] step:7201/10000 train_time:584021ms step_avg:81.10ms
+[2025-07-07 01:21:08] [Rank 0] step:7221/10000 train_time:586103ms step_avg:81.17ms
+[2025-07-07 01:21:10] [Rank 0] step:7241/10000 train_time:587598ms step_avg:81.15ms
+[2025-07-07 01:21:11] [Rank 0] step:7261/10000 train_time:589092ms step_avg:81.13ms
+[2025-07-07 01:21:13] [Rank 0] step:7281/10000 train_time:590588ms step_avg:81.11ms
+[2025-07-07 01:21:15] [Rank 0] step:7301/10000 train_time:592747ms step_avg:81.19ms
+[2025-07-07 01:21:17] [Rank 0] step:7321/10000 train_time:594242ms step_avg:81.17ms
+[2025-07-07 01:21:18] [Rank 0] step:7341/10000 train_time:595738ms step_avg:81.15ms
+[2025-07-07 01:21:20] [Rank 0] step:7361/10000 train_time:597235ms step_avg:81.14ms
+[2025-07-07 01:21:21] [Rank 0] step:7381/10000 train_time:598884ms step_avg:81.14ms
+[2025-07-07 01:21:23] [Rank 0] step:7401/10000 train_time:600361ms step_avg:81.12ms
+[2025-07-07 01:21:24] [Rank 0] step:7421/10000 train_time:601858ms step_avg:81.10ms
+[2025-07-07 01:21:26] [Rank 0] step:7441/10000 train_time:603354ms step_avg:81.09ms
+[2025-07-07 01:21:27] [Rank 0] step:7461/10000 train_time:604852ms step_avg:81.07ms
+[2025-07-07 01:21:29] [Rank 0] step:7481/10000 train_time:606988ms step_avg:81.14ms
+[2025-07-07 01:21:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:21:32] [Rank 0] PRINT: step:7500/10000 train_loss:0.9704 val_loss:0.9631 train_time:608485ms step_avg:81.13ms
+[2025-07-07 01:21:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:21:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:21:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:27:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:27:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:27:00] [Rank 0] Total Loss: 5.3605
+[2025-07-07 01:27:00] [Rank 0] Total FTA: 0.8344
+[2025-07-07 01:27:00] [Rank 0] Group 0 Loss: 5.2718
+[2025-07-07 01:27:00] [Rank 0] Group 1 Loss: 5.2396
+[2025-07-07 01:27:00] [Rank 0] Group 2 Loss: 5.2405
+[2025-07-07 01:27:00] [Rank 0] Group 3 Loss: 5.5311
+[2025-07-07 01:27:00] [Rank 0] Group 4 Loss: 5.3269
+[2025-07-07 01:27:00] [Rank 0] Group 5 Loss: 5.2384
+[2025-07-07 01:27:00] [Rank 0] Group 6 Loss: 5.3878
+[2025-07-07 01:27:00] [Rank 0] Group 7 Loss: 5.3707
+[2025-07-07 01:27:00] [Rank 0] Group 8 Loss: 5.4190
+[2025-07-07 01:27:00] [Rank 0] Group 9 Loss: 5.4204
+[2025-07-07 01:27:00] [Rank 0] Group 10 Loss: 5.4160
+[2025-07-07 01:27:00] [Rank 0] Group 11 Loss: 5.4329
+[2025-07-07 01:27:00] [Rank 0] Group 0 FTA: 0.8375
+[2025-07-07 01:27:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 01:27:00] [Rank 0] Group 2 FTA: 0.8438
+[2025-07-07 01:27:00] [Rank 0] Group 3 FTA: 0.8464
+[2025-07-07 01:27:00] [Rank 0] Group 4 FTA: 0.7969
+[2025-07-07 01:27:00] [Rank 0] Group 5 FTA: 0.9010
+[2025-07-07 01:27:00] [Rank 0] Group 6 FTA: 0.7891
+[2025-07-07 01:27:00] [Rank 0] Group 7 FTA: 0.8099
+[2025-07-07 01:27:00] [Rank 0] Group 8 FTA: 0.8203
+[2025-07-07 01:27:00] [Rank 0] Group 9 FTA: 0.7930
+[2025-07-07 01:27:00] [Rank 0] Group 10 FTA: 0.7871
+[2025-07-07 01:27:00] [Rank 0] Group 11 FTA: 0.8164
+[2025-07-07 01:27:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 01:27:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 01:27:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 01:27:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 01:27:02] [Rank 0] step:7501/10000 train_time:608506ms step_avg:81.12ms
+[2025-07-07 01:27:03] [Rank 0] step:7521/10000 train_time:610003ms step_avg:81.11ms
+[2025-07-07 01:27:05] [Rank 0] step:7541/10000 train_time:611492ms step_avg:81.09ms
+[2025-07-07 01:27:07] [Rank 0] step:7561/10000 train_time:613240ms step_avg:81.11ms
+[2025-07-07 01:27:08] [Rank 0] step:7581/10000 train_time:615131ms step_avg:81.14ms
+[2025-07-07 01:27:10] [Rank 0] step:7601/10000 train_time:616623ms step_avg:81.12ms
+[2025-07-07 01:27:11] [Rank 0] step:7621/10000 train_time:618181ms step_avg:81.12ms
+[2025-07-07 01:27:13] [Rank 0] step:7641/10000 train_time:619673ms step_avg:81.10ms
+[2025-07-07 01:27:15] [Rank 0] step:7661/10000 train_time:621809ms step_avg:81.17ms
+[2025-07-07 01:27:17] [Rank 0] step:7681/10000 train_time:623303ms step_avg:81.15ms
+[2025-07-07 01:27:18] [Rank 0] step:7701/10000 train_time:624797ms step_avg:81.13ms
+[2025-07-07 01:27:20] [Rank 0] step:7721/10000 train_time:626291ms step_avg:81.12ms
+[2025-07-07 01:27:21] [Rank 0] step:7741/10000 train_time:627784ms step_avg:81.10ms
+[2025-07-07 01:27:23] [Rank 0] step:7761/10000 train_time:629514ms step_avg:81.11ms
+[2025-07-07 01:27:24] [Rank 0] step:7781/10000 train_time:631010ms step_avg:81.10ms
+[2025-07-07 01:27:26] [Rank 0] step:7801/10000 train_time:632506ms step_avg:81.08ms
+[2025-07-07 01:27:27] [Rank 0] step:7821/10000 train_time:634004ms step_avg:81.06ms
+[2025-07-07 01:27:29] [Rank 0] step:7841/10000 train_time:636145ms step_avg:81.13ms
+[2025-07-07 01:27:31] [Rank 0] step:7861/10000 train_time:637641ms step_avg:81.11ms
+[2025-07-07 01:27:32] [Rank 0] step:7881/10000 train_time:639139ms step_avg:81.10ms
+[2025-07-07 01:27:34] [Rank 0] step:7901/10000 train_time:640638ms step_avg:81.08ms
+[2025-07-07 01:27:36] [Rank 0] step:7921/10000 train_time:642813ms step_avg:81.15ms
+[2025-07-07 01:27:38] [Rank 0] step:7941/10000 train_time:644293ms step_avg:81.13ms
+[2025-07-07 01:27:39] [Rank 0] step:7961/10000 train_time:645793ms step_avg:81.12ms
+[2025-07-07 01:27:41] [Rank 0] step:7981/10000 train_time:647294ms step_avg:81.10ms
+[2025-07-07 01:27:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:27:43] [Rank 0] PRINT: step:8000/10000 train_loss:0.9580 val_loss:0.9523 train_time:648796ms step_avg:81.10ms
+[2025-07-07 01:27:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:27:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:27:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:33:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:33:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:33:10] [Rank 0] Total Loss: 5.4571
+[2025-07-07 01:33:10] [Rank 0] Total FTA: 0.8314
+[2025-07-07 01:33:10] [Rank 0] Group 0 Loss: 5.5688
+[2025-07-07 01:33:10] [Rank 0] Group 1 Loss: 5.2352
+[2025-07-07 01:33:10] [Rank 0] Group 2 Loss: 5.2683
+[2025-07-07 01:33:10] [Rank 0] Group 3 Loss: 5.5848
+[2025-07-07 01:33:10] [Rank 0] Group 4 Loss: 5.4521
+[2025-07-07 01:33:10] [Rank 0] Group 5 Loss: 5.3640
+[2025-07-07 01:33:10] [Rank 0] Group 6 Loss: 5.3874
+[2025-07-07 01:33:10] [Rank 0] Group 7 Loss: 5.4949
+[2025-07-07 01:33:10] [Rank 0] Group 8 Loss: 5.5261
+[2025-07-07 01:33:10] [Rank 0] Group 9 Loss: 5.5246
+[2025-07-07 01:33:10] [Rank 0] Group 10 Loss: 5.4769
+[2025-07-07 01:33:10] [Rank 0] Group 11 Loss: 5.4756
+[2025-07-07 01:33:10] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-07 01:33:10] [Rank 0] Group 1 FTA: 0.8750
+[2025-07-07 01:33:10] [Rank 0] Group 2 FTA: 0.7578
+[2025-07-07 01:33:10] [Rank 0] Group 3 FTA: 0.9010
+[2025-07-07 01:33:10] [Rank 0] Group 4 FTA: 0.8776
+[2025-07-07 01:33:10] [Rank 0] Group 5 FTA: 0.8880
+[2025-07-07 01:33:10] [Rank 0] Group 6 FTA: 0.8099
+[2025-07-07 01:33:10] [Rank 0] Group 7 FTA: 0.8438
+[2025-07-07 01:33:10] [Rank 0] Group 8 FTA: 0.7891
+[2025-07-07 01:33:10] [Rank 0] Group 9 FTA: 0.8203
+[2025-07-07 01:33:10] [Rank 0] Group 10 FTA: 0.8027
+[2025-07-07 01:33:10] [Rank 0] Group 11 FTA: 0.8135
+[2025-07-07 01:33:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 01:33:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 01:33:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 01:33:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 01:33:12] [Rank 0] step:8001/10000 train_time:648818ms step_avg:81.09ms
+[2025-07-07 01:33:14] [Rank 0] step:8021/10000 train_time:650950ms step_avg:81.16ms
+[2025-07-07 01:33:15] [Rank 0] step:8041/10000 train_time:652438ms step_avg:81.14ms
+[2025-07-07 01:33:17] [Rank 0] step:8061/10000 train_time:653926ms step_avg:81.12ms
+[2025-07-07 01:33:18] [Rank 0] step:8081/10000 train_time:655417ms step_avg:81.11ms
+[2025-07-07 01:33:20] [Rank 0] step:8101/10000 train_time:657587ms step_avg:81.17ms
+[2025-07-07 01:33:22] [Rank 0] step:8121/10000 train_time:659063ms step_avg:81.16ms
+[2025-07-07 01:33:23] [Rank 0] step:8141/10000 train_time:660553ms step_avg:81.14ms
+[2025-07-07 01:33:25] [Rank 0] step:8161/10000 train_time:662045ms step_avg:81.12ms
+[2025-07-07 01:33:26] [Rank 0] step:8181/10000 train_time:663538ms step_avg:81.11ms
+[2025-07-07 01:33:28] [Rank 0] step:8201/10000 train_time:665674ms step_avg:81.17ms
+[2025-07-07 01:33:30] [Rank 0] step:8221/10000 train_time:667166ms step_avg:81.15ms
+[2025-07-07 01:33:32] [Rank 0] step:8241/10000 train_time:668792ms step_avg:81.15ms
+[2025-07-07 01:33:33] [Rank 0] step:8261/10000 train_time:670288ms step_avg:81.14ms
+[2025-07-07 01:33:35] [Rank 0] step:8281/10000 train_time:671783ms step_avg:81.12ms
+[2025-07-07 01:33:37] [Rank 0] step:8301/10000 train_time:673947ms step_avg:81.19ms
+[2025-07-07 01:33:38] [Rank 0] step:8321/10000 train_time:675441ms step_avg:81.17ms
+[2025-07-07 01:33:40] [Rank 0] step:8341/10000 train_time:676938ms step_avg:81.16ms
+[2025-07-07 01:33:41] [Rank 0] step:8361/10000 train_time:678435ms step_avg:81.14ms
+[2025-07-07 01:33:43] [Rank 0] step:8381/10000 train_time:680584ms step_avg:81.21ms
+[2025-07-07 01:33:45] [Rank 0] step:8401/10000 train_time:682082ms step_avg:81.19ms
+[2025-07-07 01:33:46] [Rank 0] step:8421/10000 train_time:683582ms step_avg:81.18ms
+[2025-07-07 01:33:48] [Rank 0] step:8441/10000 train_time:685083ms step_avg:81.16ms
+[2025-07-07 01:33:50] [Rank 0] step:8461/10000 train_time:686584ms step_avg:81.15ms
+[2025-07-07 01:33:51] [Rank 0] step:8481/10000 train_time:688730ms step_avg:81.21ms
+[2025-07-07 01:33:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:33:54] [Rank 0] PRINT: step:8500/10000 train_loss:0.9476 val_loss:0.9437 train_time:690229ms step_avg:81.20ms
+[2025-07-07 01:33:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:33:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:33:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:39:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:39:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:39:21] [Rank 0] Total Loss: 5.5781
+[2025-07-07 01:39:21] [Rank 0] Total FTA: 0.8464
+[2025-07-07 01:39:21] [Rank 0] Group 0 Loss: 5.6841
+[2025-07-07 01:39:21] [Rank 0] Group 1 Loss: 5.3527
+[2025-07-07 01:39:21] [Rank 0] Group 2 Loss: 5.4196
+[2025-07-07 01:39:21] [Rank 0] Group 3 Loss: 5.7633
+[2025-07-07 01:39:21] [Rank 0] Group 4 Loss: 5.5384
+[2025-07-07 01:39:21] [Rank 0] Group 5 Loss: 5.5070
+[2025-07-07 01:39:21] [Rank 0] Group 6 Loss: 5.5565
+[2025-07-07 01:39:21] [Rank 0] Group 7 Loss: 5.5434
+[2025-07-07 01:39:21] [Rank 0] Group 8 Loss: 5.5676
+[2025-07-07 01:39:21] [Rank 0] Group 9 Loss: 5.5741
+[2025-07-07 01:39:21] [Rank 0] Group 10 Loss: 5.5477
+[2025-07-07 01:39:21] [Rank 0] Group 11 Loss: 5.6560
+[2025-07-07 01:39:21] [Rank 0] Group 0 FTA: 0.8388
+[2025-07-07 01:39:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 01:39:21] [Rank 0] Group 2 FTA: 0.7448
+[2025-07-07 01:39:21] [Rank 0] Group 3 FTA: 0.8125
+[2025-07-07 01:39:21] [Rank 0] Group 4 FTA: 0.8307
+[2025-07-07 01:39:21] [Rank 0] Group 5 FTA: 0.9323
+[2025-07-07 01:39:21] [Rank 0] Group 6 FTA: 0.7812
+[2025-07-07 01:39:21] [Rank 0] Group 7 FTA: 0.8490
+[2025-07-07 01:39:21] [Rank 0] Group 8 FTA: 0.8516
+[2025-07-07 01:39:21] [Rank 0] Group 9 FTA: 0.8555
+[2025-07-07 01:39:21] [Rank 0] Group 10 FTA: 0.8555
+[2025-07-07 01:39:21] [Rank 0] Group 11 FTA: 0.8340
+[2025-07-07 01:39:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 01:39:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 01:39:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 01:39:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 01:39:22] [Rank 0] step:8501/10000 train_time:690251ms step_avg:81.20ms
+[2025-07-07 01:39:24] [Rank 0] step:8521/10000 train_time:691749ms step_avg:81.18ms
+[2025-07-07 01:39:25] [Rank 0] step:8541/10000 train_time:693236ms step_avg:81.17ms
+[2025-07-07 01:39:28] [Rank 0] step:8561/10000 train_time:695378ms step_avg:81.23ms
+[2025-07-07 01:39:29] [Rank 0] step:8581/10000 train_time:696868ms step_avg:81.21ms
+[2025-07-07 01:39:31] [Rank 0] step:8601/10000 train_time:698361ms step_avg:81.20ms
+[2025-07-07 01:39:32] [Rank 0] step:8621/10000 train_time:699852ms step_avg:81.18ms
+[2025-07-07 01:39:34] [Rank 0] step:8641/10000 train_time:701345ms step_avg:81.16ms
+[2025-07-07 01:39:36] [Rank 0] step:8661/10000 train_time:703487ms step_avg:81.22ms
+[2025-07-07 01:39:37] [Rank 0] step:8681/10000 train_time:704980ms step_avg:81.21ms
+[2025-07-07 01:39:39] [Rank 0] step:8701/10000 train_time:706474ms step_avg:81.19ms
+[2025-07-07 01:39:40] [Rank 0] step:8721/10000 train_time:707969ms step_avg:81.18ms
+[2025-07-07 01:39:42] [Rank 0] step:8741/10000 train_time:710131ms step_avg:81.24ms
+[2025-07-07 01:39:44] [Rank 0] step:8761/10000 train_time:711626ms step_avg:81.23ms
+[2025-07-07 01:39:45] [Rank 0] step:8781/10000 train_time:713123ms step_avg:81.21ms
+[2025-07-07 01:39:47] [Rank 0] step:8801/10000 train_time:714621ms step_avg:81.20ms
0] step:8801/10000 train_time:714621ms step_avg:81.20ms +[2025-07-07 01:39:49] [Rank 0] step:8821/10000 train_time:716170ms step_avg:81.19ms +[2025-07-07 01:39:49] [Rank 0] step:8821/10000 train_time:716170ms step_avg:81.19ms +[2025-07-07 01:39:51] [Rank 0] step:8841/10000 train_time:718387ms step_avg:81.26ms +[2025-07-07 01:39:51] [Rank 0] step:8841/10000 train_time:718387ms step_avg:81.26ms +[2025-07-07 01:39:52] [Rank 0] step:8861/10000 train_time:719885ms step_avg:81.24ms +[2025-07-07 01:39:52] [Rank 0] step:8861/10000 train_time:719885ms step_avg:81.24ms +[2025-07-07 01:39:54] [Rank 0] step:8881/10000 train_time:721383ms step_avg:81.23ms +[2025-07-07 01:39:54] [Rank 0] step:8881/10000 train_time:721383ms step_avg:81.23ms +[2025-07-07 01:39:55] [Rank 0] step:8901/10000 train_time:722885ms step_avg:81.21ms +[2025-07-07 01:39:55] [Rank 0] step:8901/10000 train_time:722885ms step_avg:81.21ms +[2025-07-07 01:39:57] [Rank 0] step:8921/10000 train_time:724622ms step_avg:81.23ms +[2025-07-07 01:39:57] [Rank 0] step:8921/10000 train_time:724622ms step_avg:81.23ms +[2025-07-07 01:39:58] [Rank 0] step:8941/10000 train_time:726123ms step_avg:81.21ms +[2025-07-07 01:39:58] [Rank 0] step:8941/10000 train_time:726123ms step_avg:81.21ms +[2025-07-07 01:40:00] [Rank 0] step:8961/10000 train_time:727624ms step_avg:81.20ms +[2025-07-07 01:40:00] [Rank 0] step:8961/10000 train_time:727624ms step_avg:81.20ms +[2025-07-07 01:40:01] [Rank 0] step:8981/10000 train_time:729124ms step_avg:81.19ms +[2025-07-07 01:40:01] [Rank 0] step:8981/10000 train_time:729124ms step_avg:81.19ms +[2025-07-07 01:40:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 01:40:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 01:40:04] [Rank 0] PRINT: step:9000/10000 train_loss:0.9385 val_loss:0.9358 train_time:730626ms step_avg:81.18ms +[2025-07-07 01:40:04] [Rank 0] PRINT: step:9000/10000 train_loss:0.9385 val_loss:0.9358 train_time:730626ms step_avg:81.18ms +[2025-07-07 01:40:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 01:40:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 01:40:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 01:40:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 01:40:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 01:40:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 01:45:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 01:45:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 01:45:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 01:45:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 01:45:31] [Rank 0] Total Loss: 5.6094 +[2025-07-07 01:45:31] [Rank 0] Total Loss: 5.6094 +[2025-07-07 01:45:31] [Rank 0] Total FTA: 0.8679 +[2025-07-07 01:45:31] [Rank 0] Total FTA: 0.8679 +[2025-07-07 01:45:31] [Rank 0] Group 0 Loss: 5.6869 +[2025-07-07 01:45:31] [Rank 0] Group 0 Loss: 5.6869 +[2025-07-07 01:45:31] [Rank 0] Group 1 Loss: 5.4309 +[2025-07-07 01:45:31] [Rank 0] Group 1 Loss: 5.4309 +[2025-07-07 01:45:31] [Rank 0] Group 2 Loss: 5.4451 +[2025-07-07 01:45:31] [Rank 0] Group 2 Loss: 5.4451 +[2025-07-07 01:45:31] [Rank 0] Group 3 Loss: 5.8451 +[2025-07-07 01:45:31] [Rank 0] Group 3 Loss: 5.8451 +[2025-07-07 01:45:31] [Rank 0] Group 4 Loss: 5.5307 +[2025-07-07 01:45:31] [Rank 0] Group 4 Loss: 5.5307 +[2025-07-07 01:45:31] [Rank 0] Group 5 Loss: 5.5782 +[2025-07-07 01:45:31] [Rank 0] Group 5 Loss: 5.5782 +[2025-07-07 01:45:31] [Rank 0] Group 6 Loss: 5.5968 +[2025-07-07 01:45:31] [Rank 0] Group 6 Loss: 5.5968 +[2025-07-07 01:45:31] [Rank 0] Group 7 Loss: 5.5852 +[2025-07-07 01:45:31] [Rank 0] Group 7 Loss: 5.5852 +[2025-07-07 01:45:31] [Rank 0] Group 8 Loss: 5.6895 +[2025-07-07 01:45:31] [Rank 0] Group 8 Loss: 5.6895 +[2025-07-07 01:45:31] [Rank 0] Group 9 Loss: 5.5161 +[2025-07-07 01:45:31] [Rank 0] Group 9 Loss: 5.5161 +[2025-07-07 01:45:31] [Rank 0] Group 10 Loss: 5.6408 +[2025-07-07 01:45:31] [Rank 0] Group 10 Loss: 5.6408 +[2025-07-07 01:45:31] [Rank 0] Group 11 Loss: 5.6237 +[2025-07-07 01:45:31] [Rank 0] Group 11 Loss: 5.6237 +[2025-07-07 01:45:31] [Rank 0] Group 0 FTA: 0.8440 +[2025-07-07 01:45:31] [Rank 0] Group 0 FTA: 0.8440 +[2025-07-07 01:45:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 01:45:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 01:45:31] [Rank 0] Group 2 FTA: 0.7240 +[2025-07-07 01:45:31] [Rank 0] Group 2 FTA: 0.7240 +[2025-07-07 01:45:31] [Rank 0] Group 3 FTA: 0.9635 +[2025-07-07 01:45:31] [Rank 0] Group 3 FTA: 0.9635 +[2025-07-07 01:45:31] [Rank 0] Group 4 FTA: 0.8411 +[2025-07-07 01:45:31] [Rank 0] Group 4 FTA: 0.8411 +[2025-07-07 01:45:31] [Rank 0] Group 5 FTA: 0.9297 +[2025-07-07 01:45:31] [Rank 0] Group 5 FTA: 0.9297 +[2025-07-07 01:45:31] [Rank 0] Group 6 FTA: 0.8984 +[2025-07-07 01:45:31] [Rank 0] Group 6 FTA: 0.8984 +[2025-07-07 01:45:31] [Rank 0] Group 7 FTA: 0.8698 +[2025-07-07 01:45:31] [Rank 0] Group 7 FTA: 0.8698 +[2025-07-07 01:45:31] [Rank 0] Group 8 FTA: 0.8385 +[2025-07-07 01:45:31] [Rank 0] Group 8 FTA: 0.8385 +[2025-07-07 01:45:31] [Rank 0] Group 9 FTA: 0.8281 +[2025-07-07 01:45:31] [Rank 0] Group 9 FTA: 0.8281 +[2025-07-07 01:45:31] [Rank 0] Group 10 FTA: 0.8574 +[2025-07-07 01:45:31] [Rank 0] Group 10 FTA: 0.8574 +[2025-07-07 01:45:31] [Rank 0] Group 11 FTA: 0.8555 +[2025-07-07 01:45:31] [Rank 0] Group 11 FTA: 0.8555 +[2025-07-07 01:45:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 01:45:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 01:45:32] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png +[2025-07-07 01:45:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png +[2025-07-07 01:45:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png +[2025-07-07 01:45:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png +[2025-07-07 01:45:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png +[2025-07-07 01:45:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png +[2025-07-07 01:45:34] [Rank 0] step:9001/10000 train_time:731389ms step_avg:81.26ms +[2025-07-07 01:45:34] [Rank 0] step:9001/10000 train_time:731389ms step_avg:81.26ms +[2025-07-07 01:45:35] [Rank 0] step:9021/10000 train_time:732878ms step_avg:81.24ms +[2025-07-07 01:45:35] [Rank 0] step:9021/10000 train_time:732878ms step_avg:81.24ms +[2025-07-07 01:45:37] [Rank 0] step:9041/10000 train_time:734368ms step_avg:81.23ms +[2025-07-07 01:45:37] [Rank 0] step:9041/10000 train_time:734368ms step_avg:81.23ms +[2025-07-07 01:45:38] [Rank 0] step:9061/10000 train_time:735856ms step_avg:81.21ms +[2025-07-07 01:45:38] [Rank 0] step:9061/10000 train_time:735856ms step_avg:81.21ms +[2025-07-07 01:45:40] [Rank 0] step:9081/10000 train_time:737346ms step_avg:81.20ms +[2025-07-07 01:45:40] [Rank 0] step:9081/10000 train_time:737346ms step_avg:81.20ms +[2025-07-07 01:45:42] [Rank 0] step:9101/10000 train_time:739501ms step_avg:81.25ms +[2025-07-07 01:45:42] [Rank 0] step:9101/10000 train_time:739501ms step_avg:81.25ms +[2025-07-07 01:45:43] [Rank 0] step:9121/10000 train_time:740990ms step_avg:81.24ms +[2025-07-07 01:45:43] [Rank 0] step:9121/10000 train_time:740990ms step_avg:81.24ms +[2025-07-07 01:45:45] [Rank 0] step:9141/10000 train_time:742481ms step_avg:81.23ms +[2025-07-07 01:45:45] [Rank 0] step:9141/10000 train_time:742481ms step_avg:81.23ms +[2025-07-07 01:45:46] [Rank 0] step:9161/10000 train_time:743975ms step_avg:81.21ms +[2025-07-07 01:45:46] [Rank 0] step:9161/10000 train_time:743975ms step_avg:81.21ms +[2025-07-07 01:45:48] [Rank 0] step:9181/10000 train_time:746137ms step_avg:81.27ms +[2025-07-07 01:45:48] [Rank 0] step:9181/10000 train_time:746137ms step_avg:81.27ms +[2025-07-07 01:45:50] [Rank 0] step:9201/10000 train_time:747611ms step_avg:81.25ms +[2025-07-07 01:45:50] [Rank 0] step:9201/10000 train_time:747611ms step_avg:81.25ms +[2025-07-07 01:45:51] [Rank 0] step:9221/10000 train_time:749105ms step_avg:81.24ms +[2025-07-07 01:45:51] [Rank 0] step:9221/10000 train_time:749105ms step_avg:81.24ms +[2025-07-07 01:45:53] [Rank 0] step:9241/10000 train_time:750599ms step_avg:81.22ms +[2025-07-07 01:45:53] [Rank 0] step:9241/10000 train_time:750599ms step_avg:81.22ms +[2025-07-07 01:45:54] [Rank 0] step:9261/10000 train_time:752094ms step_avg:81.21ms +[2025-07-07 01:45:54] [Rank 0] step:9261/10000 train_time:752094ms step_avg:81.21ms +[2025-07-07 01:45:57] [Rank 0] step:9281/10000 train_time:754255ms step_avg:81.27ms +[2025-07-07 01:45:57] [Rank 0] step:9281/10000 train_time:754255ms step_avg:81.27ms +[2025-07-07 01:45:58] [Rank 0] step:9301/10000 train_time:755751ms step_avg:81.25ms +[2025-07-07 01:45:58] [Rank 
0] step:9301/10000 train_time:755751ms step_avg:81.25ms +[2025-07-07 01:46:00] [Rank 0] step:9321/10000 train_time:757248ms step_avg:81.24ms +[2025-07-07 01:46:00] [Rank 0] step:9321/10000 train_time:757248ms step_avg:81.24ms +[2025-07-07 01:46:01] [Rank 0] step:9341/10000 train_time:758746ms step_avg:81.23ms +[2025-07-07 01:46:01] [Rank 0] step:9341/10000 train_time:758746ms step_avg:81.23ms +[2025-07-07 01:46:03] [Rank 0] step:9361/10000 train_time:760296ms step_avg:81.22ms +[2025-07-07 01:46:03] [Rank 0] step:9361/10000 train_time:760296ms step_avg:81.22ms +[2025-07-07 01:46:04] [Rank 0] step:9381/10000 train_time:761980ms step_avg:81.23ms +[2025-07-07 01:46:04] [Rank 0] step:9381/10000 train_time:761980ms step_avg:81.23ms +[2025-07-07 01:46:06] [Rank 0] step:9401/10000 train_time:763478ms step_avg:81.21ms +[2025-07-07 01:46:06] [Rank 0] step:9401/10000 train_time:763478ms step_avg:81.21ms +[2025-07-07 01:46:07] [Rank 0] step:9421/10000 train_time:764978ms step_avg:81.20ms +[2025-07-07 01:46:07] [Rank 0] step:9421/10000 train_time:764978ms step_avg:81.20ms +[2025-07-07 01:46:09] [Rank 0] step:9441/10000 train_time:766477ms step_avg:81.19ms +[2025-07-07 01:46:09] [Rank 0] step:9441/10000 train_time:766477ms step_avg:81.19ms +[2025-07-07 01:46:10] [Rank 0] step:9461/10000 train_time:768127ms step_avg:81.19ms +[2025-07-07 01:46:10] [Rank 0] step:9461/10000 train_time:768127ms step_avg:81.19ms +[2025-07-07 01:46:12] [Rank 0] step:9481/10000 train_time:769624ms step_avg:81.18ms +[2025-07-07 01:46:12] [Rank 0] step:9481/10000 train_time:769624ms step_avg:81.18ms +[2025-07-07 01:46:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 01:46:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 01:46:14] [Rank 0] PRINT: step:9500/10000 train_loss:0.9310 val_loss:0.9299 train_time:771124ms step_avg:81.17ms +[2025-07-07 01:46:14] [Rank 0] PRINT: step:9500/10000 train_loss:0.9310 val_loss:0.9299 train_time:771124ms step_avg:81.17ms +[2025-07-07 01:46:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 01:46:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 01:46:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 01:46:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 01:46:14] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 01:46:14] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 01:51:42] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 01:51:42] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 01:51:42] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 01:51:42] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 01:51:42] [Rank 0] Total Loss: 5.6768 +[2025-07-07 01:51:42] [Rank 0] Total Loss: 5.6768 +[2025-07-07 01:51:42] [Rank 0] Total FTA: 0.8912 +[2025-07-07 01:51:42] [Rank 0] Total FTA: 0.8912 +[2025-07-07 01:51:42] [Rank 0] Group 0 Loss: 5.8946 +[2025-07-07 01:51:42] [Rank 0] Group 0 Loss: 5.8946 +[2025-07-07 01:51:42] [Rank 0] Group 1 Loss: 5.4060 +[2025-07-07 01:51:42] [Rank 0] Group 1 Loss: 5.4060 +[2025-07-07 01:51:42] [Rank 0] Group 2 Loss: 5.6300 +[2025-07-07 01:51:42] [Rank 0] Group 2 Loss: 5.6300 +[2025-07-07 01:51:42] [Rank 0] Group 3 Loss: 5.8839 +[2025-07-07 01:51:42] [Rank 0] Group 3 Loss: 5.8839 +[2025-07-07 01:51:42] [Rank 0] Group 4 Loss: 5.6017 +[2025-07-07 01:51:42] [Rank 0] Group 4 Loss: 5.6017 +[2025-07-07 01:51:42] [Rank 0] Group 5 Loss: 5.5736 +[2025-07-07 01:51:42] [Rank 0] Group 5 Loss: 5.5736 +[2025-07-07 01:51:42] [Rank 0] Group 6 Loss: 5.5964 +[2025-07-07 01:51:42] [Rank 0] Group 6 Loss: 5.5964 +[2025-07-07 01:51:42] [Rank 0] Group 7 Loss: 5.6849 +[2025-07-07 01:51:42] [Rank 0] Group 7 Loss: 5.6849 +[2025-07-07 01:51:42] [Rank 0] Group 8 Loss: 5.5993 +[2025-07-07 01:51:42] [Rank 0] Group 8 Loss: 5.5993 +[2025-07-07 01:51:42] [Rank 0] Group 9 Loss: 5.7447 +[2025-07-07 01:51:42] [Rank 0] Group 9 Loss: 5.7447 +[2025-07-07 01:51:42] [Rank 0] Group 10 Loss: 5.7025 +[2025-07-07 01:51:42] [Rank 0] Group 10 Loss: 5.7025 +[2025-07-07 01:51:42] [Rank 0] Group 11 Loss: 5.6479 +[2025-07-07 01:51:42] [Rank 0] Group 11 Loss: 5.6479 +[2025-07-07 01:51:42] [Rank 0] Group 0 FTA: 0.8244 +[2025-07-07 01:51:42] [Rank 0] Group 0 FTA: 0.8244 +[2025-07-07 01:51:42] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 01:51:42] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 01:51:42] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-07 01:51:42] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-07 01:51:42] [Rank 0] Group 3 FTA: 0.9141 +[2025-07-07 01:51:42] [Rank 0] Group 3 FTA: 0.9141 +[2025-07-07 01:51:42] [Rank 0] Group 4 FTA: 0.8568 +[2025-07-07 01:51:42] [Rank 0] Group 4 FTA: 0.8568 +[2025-07-07 01:51:42] [Rank 0] Group 5 FTA: 0.9036 +[2025-07-07 01:51:42] [Rank 0] Group 5 FTA: 0.9036 +[2025-07-07 01:51:42] [Rank 0] Group 6 FTA: 0.8307 +[2025-07-07 01:51:42] [Rank 0] Group 6 FTA: 0.8307 +[2025-07-07 01:51:42] [Rank 0] Group 7 FTA: 0.8932 +[2025-07-07 01:51:42] [Rank 0] Group 7 FTA: 0.8932 +[2025-07-07 01:51:42] [Rank 0] Group 8 FTA: 0.8776 +[2025-07-07 01:51:42] [Rank 0] Group 8 FTA: 0.8776 +[2025-07-07 01:51:42] [Rank 0] Group 9 FTA: 0.8672 +[2025-07-07 01:51:42] [Rank 0] Group 9 FTA: 0.8672 +[2025-07-07 01:51:42] [Rank 0] Group 10 FTA: 0.8965 +[2025-07-07 01:51:42] [Rank 0] Group 10 FTA: 0.8965 +[2025-07-07 01:51:42] [Rank 0] Group 11 FTA: 0.8896 +[2025-07-07 01:51:42] [Rank 0] Group 11 FTA: 0.8896 +[2025-07-07 01:51:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 01:51:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 01:51:43] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png +[2025-07-07 01:51:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png +[2025-07-07 01:51:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png +[2025-07-07 01:51:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png +[2025-07-07 01:51:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png +[2025-07-07 01:51:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png +[2025-07-07 01:51:44] [Rank 0] step:9501/10000 train_time:771145ms step_avg:81.16ms +[2025-07-07 01:51:44] [Rank 0] step:9501/10000 train_time:771145ms step_avg:81.16ms +[2025-07-07 01:51:45] [Rank 0] step:9521/10000 train_time:772627ms step_avg:81.15ms +[2025-07-07 01:51:45] [Rank 0] step:9521/10000 train_time:772627ms step_avg:81.15ms +[2025-07-07 01:51:47] [Rank 0] step:9541/10000 train_time:774373ms step_avg:81.16ms +[2025-07-07 01:51:47] [Rank 0] step:9541/10000 train_time:774373ms step_avg:81.16ms +[2025-07-07 01:51:49] [Rank 0] step:9561/10000 train_time:776259ms step_avg:81.19ms +[2025-07-07 01:51:49] [Rank 0] step:9561/10000 train_time:776259ms step_avg:81.19ms +[2025-07-07 01:51:50] [Rank 0] step:9581/10000 train_time:777750ms step_avg:81.18ms +[2025-07-07 01:51:50] [Rank 0] step:9581/10000 train_time:777750ms step_avg:81.18ms +[2025-07-07 01:51:52] [Rank 0] step:9601/10000 train_time:779241ms step_avg:81.16ms +[2025-07-07 01:51:52] [Rank 0] step:9601/10000 train_time:779241ms step_avg:81.16ms +[2025-07-07 01:51:53] [Rank 0] step:9621/10000 train_time:780734ms step_avg:81.15ms +[2025-07-07 01:51:53] [Rank 0] step:9621/10000 train_time:780734ms step_avg:81.15ms +[2025-07-07 01:51:55] [Rank 0] step:9641/10000 train_time:782874ms step_avg:81.20ms +[2025-07-07 01:51:55] [Rank 0] step:9641/10000 train_time:782874ms step_avg:81.20ms +[2025-07-07 01:51:57] [Rank 0] step:9661/10000 train_time:784365ms step_avg:81.19ms +[2025-07-07 01:51:57] [Rank 0] step:9661/10000 train_time:784365ms step_avg:81.19ms +[2025-07-07 01:51:58] [Rank 0] step:9681/10000 train_time:785859ms step_avg:81.18ms +[2025-07-07 01:51:58] [Rank 0] step:9681/10000 train_time:785859ms step_avg:81.18ms +[2025-07-07 01:52:00] [Rank 0] step:9701/10000 train_time:787353ms step_avg:81.16ms +[2025-07-07 01:52:00] [Rank 0] step:9701/10000 train_time:787353ms step_avg:81.16ms +[2025-07-07 01:52:01] [Rank 0] step:9721/10000 train_time:788904ms step_avg:81.15ms +[2025-07-07 01:52:01] [Rank 0] step:9721/10000 train_time:788904ms step_avg:81.15ms +[2025-07-07 01:52:03] [Rank 0] step:9741/10000 train_time:790580ms step_avg:81.16ms +[2025-07-07 01:52:03] [Rank 0] step:9741/10000 train_time:790580ms step_avg:81.16ms +[2025-07-07 01:52:04] [Rank 0] step:9761/10000 train_time:792075ms step_avg:81.15ms +[2025-07-07 01:52:04] [Rank 0] step:9761/10000 train_time:792075ms step_avg:81.15ms +[2025-07-07 01:52:06] [Rank 0] step:9781/10000 train_time:793569ms step_avg:81.13ms +[2025-07-07 01:52:06] [Rank 0] step:9781/10000 train_time:793569ms step_avg:81.13ms +[2025-07-07 01:52:07] [Rank 0] step:9801/10000 train_time:795066ms step_avg:81.12ms +[2025-07-07 01:52:07] [Rank 
0] step:9801/10000 train_time:795066ms step_avg:81.12ms +[2025-07-07 01:52:10] [Rank 0] step:9821/10000 train_time:797212ms step_avg:81.17ms +[2025-07-07 01:52:10] [Rank 0] step:9821/10000 train_time:797212ms step_avg:81.17ms +[2025-07-07 01:52:11] [Rank 0] step:9841/10000 train_time:798707ms step_avg:81.16ms +[2025-07-07 01:52:11] [Rank 0] step:9841/10000 train_time:798707ms step_avg:81.16ms +[2025-07-07 01:52:13] [Rank 0] step:9861/10000 train_time:800204ms step_avg:81.15ms +[2025-07-07 01:52:13] [Rank 0] step:9861/10000 train_time:800204ms step_avg:81.15ms +[2025-07-07 01:52:14] [Rank 0] step:9881/10000 train_time:801703ms step_avg:81.14ms +[2025-07-07 01:52:14] [Rank 0] step:9881/10000 train_time:801703ms step_avg:81.14ms +[2025-07-07 01:52:16] [Rank 0] step:9901/10000 train_time:803456ms step_avg:81.15ms +[2025-07-07 01:52:16] [Rank 0] step:9901/10000 train_time:803456ms step_avg:81.15ms +[2025-07-07 01:52:18] [Rank 0] step:9921/10000 train_time:805355ms step_avg:81.18ms +[2025-07-07 01:52:18] [Rank 0] step:9921/10000 train_time:805355ms step_avg:81.18ms +[2025-07-07 01:52:19] [Rank 0] step:9941/10000 train_time:806852ms step_avg:81.16ms +[2025-07-07 01:52:19] [Rank 0] step:9941/10000 train_time:806852ms step_avg:81.16ms +[2025-07-07 01:52:21] [Rank 0] step:9961/10000 train_time:808350ms step_avg:81.15ms +[2025-07-07 01:52:21] [Rank 0] step:9961/10000 train_time:808350ms step_avg:81.15ms +[2025-07-07 01:52:22] [Rank 0] step:9981/10000 train_time:809848ms step_avg:81.14ms +[2025-07-07 01:52:22] [Rank 0] step:9981/10000 train_time:809848ms step_avg:81.14ms +[2025-07-07 01:52:24] [Rank 0] step:10000/10000 train_time:811920ms step_avg:81.19ms +[2025-07-07 01:52:24] [Rank 0] step:10000/10000 train_time:811920ms step_avg:81.19ms +[2025-07-07 01:52:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 01:52:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 01:52:25] [Rank 0] PRINT: step:10000/10000 train_loss:0.9253 val_loss:0.9253 train_time:812000ms step_avg:81.20ms +[2025-07-07 01:52:25] [Rank 0] PRINT: step:10000/10000 train_loss:0.9253 val_loss:0.9253 train_time:812000ms step_avg:81.20ms +[2025-07-07 01:52:25] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 01:52:25] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 01:52:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 01:52:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 01:52:25] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 01:52:25] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 01:57:53] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 01:57:53] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 01:57:53] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 01:57:53] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 01:57:53] [Rank 0] Total Loss: 5.6859 +[2025-07-07 01:57:53] [Rank 0] Total Loss: 5.6859 +[2025-07-07 01:57:53] [Rank 0] Total FTA: 0.9290 +[2025-07-07 01:57:53] [Rank 0] Total FTA: 0.9290 +[2025-07-07 01:57:53] [Rank 0] Group 0 Loss: 5.8580 +[2025-07-07 01:57:53] [Rank 0] Group 0 Loss: 5.8580 +[2025-07-07 01:57:53] [Rank 0] Group 1 Loss: 5.4340 +[2025-07-07 01:57:53] [Rank 0] Group 1 Loss: 5.4340 +[2025-07-07 01:57:53] [Rank 0] Group 2 Loss: 5.6358 +[2025-07-07 01:57:53] [Rank 0] Group 2 Loss: 5.6358 +[2025-07-07 01:57:53] [Rank 0] Group 3 Loss: 5.8510 +[2025-07-07 01:57:53] [Rank 0] Group 3 Loss: 5.8510 +[2025-07-07 01:57:53] [Rank 0] Group 4 Loss: 5.5475 +[2025-07-07 01:57:53] [Rank 0] Group 4 Loss: 5.5475 +[2025-07-07 01:57:53] [Rank 0] Group 5 Loss: 5.5210 +[2025-07-07 01:57:53] [Rank 0] Group 5 Loss: 5.5210 +[2025-07-07 01:57:53] [Rank 0] Group 6 Loss: 5.6302 +[2025-07-07 01:57:53] [Rank 0] Group 6 Loss: 5.6302 +[2025-07-07 01:57:53] [Rank 0] Group 7 Loss: 5.7458 +[2025-07-07 01:57:53] [Rank 0] Group 7 Loss: 5.7458 +[2025-07-07 01:57:53] [Rank 0] Group 8 Loss: 5.5985 +[2025-07-07 01:57:53] [Rank 0] Group 8 Loss: 5.5985 +[2025-07-07 01:57:53] [Rank 0] Group 9 Loss: 5.6986 +[2025-07-07 01:57:53] [Rank 0] Group 9 Loss: 5.6986 +[2025-07-07 01:57:53] [Rank 0] Group 10 Loss: 5.7529 +[2025-07-07 01:57:53] [Rank 0] Group 10 Loss: 5.7529 +[2025-07-07 01:57:53] [Rank 0] Group 11 Loss: 5.7161 +[2025-07-07 01:57:53] [Rank 0] Group 11 Loss: 5.7161 +[2025-07-07 01:57:53] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-07 01:57:53] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-07 01:57:53] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 01:57:53] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 01:57:53] [Rank 0] Group 2 FTA: 0.8984 +[2025-07-07 01:57:53] [Rank 0] Group 2 FTA: 0.8984 +[2025-07-07 01:57:53] [Rank 0] Group 3 FTA: 0.9271 +[2025-07-07 01:57:53] [Rank 0] Group 3 FTA: 0.9271 +[2025-07-07 01:57:53] [Rank 0] Group 4 FTA: 0.9141 +[2025-07-07 01:57:53] [Rank 0] Group 4 FTA: 0.9141 +[2025-07-07 01:57:53] [Rank 0] Group 5 FTA: 0.9583 +[2025-07-07 01:57:53] [Rank 0] Group 5 FTA: 0.9583 +[2025-07-07 01:57:53] [Rank 0] Group 6 FTA: 0.9219 +[2025-07-07 01:57:53] [Rank 0] Group 6 FTA: 0.9219 +[2025-07-07 01:57:53] [Rank 0] Group 7 FTA: 0.9193 +[2025-07-07 01:57:53] [Rank 0] Group 7 FTA: 0.9193 +[2025-07-07 01:57:53] [Rank 0] Group 8 FTA: 0.8880 +[2025-07-07 01:57:53] [Rank 0] Group 8 FTA: 0.8880 +[2025-07-07 01:57:53] [Rank 0] Group 9 FTA: 0.8945 +[2025-07-07 01:57:53] [Rank 0] Group 9 FTA: 0.8945 +[2025-07-07 01:57:53] [Rank 0] Group 10 FTA: 0.9180 +[2025-07-07 01:57:53] [Rank 0] Group 10 FTA: 0.9180 +[2025-07-07 01:57:53] [Rank 0] Group 11 FTA: 0.8916 +[2025-07-07 01:57:53] [Rank 0] Group 11 FTA: 0.8916 +[2025-07-07 01:57:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 01:57:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 01:57:54] [Rank 0] [✓] Per-Class FTA curve 
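(Editor's note: "FTA" in the log above is first-token accuracy — the fraction of QA items for which the model's greedy next-token prediction after the question prompt equals the first token of the reference answer. A minimal sketch of the check, assuming an HF-style causal LM `model` with a `.logits` output and a GPT-2 `tokenizer`; these names and that interface are assumptions for illustration, not the logged script's exact API:)

    import torch

    def first_token_match(model, tokenizer, prompt: str, answer: str) -> bool:
        # The first answer token is encoded with a leading space,
        # matching how the answer follows the question during training.
        expected = tokenizer.encode(" " + answer, add_special_tokens=False)[0]
        ids = torch.tensor([tokenizer.encode(prompt, add_special_tokens=False)])
        with torch.no_grad():
            logits = model(ids).logits          # (1, seq_len, vocab_size)
        predicted = int(logits[0, -1].argmax()) # greedy next token after the prompt
        return predicted == expected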
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..40f15b337c68751fb62e8fa3d6d778446fb9d938
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 46,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "877fe8e4-de5b-44d4-afbf-15fa55d5d177",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..c8f1f655bb390e0c401330d714354e4a72e95555
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:10d22fd0f9b9387e5fb4fb43762e0e448184a251d39b774ee92f06dc52a64d82
+size 272212
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..b6277de7ced0d75f23e07dfd1a6276089ff88133
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35ac0ed0f4a426e7e297b89bf3f7076eac46997d1ec4d12ff8a83334fd1211e8
+size 352652
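(Editor's note: the .png entries in this commit are Git LFS pointer files — the repository stores only the LFS spec version, a sha256 object id, and the byte size, while the actual image lives in LFS storage.)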
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..26319da435670fa574b81a54f45ff46484a36360
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2533741343c11b6cbf2b463db0cd508860ed46b81219f2b31769d8fa3c1e08c8
+size 90199
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..259dc0cf4cde80b515b49f82655db5a013bfa465
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:19bf12ea05fac0637243afc1f370057dbd22307a22a6de2d7b51085d594f5503
+size 123511
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/training_log_877fe8e4-de5b-44d4-afbf-15fa55d5d177.txt b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/training_log_877fe8e4-de5b-44d4-afbf-15fa55d5d177.txt
new file mode 100644
index 0000000000000000000000000000000000000000..905a17db78184840b21d7530caf9d46b24424279
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/training_log_877fe8e4-de5b-44d4-afbf-15fa55d5d177.txt
@@ -0,0 +1,3308 @@
+[2025-07-08 07:04:03] [Rank 0] PRINT: --- Script Start: Tue Jul 8 07:04:03 2025 ---
+[2025-07-08 07:04:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005)
+[2025-07-08 07:04:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-08 07:04:03] [Rank 0] PRINT: Using fixed seed: 46
+[2025-07-08 07:04:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46
+[2025-07-08 07:04:03] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycling through the shards makes multi-epoch training wrap around automatically
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
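(Editor's note: an illustrative use of the loader above — a sketch, not part of the logged script. Each rank pulls its own slice of every global batch, so `batch_size` is the global token count and must divide evenly by `world_size`; the exact call made by the training loop lies outside this excerpt and may differ:)

    loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
    inputs, targets = next(loader)  # int32 inputs and int64 targets, shifted by one token, already on the GPU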
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole","qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
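(Editor's note: RANK, LOCAL_RANK, and WORLD_SIZE are read from the environment, so the script is meant to be started through a DDP launcher such as torchrun; a hypothetical invocation — the script filename and GPU count are assumptions — would look like:)

    torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
        --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0005 --seed 46

(Run without a launcher, the defaults of rank 0 and world size 1 give single-GPU behavior.)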
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file exactly once (the earlier version
+        # wrote it twice, which is why every entry appeared duplicated)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # use full cached set
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(
+                f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f" Expected : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
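(Editor's note: a worked example of the helper above. For m = 3 it returns

    generate_powerlaw_selection_counts(3)
    # -> ({0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1},  # samples per class
    #     [0, 1, 2, 2, 3, 3, 3, 3])                          # group id per class

i.e. group g holds 2**(g-1) classes (a single class for g = 0), each seen 2**(m-g) times, so every group with g >= 1 contributes the same total of 2**(m-1) samples and group 0 contributes twice that. The runs logged here report metrics for Groups 0-11, consistent with m = 11.)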
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    # =================================================================
+    # <<<<< Restored Missing Code >>>>>
+    # =================================================================
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+if run_flag:
+    ########################################
+    #   Construct model and optimizer      #
+    ########################################
+    if master_process:
+        try:
+            ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+            print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+        except Exception:
+            ft_tokenizer = None
+            print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+    else:
+        ft_tokenizer = None
+
+    print0("PRINT: Constructing model...", console=True)
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+    for m in model.modules():
+        if isinstance(m, nn.Embedding):
+            m.bfloat16()
+    print0("PRINT: Broadcasting model parameters...", console=True)
+    for param in model.parameters():
+        dist.broadcast(param.detach(), 0)
+    print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model forward function:", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)
+            model.train()
+
+            print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+            if isinstance(result, tuple):
+                print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+                if len(result) >= 2:
+                    print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                    print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+            else:
+                print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+    model_for_inference = model
+    print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model with target_seq=None...", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)  # target_seq=None
+            model.train()
+
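+            # Check whether the forward pass returns a (loss, logits) tuple when
+            # target_seq is None; the evaluation helpers handle both this tuple
+            # form and a bare logits tensor.
+            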
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 07:04:03] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
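+                         # NOTE: modes 5-8 are later additions used by the
+                         # Adam-vs-Muon ablation; see the optimizer setup below.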
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
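# NOTE: run_flag (computed below) skips the whole run when this configuration's
+# results directory already exists, so completed runs are not overwritten.
+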
base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a "PRINT:"-prefixed message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write to the log file exactly once (the original wrote a second,
+        # unconditional copy, duplicating every line and raising on
+        # logfile=None whenever run_flag was False)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Plot a metric history: either {step: value} or {group_id: {step: value}}."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]  # keys are training steps stored as strings
+            steps = sorted([int(s) for s in step_data.keys()])
+            values = [step_data[str(s)] for s in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(s) for s in history.keys()])
+        values = [history[str(s)] for s in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
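+    Kept for reference: run_detailed_evaluation above supersedes this function by
+    computing loss and first-token accuracy in one pass over the same stratified
+    sample. Returns {group_id (str): mean loss} over the power-law class groups.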
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+if run_flag:
+    ########################################
+    #   Construct model and optimizer      #
+    ########################################
+    if master_process:
+        try:
+            ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+            print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+        except Exception:
+            ft_tokenizer = None
+            print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+    else:
+        ft_tokenizer = None
+
+    print0("PRINT: Constructing model...", console=True)
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+    for m in model.modules():
+        if isinstance(m, nn.Embedding):
+            m.bfloat16()
+    print0("PRINT: Broadcasting model parameters...", console=True)
+    for param in model.parameters():
+        dist.broadcast(param.detach(), 0)
+    print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model forward function:", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)
+            model.train()
+
+            print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+            if isinstance(result, tuple):
+                print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+                if len(result) >= 2:
+                    print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                    print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+            else:
+                print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+    model_for_inference = model
+    print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model with target_seq=None...", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)  # target_seq=None
+            model.train()
+
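+            # Check whether the forward pass returns a (loss, logits) tuple when
+            # target_seq is None; the evaluation helpers handle both this tuple
+            # form and a bare logits tensor.
+            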
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
+        # Determine parameter distribution based on optimizer_mode
+        muon_params_target_list = []
+        adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+        adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+        current_optimizer_mode = exp_args.optimizer_mode
+        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+        if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+            print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+            muon_params_target_list = all_attn_matrices + all_mlp_matrices
+            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+        elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_qk_group
+            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+        elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group
+            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+        elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_attn_matrices
+            adam_matrix_target_list = all_mlp_matrices
+        elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_mlp_matrices
+            adam_matrix_target_list = all_attn_matrices
+        elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = []
+            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+        elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = mlp_w2_group
+            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+        elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + all_mlp_matrices
+            adam_matrix_target_list = attn_qk_group
+        elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + mlp_w2_group
+            adam_matrix_target_list = attn_qk_group + mlp_w1_group
+        else:
+            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    elif exp_args.model_parameterization == "whole":
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+        scalar_params = [p for p in model.parameters() if p.ndim < 2]
+        head_params = [model.lm_head.weight]
+
+        # init the optimizer(s)
+        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+        optimizers = [optimizer1, optimizer2]
+
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["initial_lr"] = group["lr"]
+
+    # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+    def get_lr(step: int):
+        x = step / args.num_iterations  # progress in training
+        # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+        # --- MODIFICATION: Adjust assert for LR schedule ---
+        if not (0 <= x <= 1):  # Allow x=1 for the last step
+            x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+            # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+        if x < 1 - args.cooldown_frac:
+            return 1.0
+        else:
+            # Ensure cooldown_frac is not zero to avoid division by zero
+            w = (1 - x) / max(args.cooldown_frac, 1e-9)
+            return w * 1.0 + (1 - w) * 0.1
+
+    # attention window size schedule (KEEP AS IS)
+    def next_multiple_of_n(v: float | int, *, n: int):
+        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+    @lru_cache(1)
+    def get_window_size_blocks_helper(window_size: int):
+        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+    def get_window_size_blocks(step: int):
+        x = step / args.num_iterations  # progress in training
+        # --- MODIFICATION: Adjust assert for window size schedule ---
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0)  # Clamp x
+
+        # Ensure window_size is at least 128
+        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+        return get_window_size_blocks_helper(window_size)
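+    # --- Editor's note: this sampling loop is an addition, not part of the original
+    # run (console=False keeps the log output unchanged). With num_iterations=10000 and
+    # cooldown_frac=0.8, get_lr holds the multiplier at 1.0 for the first 20% of
+    # training and then decays it linearly down to 0.1, while the sliding-attention
+    # window grows from 1 block (128 tokens) at step 0 to 14 blocks (1792 tokens) at
+    # the final step.
+    if master_process:
+        for s in (0, 2000, 5000, 10000):
+            print0(f"schedule check - step {s}: lr_mult={get_lr(s):.4f} window_blocks={int(get_window_size_blocks(s).item())}", console=False)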
+    print0("PRINT: Compiling model with TorchInductor...", console=True)
+    # Use 'model' for compilation, not 'model_compiled' before it's defined
+    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+    print0("PRINT: Model compilation complete.", console=True)
+
+    ########################################
+    #            Warmup kernels            #
+    ########################################
+    print0("PRINT: Starting warmup...", console=True)
+    warmup_steps = 10
+    initial_state = dict(
+        model=copy.deepcopy(model_compiled.state_dict()),
+        optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+    )
+
+    for i in range(warmup_steps):
+        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+        loss.backward()
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+        for opt in optimizers:
+            opt.step()
+        model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+    del initial_state
+    print0("PRINT: Warmup complete.", console=True)
+    torch.cuda.synchronize()
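+    # --- Editor's note: the warmup above is a compile/autotune pass, not real training:
+    # it snapshots model and optimizer state, runs ten throwaway steps on random tokens
+    # so the TorchInductor kernels get built, then restores the snapshot so training
+    # still starts from the broadcast initialization. A helper like this (an addition,
+    # not in the original script; it would have to run before `del initial_state`)
+    # could assert the restore is bit-exact:
+    def states_match_sketch(sd_a: dict, sd_b: dict) -> bool:
+        # Compare two state dicts tensor-by-tensor on CPU.
+        return sd_a.keys() == sd_b.keys() and all(torch.equal(sd_a[k].cpu(), sd_b[k].cpu()) for k in sd_a)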
+    ########################################
+    #        Training and validation       #
+    ########################################
+    print0("PRINT: Starting training...", console=True)
+    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+    train_loss_sum = torch.zeros(1, device=device)
+    train_step_count = torch.zeros(1, device=device)
+    training_time_ms = 0
+    torch.cuda.synchronize()
+    t0 = time.perf_counter()
+    train_steps = args.num_iterations
+
+    if master_process:
+        tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+        history = {
+            'per_class_loss': defaultdict(dict),
+            'per_class_acc': defaultdict(dict),
+            'total_loss': {},
+            'total_acc': {}
+        }
+        # ------------------------------------
+        QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+        M_FOR_POWERLAW = 11
+        NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+    for step in range(train_steps + 1):
+        last_step = (step == train_steps)
+
+        # --------- VALIDATION SECTION ---------
+        if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+            torch.cuda.synchronize()
+            if step > 0:
+                current_run_time = 1000 * (time.perf_counter() - t0)
+                training_time_ms += current_run_time
+
+            model_compiled.eval()
+            val_batch_size = world_size * args.val_seq_len
+            if args.val_tokens % val_batch_size != 0:
+                print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+            val_num_steps = args.val_tokens // val_batch_size
+            val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+            val_loss_sum = torch.zeros(1, device=device)
+            actual_val_steps = 0
+
+            with torch.no_grad():
+                for val_i in range(val_num_steps):
+                    try:
+                        inputs, targets = next(val_loader)
+                        loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                        val_loss_sum += loss_val
+                        actual_val_steps += 1
+                    except StopIteration:
+                        print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                        break
+
+            if actual_val_steps > 0:
+                val_loss_avg = val_loss_sum / actual_val_steps
+            else:
+                val_loss_avg = torch.tensor(float('nan'), device=device)
+                print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+            del val_loader
+            dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+            if train_step_count > 0:
+                avg_train_loss = train_loss_sum / train_step_count
+                dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+                avg_train_loss = avg_train_loss.item()
+            else:
+                avg_train_loss = float('nan')
+
+            avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+            # first_token_acc = 0.0
+            # ft_correct = 0
+            # ft_total = 0
+            # if master_process and ft_tokenizer is not None:
+            #     try:
+            #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+            #             model_for_inference, ft_tokenizer, device, num_samples=1000
+            #         )
+            #     except Exception as e:
+            #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+            # if world_size > 1:
+            #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+            #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+            #     ft_total_tensor = torch.tensor(ft_total, device=device)
+            #     dist.broadcast(ft_acc_tensor, 0)
+            #     dist.broadcast(ft_correct_tensor, 0)
+            #     dist.broadcast(ft_total_tensor, 0)
+            #     first_token_acc = ft_acc_tensor.item()
+            #     ft_correct = int(ft_correct_tensor.item())
+            #     ft_total = int(ft_total_tensor.item())
+
+            avg_train_loss = float(avg_train_loss)
+            if step == 0:
+                print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+            else:
+                print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+            if master_process and step > 0:
+                selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+                class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+                model_for_inference.load_state_dict(model.state_dict())
+
+                eval_results = run_detailed_evaluation(
+                    model=model_for_inference,
+                    tokenizer=tokenizer_for_eval,
+                    qa_data_path=QA_JSONL_PATH,
+                    device=device,
+                    m_val=M_FOR_POWERLAW,
+                    class_to_group_map=class_to_group_map,
+                    num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                )
+
+                print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+                print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+                print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+                for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+                for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+                current_step_str = str(step)
+                history['total_loss'][current_step_str] = eval_results['total_loss']
+                history['total_acc'][current_step_str] = eval_results['total_acc']
+                for group_id, loss in eval_results['per_class_loss'].items():
+                    history['per_class_loss'][group_id][current_step_str] = loss
+                for group_id, acc in eval_results['per_class_acc'].items():
+                    history['per_class_acc'][group_id][current_step_str] = acc
+
+                plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+                plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+                plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+                plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+            if world_size > 1:
+                dist.barrier()
+
+            if master_process and args.save_checkpoint and step > 0:
+                if run_dir_path_str:
+                    checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                    checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                    checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                    log_checkpoint = dict(
+                        step=step,
+                        code=code,
+                        model=model_compiled.state_dict(),
+                        optimizers=[opt.state_dict() for opt in optimizers]
+                    )
+
+                    torch.save(log_checkpoint, str(checkpoint_path))
+                    print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+                else:
+                    print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
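+            # --- Editor's note: a sketch, not part of the original script. A checkpoint
+            # written above bundles step, code, model and optimizer state, so restoring
+            # one later would look like the following (the epoch number is illustrative):
+            # ckpt = torch.load(checkpoint_parent_dir / "ckpt_epoch_500.pt", map_location="cuda")
+            # model_compiled.load_state_dict(ckpt["model"])
+            # for opt, opt_sd in zip(optimizers, ckpt["optimizers"]):
+            #     opt.load_state_dict(opt_sd)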
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 07:04:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-08 07:04:04] [Rank 0] PRINT: Constructing model...
+[2025-07-08 07:04:05] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-08 07:04:05] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-08 07:04:05] [Rank 0] PRINT: Testing model forward function:
+[2025-07-08 07:04:08] [Rank 0] PRINT: Model test - Result type:
+[2025-07-08 07:04:08] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-08 07:04:08] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-08 07:04:08] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-08 07:04:08] [Rank 0] PRINT: Model returns:
+[2025-07-08 07:04:08] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-08 07:04:08] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-08 07:04:08] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-08 07:04:08] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-08 07:04:08] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-08 07:04:08] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-08 07:04:08] [Rank 0] PRINT: Model compilation complete.
+[2025-07-08 07:04:08] [Rank 0] PRINT: Starting warmup...
+[2025-07-08 07:05:14] [Rank 0] PRINT: Warmup complete.
+[2025-07-08 07:05:14] [Rank 0] PRINT: Starting training...
+[2025-07-08 07:05:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 07:05:22] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-08 07:05:23] [Rank 0] step:21/10000 train_time:1552ms step_avg:73.91ms
+[2025-07-08 07:05:25] [Rank 0] step:41/10000 train_time:3002ms step_avg:73.23ms
+[2025-07-08 07:05:26] [Rank 0] step:61/10000 train_time:4453ms step_avg:73.01ms
+[2025-07-08 07:05:28] [Rank 0] step:81/10000 train_time:5908ms step_avg:72.94ms
+[2025-07-08 07:05:29] [Rank 0] step:101/10000 train_time:7604ms step_avg:75.29ms
+[2025-07-08 07:05:31] [Rank 0] step:121/10000 train_time:9060ms step_avg:74.88ms
+[2025-07-08 07:05:32] [Rank 0] step:141/10000 train_time:10515ms step_avg:74.58ms
+[2025-07-08 07:05:34] [Rank 0] step:161/10000 train_time:11972ms step_avg:74.36ms
+[2025-07-08 07:05:36] [Rank 0] step:181/10000 train_time:13431ms step_avg:74.21ms
+[2025-07-08 07:05:37] [Rank 0] step:201/10000 train_time:15537ms step_avg:77.30ms
+[2025-07-08 07:05:39] [Rank 0] step:221/10000 train_time:16993ms step_avg:76.89ms
+[2025-07-08 07:05:40] [Rank 0] step:241/10000 train_time:18455ms step_avg:76.58ms
+[2025-07-08 07:05:42] [Rank 0] step:261/10000 train_time:19917ms step_avg:76.31ms
+[2025-07-08 07:05:44] [Rank 0] step:281/10000 train_time:22042ms step_avg:78.44ms
+[2025-07-08 07:05:45] [Rank 0] step:301/10000 train_time:23502ms step_avg:78.08ms
+[2025-07-08 07:05:47] [Rank 0] step:321/10000 train_time:24966ms step_avg:77.78ms
+[2025-07-08 07:05:48] [Rank 0] step:341/10000 train_time:26433ms step_avg:77.52ms
+[2025-07-08 07:05:50] [Rank 0] step:361/10000 train_time:27951ms step_avg:77.43ms
+[2025-07-08 07:05:52] [Rank 0] step:381/10000 train_time:30007ms step_avg:78.76ms
+[2025-07-08 07:05:53] [Rank 0] step:401/10000 train_time:31474ms step_avg:78.49ms
+[2025-07-08 07:05:55] [Rank 0] step:421/10000 train_time:32943ms step_avg:78.25ms
+[2025-07-08 07:05:56] [Rank 0] step:441/10000 train_time:34409ms step_avg:78.03ms
+[2025-07-08 07:05:58] [Rank 0] step:461/10000 train_time:36116ms step_avg:78.34ms
+[2025-07-08 07:05:59] [Rank 0] step:481/10000 train_time:37584ms step_avg:78.14ms
+[2025-07-08 07:06:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 07:06:02] [Rank 0] PRINT: step:500/10000 train_loss:6.8630 val_loss:4.3869 train_time:39054ms step_avg:78.11ms
+[2025-07-08 07:06:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 07:06:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 07:06:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 07:11:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 07:11:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 07:11:22] [Rank 0] Total Loss: 5.5810
+[2025-07-08 07:11:22] [Rank 0] Total FTA: 0.0632
+[2025-07-08 07:11:22] [Rank 0] Group 0 Loss: 5.5504
+[2025-07-08 07:11:22] [Rank 0] Group 1 Loss: 5.6757
+[2025-07-08 07:11:22] [Rank 0] Group 2 Loss: 5.6139
+[2025-07-08 07:11:22] [Rank 0] Group 3 Loss: 5.5227
+[2025-07-08 07:11:22] [Rank 0] Group 4 Loss: 5.5936
+[2025-07-08 07:11:22] [Rank 0] Group 5 Loss: 5.5594
+[2025-07-08 07:11:22] [Rank 0] Group 6 Loss: 5.5817
+[2025-07-08 07:11:22] [Rank 0] Group 7 Loss: 5.5767
+[2025-07-08 07:11:22] [Rank 0] Group 8 Loss: 5.5665
+[2025-07-08 07:11:22] [Rank 0] Group 9 Loss: 5.5942
+[2025-07-08 07:11:22] [Rank 0] Group 10 Loss: 5.5785
+[2025-07-08 07:11:22] [Rank 0] Group 11 Loss: 5.5860
+[2025-07-08 07:11:22] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-08 07:11:22] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 07:11:22] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-08 07:11:22] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-08 07:11:22] [Rank 0] Group 4 FTA: 0.0182
+[2025-07-08 07:11:22] [Rank 0] Group 5 FTA: 0.0443
+[2025-07-08 07:11:22] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-08 07:11:22] [Rank 0] Group 7 FTA: 0.0729
+[2025-07-08 07:11:22] [Rank 0] Group 8 FTA: 0.0599
+[2025-07-08 07:11:22] [Rank 0] Group 9 FTA: 0.0586
+[2025-07-08 07:11:22] [Rank 0] Group 10 FTA: 0.0684
+[2025-07-08 07:11:22] [Rank 0] Group 11 FTA: 0.0576
+[2025-07-08 07:11:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png
+[2025-07-08 07:11:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png
+[2025-07-08 07:11:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png
+[2025-07-08 07:11:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png
+[2025-07-08 07:11:24] [Rank 0] step:501/10000 train_time:39074ms step_avg:77.99ms
+[2025-07-08 07:11:25] [Rank 0] step:521/10000 train_time:40539ms step_avg:77.81ms
+[2025-07-08 07:11:27] [Rank 0] step:541/10000 train_time:42000ms step_avg:77.63ms
+[2025-07-08 07:11:28] [Rank 0] step:561/10000 train_time:43696ms step_avg:77.89ms
+[2025-07-08 07:11:30] [Rank 0] step:581/10000 train_time:45159ms step_avg:77.73ms
+[2025-07-08 07:11:31] [Rank 0] step:601/10000 train_time:46619ms step_avg:77.57ms
+[2025-07-08 07:11:33] [Rank 0] step:621/10000 train_time:48082ms step_avg:77.43ms
+[2025-07-08 07:11:34] [Rank 0] step:641/10000 train_time:49783ms step_avg:77.66ms
+[2025-07-08 07:11:36] [Rank 0] step:661/10000 train_time:51245ms step_avg:77.53ms
+[2025-07-08 07:11:37] [Rank 0] step:681/10000 train_time:52708ms step_avg:77.40ms
+[2025-07-08 07:11:39] [Rank 0] step:701/10000 train_time:54171ms step_avg:77.28ms
+[2025-07-08 07:11:41] [Rank 0] step:721/10000 train_time:56377ms step_avg:78.19ms
+[2025-07-08 07:11:43] [Rank 0] step:741/10000 train_time:57822ms step_avg:78.03ms
+[2025-07-08 07:11:44] [Rank 0] step:761/10000 train_time:59295ms step_avg:77.92ms
+[2025-07-08 07:11:45] [Rank 0] step:781/10000 train_time:60771ms step_avg:77.81ms
+[2025-07-08 07:11:47] [Rank 0] step:801/10000 train_time:62248ms step_avg:77.71ms
+[2025-07-08 07:11:49] [Rank 0] step:821/10000 train_time:64388ms step_avg:78.43ms
+[2025-07-08 07:11:51] [Rank 0] step:841/10000 train_time:65861ms step_avg:78.31ms
+[2025-07-08 07:11:52] [Rank 0] step:861/10000 train_time:67336ms step_avg:78.21ms
+[2025-07-08 07:11:54] [Rank 0] step:881/10000 train_time:68812ms step_avg:78.11ms
+[2025-07-08 07:11:55] [Rank 0] step:901/10000 train_time:70541ms step_avg:78.29ms
+[2025-07-08 07:11:57] [Rank 0] step:921/10000 train_time:71999ms step_avg:78.17ms
+[2025-07-08 07:11:58] [Rank 0] step:941/10000 train_time:73476ms step_avg:78.08ms
+[2025-07-08 07:12:00] [Rank 0] step:961/10000 train_time:74953ms step_avg:77.99ms
+[2025-07-08 07:12:01] [Rank 0] step:981/10000 train_time:76428ms step_avg:77.91ms
+[2025-07-08 07:12:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 07:12:04] [Rank 0] PRINT: step:1000/10000 train_loss:2.9607 val_loss:2.0195 train_time:78139ms step_avg:78.14ms
+[2025-07-08 07:12:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 07:12:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 07:12:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 07:17:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 07:17:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 07:17:27] [Rank 0] Total Loss: 4.0901
+[2025-07-08 07:17:27] [Rank 0] Total FTA: 0.0966
+[2025-07-08 07:17:27] [Rank 0] Group 0 Loss: 4.3259
+[2025-07-08 07:17:27] [Rank 0] Group 1 Loss: 4.1395
+[2025-07-08 07:17:27] [Rank 0] Group 2 Loss: 4.0125
+[2025-07-08 07:17:27] [Rank 0] Group 3 Loss: 3.9662
+[2025-07-08 07:17:27] [Rank 0] Group 4 Loss: 4.0218
+[2025-07-08 07:17:27] [Rank 0] Group 5 Loss: 3.9879
+[2025-07-08 07:17:27] [Rank 0] Group 6 Loss: 4.0381
+[2025-07-08 07:17:27] [Rank 0] Group 7 Loss: 4.0920
+[2025-07-08 07:17:27] [Rank 0] Group 8 Loss: 4.0835
+[2025-07-08 07:17:27] [Rank 0] Group 9 Loss: 4.0401
+[2025-07-08 07:17:27] [Rank 0] Group 10 Loss: 4.0950
+[2025-07-08 07:17:27] [Rank 0] Group 11 Loss: 4.0654
+[2025-07-08 07:17:27] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-08 07:17:27] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 07:17:27] [Rank 0] Group 2 FTA: 0.1823
+[2025-07-08 07:17:27] [Rank 0] Group 3 FTA: 0.0208
+[2025-07-08 07:17:27] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-08 07:17:27] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-08 07:17:27] [Rank 0] Group 6 FTA: 0.1146
+[2025-07-08 07:17:27] [Rank 0] Group 7 FTA: 0.1354
+[2025-07-08 07:17:27] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-08 07:17:27] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-08 07:17:27] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-08 07:17:27] [Rank 0] Group 11 FTA: 0.0791
+[2025-07-08 07:17:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png
+[2025-07-08 07:17:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png
+[2025-07-08 07:17:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png
+[2025-07-08 07:17:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png
+[2025-07-08 07:17:28] [Rank 0] step:1001/10000 train_time:78159ms step_avg:78.08ms
+[2025-07-08 07:17:30] [Rank 0] step:1021/10000 train_time:79636ms step_avg:78.00ms
+[2025-07-08 07:17:31] [Rank 0] step:1041/10000 train_time:81102ms step_avg:77.91ms
+[2025-07-08 07:17:33] [Rank 0] step:1061/10000 train_time:82569ms step_avg:77.82ms
+[2025-07-08 07:17:35] [Rank 0] step:1081/10000 train_time:84703ms step_avg:78.36ms
+[2025-07-08 07:17:36] [Rank 0] step:1101/10000 train_time:86152ms step_avg:78.25ms
+[2025-07-08 07:17:38] [Rank 0] step:1121/10000 train_time:87619ms step_avg:78.16ms
+[2025-07-08 07:17:39] [Rank 0] step:1141/10000 train_time:89089ms step_avg:78.08ms
+[2025-07-08 07:17:41] [Rank 0] step:1161/10000 train_time:90558ms step_avg:78.00ms
+[2025-07-08 07:17:43] [Rank 0] step:1181/10000 train_time:92679ms step_avg:78.48ms
+[2025-07-08 07:17:44] [Rank 0] step:1201/10000 train_time:94149ms step_avg:78.39ms
+[2025-07-08 07:17:46] [Rank 0] step:1221/10000 train_time:95619ms step_avg:78.31ms
+[2025-07-08 07:17:47] [Rank 0] step:1241/10000 train_time:97091ms step_avg:78.24ms
+[2025-07-08 07:17:49] [Rank 0] step:1261/10000 train_time:98565ms step_avg:78.16ms
+[2025-07-08 07:17:50] [Rank 0] step:1281/10000 train_time:100273ms step_avg:78.28ms
+[2025-07-08 07:17:52] [Rank 0] step:1301/10000 train_time:101749ms step_avg:78.21ms
+[2025-07-08 07:17:53] [Rank 0] step:1321/10000 train_time:103222ms step_avg:78.14ms
+[2025-07-08 07:17:55] [Rank 0] step:1341/10000 train_time:104696ms step_avg:78.07ms
+[2025-07-08 07:17:56] [Rank 0] step:1361/10000 train_time:106406ms step_avg:78.18ms
+[2025-07-08 07:17:58] [Rank 0] step:1381/10000 train_time:107882ms step_avg:78.12ms
+[2025-07-08 07:17:59] [Rank 0] step:1401/10000 train_time:109450ms step_avg:78.12ms
+[2025-07-08 07:18:01] [Rank 0] step:1421/10000 train_time:110928ms step_avg:78.06ms
+[2025-07-08 07:18:03] [Rank 0] step:1441/10000 train_time:112403ms step_avg:78.00ms
+[2025-07-08 07:18:04] [Rank 0] step:1461/10000 train_time:114543ms step_avg:78.40ms
+[2025-07-08 07:18:06] [Rank 0] step:1481/10000 train_time:116016ms step_avg:78.34ms
+[2025-07-08 07:18:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 07:18:08] [Rank 0] PRINT: step:1500/10000 train_loss:1.7504 val_loss:1.5972 train_time:117491ms step_avg:78.33ms
+[2025-07-08 07:18:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 07:18:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 07:18:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 07:23:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 07:23:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 07:23:34] [Rank 0] Total Loss: 4.2334
+[2025-07-08 07:23:34] [Rank 0] Total FTA: 0.1335
+[2025-07-08 07:23:34] [Rank 0] Group 0 Loss: 4.4835
+[2025-07-08 07:23:34] [Rank 0] Group 1 Loss: 4.2693
+[2025-07-08 07:23:34] [Rank 0] Group 2 Loss: 4.0381
+[2025-07-08 07:23:34] [Rank 0] Group 3 Loss: 4.1849
+[2025-07-08 07:23:34] [Rank 0] Group 4 Loss: 4.1759
+[2025-07-08 07:23:34] [Rank 0] Group 5 Loss: 4.1182
+[2025-07-08 07:23:34] [Rank 0] Group 6 Loss: 4.1176
+[2025-07-08 07:23:34] [Rank 0] Group 7 Loss: 4.2425
+[2025-07-08 07:23:34] [Rank 0] Group 8 Loss: 4.2352
+[2025-07-08 07:23:34] [Rank 0] Group 9 Loss: 4.1957
+[2025-07-08 07:23:34] [Rank 0] Group 10 Loss: 4.2642
+[2025-07-08 07:23:34] [Rank 0] Group 11 Loss: 4.2218
+[2025-07-08 07:23:34] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-08 07:23:34] [Rank 0] Group 1 FTA: 0.3464
+[2025-07-08 07:23:34] [Rank 0] Group 2 FTA: 0.1562
+[2025-07-08 07:23:34] [Rank 0] Group 3 FTA: 0.0677
+[2025-07-08 07:23:34] [Rank 0] Group 4 FTA: 0.0573
+[2025-07-08 07:23:34] [Rank 0] Group 5 FTA: 0.0859
+[2025-07-08 07:23:34] [Rank 0] Group 6 FTA: 0.1354
+[2025-07-08 07:23:34] [Rank 0] Group 7 FTA: 0.1302
+[2025-07-08 07:23:34] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-08 07:23:34] [Rank 0] Group 9 FTA: 0.1094
+[2025-07-08 07:23:34] [Rank 0] Group 10 FTA: 0.1172
+[2025-07-08 07:23:34] [Rank 0] Group 11 FTA: 0.1162
+[2025-07-08 07:23:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png
+[2025-07-08 07:23:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png
+[2025-07-08 07:23:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png
+[2025-07-08 07:23:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png
+[2025-07-08 07:23:36] [Rank 0] step:1501/10000 train_time:117511ms step_avg:78.29ms
+[2025-07-08 07:23:37] [Rank 0] step:1521/10000 train_time:118976ms step_avg:78.22ms
+[2025-07-08 07:23:39] [Rank 0] step:1541/10000 train_time:121096ms step_avg:78.58ms
+[2025-07-08 07:23:41] [Rank 0] step:1561/10000 train_time:122564ms step_avg:78.52ms
+[2025-07-08 07:23:42] [Rank 0] step:1581/10000 train_time:124030ms step_avg:78.45ms
+[2025-07-08 07:23:44] [Rank 0] step:1601/10000 train_time:125498ms step_avg:78.39ms
+[2025-07-08 07:23:45] [Rank 0] step:1621/10000 train_time:127019ms step_avg:78.36ms
+[2025-07-08 07:23:47] [Rank 0] step:1641/10000 train_time:128672ms step_avg:78.41ms
+[2025-07-08 07:23:48] [Rank 0] step:1661/10000 train_time:130142ms step_avg:78.35ms
+[2025-07-08 07:23:50] [Rank 0] step:1681/10000 train_time:131609ms step_avg:78.29ms
+[2025-07-08 07:23:51] [Rank 0] step:1701/10000 train_time:133079ms step_avg:78.24ms
+[2025-07-08 07:23:53] [Rank 0] step:1721/10000 train_time:134785ms step_avg:78.32ms
+[2025-07-08 07:23:55] [Rank 0] step:1741/10000 train_time:136254ms step_avg:78.26ms
+[2025-07-08 07:23:56] [Rank 0] step:1761/10000 train_time:137725ms step_avg:78.21ms
+[2025-07-08 07:23:57] [Rank 0] step:1781/10000 train_time:139193ms step_avg:78.15ms
+[2025-07-08 07:23:59] [Rank 0] step:1801/10000 train_time:140923ms step_avg:78.25ms
+[2025-07-08 07:24:01] [Rank 0] step:1821/10000 train_time:142375ms step_avg:78.19ms
+[2025-07-08 07:24:02] [Rank 0] step:1841/10000 train_time:143849ms step_avg:78.14ms
+[2025-07-08 07:24:04] [Rank 0] step:1861/10000 train_time:145321ms step_avg:78.09ms
+[2025-07-08 07:24:05] [Rank 0] step:1881/10000 train_time:146794ms step_avg:78.04ms
+[2025-07-08 07:24:07] [Rank 0] step:1901/10000 train_time:148507ms step_avg:78.12ms
+[2025-07-08 07:24:08] [Rank 0] step:1921/10000 train_time:149980ms step_avg:78.07ms
+[2025-07-08 07:24:10] [Rank 0] step:1941/10000 train_time:151455ms step_avg:78.03ms
+[2025-07-08 07:24:11] [Rank 0] step:1961/10000 train_time:152934ms step_avg:77.99ms
+[2025-07-08 07:24:13] [Rank 0] step:1981/10000 train_time:154408ms step_avg:77.94ms
+[2025-07-08 07:24:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 07:24:15] [Rank 0] PRINT: step:2000/10000 train_loss:1.5030 val_loss:1.4431 train_time:156119ms step_avg:78.06ms
+[2025-07-08 07:24:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 07:24:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 07:24:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 07:29:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 07:29:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 07:29:41] [Rank 0] Total Loss: 4.5217
+[2025-07-08 07:29:41] [Rank 0] Total FTA: 0.2382
+[2025-07-08 07:29:41] [Rank 0] Group 0 Loss: 4.8328
+[2025-07-08 07:29:41] [Rank 0] Group 1 Loss: 4.4864
+[2025-07-08 07:29:41] [Rank 0] Group 2 Loss: 4.3330
+[2025-07-08 07:29:42] [Rank 0] Group 3 Loss: 4.4427
+[2025-07-08 07:29:42] [Rank 0] Group 4 Loss: 4.4421
+[2025-07-08 07:29:42] [Rank 0] Group 5 Loss: 4.3269
+[2025-07-08 07:29:42] [Rank 0] Group 6 Loss: 4.4497
+[2025-07-08 07:29:42] [Rank 0] Group 7 Loss: 4.5477
+[2025-07-08 07:29:42] [Rank 0] Group 8 Loss: 4.4649
+[2025-07-08 07:29:42] [Rank 0] Group 9 Loss: 4.5227
+[2025-07-08 07:29:42] [Rank 0] Group 10 Loss: 4.5046
+[2025-07-08 07:29:42] [Rank 0] Group 11 Loss: 4.5514
+[2025-07-08 07:29:42] [Rank 0] Group 0 FTA: 0.5475
+[2025-07-08 07:29:42] [Rank 0] Group 1 FTA: 0.3307
+[2025-07-08 07:29:42] [Rank 0] Group 2 FTA: 0.1719
+[2025-07-08 07:29:42] [Rank 0] Group 3 FTA: 0.1719
+[2025-07-08 07:29:42] [Rank 0] Group 4 FTA: 0.1094
+[2025-07-08 07:29:42] [Rank 0] Group 5 FTA: 0.1562
+[2025-07-08 07:29:42] [Rank 0] Group 6 FTA: 0.1745
+[2025-07-08 07:29:42] [Rank 0] Group 7 FTA: 0.2031
+[2025-07-08 07:29:42] [Rank 0] Group 8 FTA: 0.1875
+[2025-07-08 07:29:42] [Rank 0] Group 9 FTA: 0.1797
+[2025-07-08 07:29:42] [Rank 0] Group 10 FTA: 0.2031
+[2025-07-08 07:29:42] [Rank 0] Group 11 FTA: 0.1885
+[2025-07-08 07:29:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png +[2025-07-08 07:29:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png +[2025-07-08 07:29:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png +[2025-07-08 07:29:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png +[2025-07-08 07:29:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png +[2025-07-08 07:29:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png +[2025-07-08 07:29:43] [Rank 0] step:2001/10000 train_time:156140ms step_avg:78.03ms +[2025-07-08 07:29:43] [Rank 0] step:2001/10000 train_time:156140ms step_avg:78.03ms +[2025-07-08 07:29:45] [Rank 0] step:2021/10000 train_time:157613ms step_avg:77.99ms +[2025-07-08 07:29:45] [Rank 0] step:2021/10000 train_time:157613ms step_avg:77.99ms +[2025-07-08 07:29:46] [Rank 0] step:2041/10000 train_time:159078ms step_avg:77.94ms +[2025-07-08 07:29:46] [Rank 0] step:2041/10000 train_time:159078ms step_avg:77.94ms +[2025-07-08 07:29:48] [Rank 0] step:2061/10000 train_time:160544ms step_avg:77.90ms +[2025-07-08 07:29:48] [Rank 0] step:2061/10000 train_time:160544ms step_avg:77.90ms +[2025-07-08 07:29:50] [Rank 0] step:2081/10000 train_time:162680ms step_avg:78.17ms +[2025-07-08 07:29:50] [Rank 0] step:2081/10000 train_time:162680ms step_avg:78.17ms +[2025-07-08 07:29:51] [Rank 0] step:2101/10000 train_time:164146ms step_avg:78.13ms +[2025-07-08 07:29:51] [Rank 0] step:2101/10000 train_time:164146ms step_avg:78.13ms +[2025-07-08 07:29:53] [Rank 0] step:2121/10000 train_time:165614ms step_avg:78.08ms +[2025-07-08 07:29:53] [Rank 0] step:2121/10000 train_time:165614ms step_avg:78.08ms +[2025-07-08 07:29:54] [Rank 0] step:2141/10000 train_time:167084ms step_avg:78.04ms +[2025-07-08 07:29:54] [Rank 0] step:2141/10000 train_time:167084ms step_avg:78.04ms +[2025-07-08 07:29:56] [Rank 0] step:2161/10000 train_time:168550ms step_avg:78.00ms +[2025-07-08 07:29:56] [Rank 0] step:2161/10000 train_time:168550ms step_avg:78.00ms +[2025-07-08 07:29:58] [Rank 0] step:2181/10000 train_time:170680ms step_avg:78.26ms +[2025-07-08 07:29:58] [Rank 0] step:2181/10000 train_time:170680ms step_avg:78.26ms +[2025-07-08 07:29:59] [Rank 0] step:2201/10000 train_time:172148ms step_avg:78.21ms +[2025-07-08 07:29:59] [Rank 0] step:2201/10000 train_time:172148ms step_avg:78.21ms +[2025-07-08 07:30:01] [Rank 0] step:2221/10000 train_time:173618ms step_avg:78.17ms +[2025-07-08 07:30:01] [Rank 0] step:2221/10000 train_time:173618ms step_avg:78.17ms +[2025-07-08 07:30:02] [Rank 0] step:2241/10000 train_time:175107ms step_avg:78.14ms +[2025-07-08 07:30:02] [Rank 0] step:2241/10000 train_time:175107ms step_avg:78.14ms +[2025-07-08 07:30:04] [Rank 0] step:2261/10000 train_time:176837ms step_avg:78.21ms +[2025-07-08 07:30:04] [Rank 0] step:2261/10000 train_time:176837ms step_avg:78.21ms +[2025-07-08 07:30:05] [Rank 0] step:2281/10000 train_time:178329ms step_avg:78.18ms +[2025-07-08 07:30:05] [Rank 0] step:2281/10000 train_time:178329ms step_avg:78.18ms +[2025-07-08 07:30:07] [Rank 0] step:2301/10000 train_time:179823ms step_avg:78.15ms +[2025-07-08 07:30:07] [Rank 
0] step:2301/10000 train_time:179823ms step_avg:78.15ms +[2025-07-08 07:30:08] [Rank 0] step:2321/10000 train_time:181318ms step_avg:78.12ms +[2025-07-08 07:30:08] [Rank 0] step:2321/10000 train_time:181318ms step_avg:78.12ms +[2025-07-08 07:30:10] [Rank 0] step:2341/10000 train_time:182814ms step_avg:78.09ms +[2025-07-08 07:30:10] [Rank 0] step:2341/10000 train_time:182814ms step_avg:78.09ms +[2025-07-08 07:30:11] [Rank 0] step:2361/10000 train_time:184342ms step_avg:78.08ms +[2025-07-08 07:30:11] [Rank 0] step:2361/10000 train_time:184342ms step_avg:78.08ms +[2025-07-08 07:30:13] [Rank 0] step:2381/10000 train_time:185843ms step_avg:78.05ms +[2025-07-08 07:30:13] [Rank 0] step:2381/10000 train_time:185843ms step_avg:78.05ms +[2025-07-08 07:30:14] [Rank 0] step:2401/10000 train_time:187338ms step_avg:78.03ms +[2025-07-08 07:30:14] [Rank 0] step:2401/10000 train_time:187338ms step_avg:78.03ms +[2025-07-08 07:30:16] [Rank 0] step:2421/10000 train_time:188837ms step_avg:78.00ms +[2025-07-08 07:30:16] [Rank 0] step:2421/10000 train_time:188837ms step_avg:78.00ms +[2025-07-08 07:30:18] [Rank 0] step:2441/10000 train_time:190983ms step_avg:78.24ms +[2025-07-08 07:30:18] [Rank 0] step:2441/10000 train_time:190983ms step_avg:78.24ms +[2025-07-08 07:30:19] [Rank 0] step:2461/10000 train_time:192478ms step_avg:78.21ms +[2025-07-08 07:30:19] [Rank 0] step:2461/10000 train_time:192478ms step_avg:78.21ms +[2025-07-08 07:30:21] [Rank 0] step:2481/10000 train_time:193975ms step_avg:78.18ms +[2025-07-08 07:30:21] [Rank 0] step:2481/10000 train_time:193975ms step_avg:78.18ms +[2025-07-08 07:30:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:30:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:30:23] [Rank 0] PRINT: step:2500/10000 train_loss:1.3718 val_loss:1.3132 train_time:195474ms step_avg:78.19ms +[2025-07-08 07:30:23] [Rank 0] PRINT: step:2500/10000 train_loss:1.3718 val_loss:1.3132 train_time:195474ms step_avg:78.19ms +[2025-07-08 07:30:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:30:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:30:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 07:30:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 07:30:24] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 07:30:24] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 07:35:47] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 07:35:47] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 07:35:47] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 07:35:47] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 07:35:47] [Rank 0] Total Loss: 4.6518 +[2025-07-08 07:35:47] [Rank 0] Total Loss: 4.6518 +[2025-07-08 07:35:47] [Rank 0] Total FTA: 0.2821 +[2025-07-08 07:35:47] [Rank 0] Total FTA: 0.2821 +[2025-07-08 07:35:47] [Rank 0] Group 0 Loss: 4.8620 +[2025-07-08 07:35:47] [Rank 0] Group 0 Loss: 4.8620 +[2025-07-08 07:35:47] [Rank 0] Group 1 Loss: 4.5959 +[2025-07-08 07:35:47] [Rank 0] Group 1 Loss: 4.5959 +[2025-07-08 07:35:47] [Rank 0] Group 2 Loss: 4.4550 +[2025-07-08 07:35:47] [Rank 0] Group 2 Loss: 4.4550 +[2025-07-08 07:35:47] [Rank 0] Group 3 Loss: 4.8210 +[2025-07-08 07:35:47] [Rank 0] Group 3 Loss: 4.8210 +[2025-07-08 07:35:47] [Rank 0] Group 4 Loss: 4.5367 +[2025-07-08 07:35:47] [Rank 0] Group 4 Loss: 4.5367 +[2025-07-08 07:35:47] [Rank 0] Group 5 Loss: 4.5497 +[2025-07-08 07:35:47] [Rank 0] Group 5 Loss: 4.5497 +[2025-07-08 07:35:47] [Rank 0] Group 6 Loss: 4.5090 +[2025-07-08 07:35:47] [Rank 0] Group 6 Loss: 4.5090 +[2025-07-08 07:35:47] [Rank 0] Group 7 Loss: 4.6570 +[2025-07-08 07:35:47] [Rank 0] Group 7 Loss: 4.6570 +[2025-07-08 07:35:47] [Rank 0] Group 8 Loss: 4.7085 +[2025-07-08 07:35:47] [Rank 0] Group 8 Loss: 4.7085 +[2025-07-08 07:35:47] [Rank 0] Group 9 Loss: 4.6182 +[2025-07-08 07:35:47] [Rank 0] Group 9 Loss: 4.6182 +[2025-07-08 07:35:47] [Rank 0] Group 10 Loss: 4.6935 +[2025-07-08 07:35:47] [Rank 0] Group 10 Loss: 4.6935 +[2025-07-08 07:35:47] [Rank 0] Group 11 Loss: 4.6244 +[2025-07-08 07:35:47] [Rank 0] Group 11 Loss: 4.6244 +[2025-07-08 07:35:47] [Rank 0] Group 0 FTA: 0.3264 +[2025-07-08 07:35:47] [Rank 0] Group 0 FTA: 0.3264 +[2025-07-08 07:35:47] [Rank 0] Group 1 FTA: 0.3646 +[2025-07-08 07:35:47] [Rank 0] Group 1 FTA: 0.3646 +[2025-07-08 07:35:47] [Rank 0] Group 2 FTA: 0.2552 +[2025-07-08 07:35:47] [Rank 0] Group 2 FTA: 0.2552 +[2025-07-08 07:35:47] [Rank 0] Group 3 FTA: 0.2214 +[2025-07-08 07:35:47] [Rank 0] Group 3 FTA: 0.2214 +[2025-07-08 07:35:47] [Rank 0] Group 4 FTA: 0.1641 +[2025-07-08 07:35:47] [Rank 0] Group 4 FTA: 0.1641 +[2025-07-08 07:35:47] [Rank 0] Group 5 FTA: 0.2708 +[2025-07-08 07:35:47] [Rank 0] Group 5 FTA: 0.2708 +[2025-07-08 07:35:47] [Rank 0] Group 6 FTA: 0.2865 +[2025-07-08 07:35:47] [Rank 0] Group 6 FTA: 0.2865 +[2025-07-08 07:35:47] [Rank 0] Group 7 FTA: 0.2578 +[2025-07-08 07:35:47] [Rank 0] Group 7 FTA: 0.2578 +[2025-07-08 07:35:47] [Rank 0] Group 8 FTA: 0.2865 +[2025-07-08 07:35:47] [Rank 0] Group 8 FTA: 0.2865 +[2025-07-08 07:35:47] [Rank 0] Group 9 FTA: 0.2969 +[2025-07-08 07:35:47] [Rank 0] Group 9 FTA: 0.2969 +[2025-07-08 07:35:47] [Rank 0] Group 10 FTA: 0.2871 +[2025-07-08 07:35:47] [Rank 0] Group 10 FTA: 0.2871 +[2025-07-08 07:35:47] [Rank 0] Group 11 FTA: 0.2988 +[2025-07-08 07:35:47] [Rank 0] Group 11 FTA: 0.2988 +[2025-07-08 07:35:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png +[2025-07-08 07:35:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_loss_curves.png +[2025-07-08 07:35:48] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png +[2025-07-08 07:35:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/per_class_acc_curves.png +[2025-07-08 07:35:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png +[2025-07-08 07:35:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_loss_curve.png +[2025-07-08 07:35:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png +[2025-07-08 07:35:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_46/total_acc_curve.png +[2025-07-08 07:35:49] [Rank 0] step:2501/10000 train_time:195493ms step_avg:78.17ms +[2025-07-08 07:35:49] [Rank 0] step:2501/10000 train_time:195493ms step_avg:78.17ms +[2025-07-08 07:35:51] [Rank 0] step:2521/10000 train_time:197675ms step_avg:78.41ms +[2025-07-08 07:35:51] [Rank 0] step:2521/10000 train_time:197675ms step_avg:78.41ms +[2025-07-08 07:35:53] [Rank 0] step:2541/10000 train_time:199146ms step_avg:78.37ms +[2025-07-08 07:35:53] [Rank 0] step:2541/10000 train_time:199146ms step_avg:78.37ms +[2025-07-08 07:35:54] [Rank 0] step:2561/10000 train_time:200788ms step_avg:78.40ms +[2025-07-08 07:35:54] [Rank 0] step:2561/10000 train_time:200788ms step_avg:78.40ms +[2025-07-08 07:35:56] [Rank 0] step:2581/10000 train_time:202284ms step_avg:78.37ms +[2025-07-08 07:35:56] [Rank 0] step:2581/10000 train_time:202284ms step_avg:78.37ms +[2025-07-08 07:35:57] [Rank 0] step:2601/10000 train_time:203774ms step_avg:78.34ms +[2025-07-08 07:35:57] [Rank 0] step:2601/10000 train_time:203774ms step_avg:78.34ms +[2025-07-08 07:35:59] [Rank 0] step:2621/10000 train_time:205507ms step_avg:78.41ms +[2025-07-08 07:35:59] [Rank 0] step:2621/10000 train_time:205507ms step_avg:78.41ms +[2025-07-08 07:36:00] [Rank 0] step:2641/10000 train_time:206998ms step_avg:78.38ms +[2025-07-08 07:36:00] [Rank 0] step:2641/10000 train_time:206998ms step_avg:78.38ms +[2025-07-08 07:36:02] [Rank 0] step:2661/10000 train_time:208490ms step_avg:78.35ms +[2025-07-08 07:36:02] [Rank 0] step:2661/10000 train_time:208490ms step_avg:78.35ms +[2025-07-08 07:36:03] [Rank 0] step:2681/10000 train_time:209983ms step_avg:78.32ms +[2025-07-08 07:36:03] [Rank 0] step:2681/10000 train_time:209983ms step_avg:78.32ms +[2025-07-08 07:36:06] [Rank 0] step:2701/10000 train_time:212139ms step_avg:78.54ms +[2025-07-08 07:36:06] [Rank 0] step:2701/10000 train_time:212139ms step_avg:78.54ms +[2025-07-08 07:36:07] [Rank 0] step:2721/10000 train_time:213613ms step_avg:78.51ms +[2025-07-08 07:36:07] [Rank 0] step:2721/10000 train_time:213613ms step_avg:78.51ms +[2025-07-08 07:36:08] [Rank 0] step:2741/10000 train_time:215108ms step_avg:78.48ms +[2025-07-08 07:36:08] [Rank 0] step:2741/10000 train_time:215108ms step_avg:78.48ms +[2025-07-08 07:36:10] [Rank 0] step:2761/10000 train_time:216599ms step_avg:78.45ms +[2025-07-08 07:36:10] [Rank 0] step:2761/10000 train_time:216599ms step_avg:78.45ms +[2025-07-08 07:36:11] [Rank 0] step:2781/10000 train_time:218093ms step_avg:78.42ms +[2025-07-08 07:36:11] [Rank 0] step:2781/10000 train_time:218093ms step_avg:78.42ms +[2025-07-08 07:36:13] [Rank 0] step:2801/10000 train_time:219827ms step_avg:78.48ms +[2025-07-08 07:36:13] [Rank 
0] step:2801/10000 train_time:219827ms step_avg:78.48ms +[2025-07-08 07:36:15] [Rank 0] step:2821/10000 train_time:221320ms step_avg:78.45ms +[2025-07-08 07:36:15] [Rank 0] step:2821/10000 train_time:221320ms step_avg:78.45ms +[2025-07-08 07:36:16] [Rank 0] step:2841/10000 train_time:222814ms step_avg:78.43ms +[2025-07-08 07:36:16] [Rank 0] step:2841/10000 train_time:222814ms step_avg:78.43ms +[2025-07-08 07:36:18] [Rank 0] step:2861/10000 train_time:224314ms step_avg:78.40ms +[2025-07-08 07:36:18] [Rank 0] step:2861/10000 train_time:224314ms step_avg:78.40ms +[2025-07-08 07:36:20] [Rank 0] step:2881/10000 train_time:226477ms step_avg:78.61ms +[2025-07-08 07:36:20] [Rank 0] step:2881/10000 train_time:226477ms step_avg:78.61ms +[2025-07-08 07:36:21] [Rank 0] step:2901/10000 train_time:227951ms step_avg:78.58ms +[2025-07-08 07:36:21] [Rank 0] step:2901/10000 train_time:227951ms step_avg:78.58ms +[2025-07-08 07:36:23] [Rank 0] step:2921/10000 train_time:229446ms step_avg:78.55ms +[2025-07-08 07:36:23] [Rank 0] step:2921/10000 train_time:229446ms step_avg:78.55ms +[2025-07-08 07:36:24] [Rank 0] step:2941/10000 train_time:230943ms step_avg:78.53ms +[2025-07-08 07:36:24] [Rank 0] step:2941/10000 train_time:230943ms step_avg:78.53ms +[2025-07-08 07:36:26] [Rank 0] step:2961/10000 train_time:232439ms step_avg:78.50ms +[2025-07-08 07:36:26] [Rank 0] step:2961/10000 train_time:232439ms step_avg:78.50ms +[2025-07-08 07:36:28] [Rank 0] step:2981/10000 train_time:234593ms step_avg:78.70ms +[2025-07-08 07:36:28] [Rank 0] step:2981/10000 train_time:234593ms step_avg:78.70ms +[2025-07-08 07:36:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:36:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:36:30] [Rank 0] PRINT: step:3000/10000 train_loss:1.2720 val_loss:1.2254 train_time:236090ms step_avg:78.70ms +[2025-07-08 07:36:30] [Rank 0] PRINT: step:3000/10000 train_loss:1.2720 val_loss:1.2254 train_time:236090ms step_avg:78.70ms +[2025-07-08 07:36:30] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:36:30] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:36:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 07:36:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 07:36:31] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 07:36:31] [Rank 0] Evaluation set size after sampling: 5633 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/config.json b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..275f054d58c46b619f094c52c785cbf1d36c61b5 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.0005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "35de3d1d-34b5-4264-8691-ac049a4ae838", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..e7e5c767c49fb8420017d01d2060c95c6fb43f28 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dff447499ed0a60ab03d6a8bf158171078cc91e8c2829930e77d73929f5c7e9 +size 442273 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..a043c2e324f152c08f77743bc10e9545ccf91941 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2b3942624d77a3f9e6efe1af266163c47d4204fadc4ec4ee9d2b7ff728925bcb +size 404077 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..8f8ae7c57b886d6299caf0432b5d985896611c20 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79599e6cbb86b624deb3107b53dd37a1bc2ce0d553e56aba769085478c7d839c +size 111891 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..c2679ef74971f02a6705d9d5ef583bd1761cbab5 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3987c119486a96dc666c08c07c11977df8aa7db8d23e9972ea06e5e5892ee9a +size 126419 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/training_log_35de3d1d-34b5-4264-8691-ac049a4ae838.txt b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/training_log_35de3d1d-34b5-4264-8691-ac049a4ae838.txt new file mode 100644 index 0000000000000000000000000000000000000000..0b07ddcec1bf26e8818e3786823f4832b7b49969 --- /dev/null +++ 
b/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/training_log_35de3d1d-34b5-4264-8691-ac049a4ae838.txt @@ -0,0 +1,5132 @@ +[2025-07-07 00:00:11] [Rank 0] PRINT: --- Script Start: Mon Jul 7 00:00:11 2025 --- +[2025-07-07 00:00:11] [Rank 0] PRINT: --- Script Start: Mon Jul 7 00:00:11 2025 --- +[2025-07-07 00:00:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-07 00:00:11] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-07 00:00:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 00:00:11] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 00:00:11] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-07 00:00:11] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-07 00:00:11] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48 +[2025-07-07 00:00:11] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48 +[2025-07-07 00:00:11] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + 
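+        # Shard layout (per the header asserts above): 256 int32 header words
+        # (magic 20240520, version 1, token count at header[2]), followed by
+        # num_tokens uint16 token ids; the seek below skips the 256 * 4 header
+        # bytes, and the readinto is expected to return 2 bytes per token.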
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so multi-epoch training simply wraps around
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
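+# A hypothetical single-node launch for this configuration (the script filename
+# is assumed here, it is not recorded in this log):
+#   torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0005 --seed 48
+# torchrun supplies the RANK/LOCAL_RANK/WORLD_SIZE variables read above, and the
+# CLI args determine the run directory name, e.g. mode_0_param_qkvo_lr_0.0005_seed_48.
+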
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the per-run log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
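+        # e.g. a cleaned item "When was X born? 1941" (the cleaner above has
+        # already stripped "Answer:") splits into prompt "When was X born?"
+        # and answer "1941".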
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable, then linear decay
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # The original `assert 0 <= x < 1` would fail when step == num_iterations,
+    # so x is clamped instead, which also tolerates steps beyond num_iterations.
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Guard against division by zero if cooldown_frac is 0
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # clamp, as in get_lr
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile 'model'; the compiled module is bound to 'model_compiled' below.
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
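+# A minimal, optional sanity check for the optimizer-mode routing above. This is
+# a sketch, not part of the original run: `_check_param_partition` is a
+# hypothetical helper, written under the assumption that every trainable
+# parameter should be owned by exactly one Adam/Muon param group.
+def _check_param_partition(model: nn.Module, optimizers: list) -> None:
+    owner_counts = {}
+    for opt in optimizers:
+        for group in opt.param_groups:
+            for p in group["params"]:
+                owner_counts[id(p)] = owner_counts.get(id(p), 0) + 1
+    duplicated = sum(1 for c in owner_counts.values() if c > 1)
+    missing = [n for n, p in model.named_parameters()
+               if p.requires_grad and id(p) not in owner_counts]
+    assert duplicated == 0, f"{duplicated} params are in more than one optimizer group"
+    assert not missing, f"params not covered by any optimizer: {missing}"
+
+# _check_param_partition(model, optimizers)  # uncomment to validate a given optimizer_mode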
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) is not evenly divisible by val_batch_size ({val_batch_size}); some tokens will be skipped.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: no validation steps were completed; val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / step if step > 0 else 0.0
+
+        # first_token_acc = 0.0
+        # ft_correct = 0
+        # ft_total = 0
+        # if master_process and ft_tokenizer is not None:
+        #     try:
+        #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #             model_for_inference, ft_tokenizer, device, num_samples=1000
+        #         )
+        #     except Exception as e:
+        #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+        #
+        # if world_size > 1:
+        #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #     ft_total_tensor = torch.tensor(ft_total, device=device)
+        #     dist.broadcast(ft_acc_tensor, 0)
+        #     dist.broadcast(ft_correct_tensor, 0)
+        #     dist.broadcast(ft_total_tensor, 0)
+        #     first_token_acc = ft_acc_tensor.item()
+        #     ft_correct = int(ft_correct_tensor.item())
+        #     ft_total = int(ft_total_tensor.item())
+
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
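+            # For reference, generate_powerlaw_selection_counts(m) assigns group g
+            # (0..m) 2**(g-1) classes (one class for g=0), each with 2**(m-g)
+            # samples, so sample counts fall off as a power law across groups.
+            # A worked example for a small m, e.g. m=3:
+            #   selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+            #   class_groups     = [0, 1, 2, 2, 3, 3, 3, 3]
+            # This run uses M_FOR_POWERLAW = 11, i.e. 1024 classes in the rarest group.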
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Muon momentum warmup: ramp linearly from 0.85 to 0.95 over the first 300 steps
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_loss_per_token:{train_loss_per_token:.4f} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
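+# A worked example of the stable-then-decay schedule above, assuming this run's
+# config (num_iterations=10000, cooldown_frac=0.8): the LR multiplier is 1.0 for
+# the first 2000 steps (x < 0.2), then decays linearly toward 0.1:
+#   get_lr(2000)  -> w = 0.8/0.8 = 1.0 -> 1.0
+#   get_lr(6000)  -> w = 0.4/0.8 = 0.5 -> 0.5*1.0 + 0.5*0.1 = 0.55
+#   get_lr(10000) -> w = 0.0           -> 0.1
+# Muon's momentum is ramped independently of the LR multiplier:
+#   momentum(step) = (1 - min(step/300, 1))*0.85 + min(step/300, 1)*0.95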
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 00:00:12] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 00:00:12] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-07 00:00:12] [Rank 0] PRINT: Constructing model...
+[2025-07-07 00:00:13] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 00:00:13] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 00:00:13] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 00:00:14] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 00:00:14] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 00:00:14] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 00:00:14] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 00:00:14] [Rank 0] PRINT: Model returns:
+[2025-07-07 00:00:14] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 00:00:14] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-07 00:00:14] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-07 00:00:14] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-07 00:00:14] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-07 00:00:14] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 00:00:14] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 00:00:14] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 00:01:27] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 00:01:27] [Rank 0] PRINT: Starting training...
+[2025-07-07 00:01:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:01:34] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 00:01:36] [Rank 0] step:21/10000 train_time:1749ms step_avg:83.27ms
+[2025-07-07 00:01:38] [Rank 0] step:41/10000 train_time:3203ms step_avg:78.13ms
+[2025-07-07 00:01:39] [Rank 0] step:61/10000 train_time:4659ms step_avg:76.37ms
+[2025-07-07 00:01:41] [Rank 0] step:81/10000 train_time:6112ms step_avg:75.45ms
+[2025-07-07 00:01:43] [Rank 0] step:101/10000 train_time:8236ms step_avg:81.55ms
+[2025-07-07 00:01:44] [Rank 0] step:121/10000 train_time:9692ms step_avg:80.10ms
+[2025-07-07 00:01:46] [Rank 0] step:141/10000 train_time:11150ms step_avg:79.08ms
+[2025-07-07 00:01:47] [Rank 0] step:161/10000 train_time:12608ms step_avg:78.31ms
+[2025-07-07 00:01:49] [Rank 0] step:181/10000 train_time:14066ms step_avg:77.71ms
+[2025-07-07 00:01:50] [Rank 0] step:201/10000 train_time:15767ms step_avg:78.44ms
+[2025-07-07 00:01:52] [Rank 0] step:221/10000 train_time:17228ms step_avg:77.95ms
+[2025-07-07 00:01:53] [Rank 0] step:241/10000 train_time:18689ms step_avg:77.55ms
+[2025-07-07 00:01:55] [Rank 0] step:261/10000 train_time:20152ms step_avg:77.21ms
+[2025-07-07 00:01:56] [Rank 0] step:281/10000 train_time:21850ms step_avg:77.76ms
+[2025-07-07 00:01:58] [Rank 0] step:301/10000 train_time:23318ms step_avg:77.47ms
+[2025-07-07 00:01:59] [Rank 0] step:321/10000 train_time:24785ms step_avg:77.21ms
+[2025-07-07 00:02:01] [Rank 0] step:341/10000 train_time:26254ms step_avg:76.99ms
+[2025-07-07 00:02:03] [Rank 0] step:361/10000 train_time:27774ms step_avg:76.94ms
+[2025-07-07 00:02:04] [Rank 0] step:381/10000 train_time:29841ms step_avg:78.32ms
+[2025-07-07 00:02:06] [Rank 0] step:401/10000 train_time:31308ms step_avg:78.07ms
+[2025-07-07 00:02:07] [Rank 0] step:421/10000 train_time:32775ms step_avg:77.85ms
+[2025-07-07 00:02:09] [Rank 0] step:441/10000 train_time:34244ms step_avg:77.65ms
+[2025-07-07 00:02:11] [Rank 0] step:461/10000 train_time:36362ms step_avg:78.88ms
+[2025-07-07 00:02:12] [Rank 0] step:481/10000 train_time:37831ms step_avg:78.65ms
+[2025-07-07 00:02:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:02:15] [Rank 0] PRINT: step:500/10000 train_loss:6.8669 val_loss:4.3929 train_time:39300ms step_avg:78.60ms
+[2025-07-07 00:02:15] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:02:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:02:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:07:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:07:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:07:36] [Rank 0] Total Loss: 5.6270
+[2025-07-07 00:07:36] [Rank 0] Total FTA: 0.0581
+[2025-07-07 00:07:36] [Rank 0] Group 0 Loss: 5.6217
+[2025-07-07 00:07:36] [Rank 0] Group 1 Loss: 5.6823
+[2025-07-07 00:07:36] [Rank 0] Group 2 Loss: 5.6588
+[2025-07-07 00:07:36] [Rank 0] Group 3 Loss: 5.5687
+[2025-07-07 00:07:36] [Rank 0] Group 4 Loss: 5.6314
+[2025-07-07 00:07:36] [Rank 0] Group 5 Loss: 5.6181
+[2025-07-07 00:07:36] [Rank 0] Group 6 Loss: 5.6292
+[2025-07-07 00:07:36] [Rank 0] Group 7 Loss: 5.6232
+[2025-07-07 00:07:36] [Rank 0] Group 8 Loss: 5.6110
+[2025-07-07 00:07:36] [Rank 0] Group 9 Loss: 5.6616
+[2025-07-07 00:07:36] [Rank 0] Group 10 Loss: 5.6120
+[2025-07-07 00:07:36] [Rank 0] Group 11 Loss: 5.6275
+[2025-07-07 00:07:36] [Rank 0] Group 0 FTA: 0.1404
+[2025-07-07 00:07:36] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 00:07:36] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 00:07:36] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-07 00:07:36] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 00:07:36] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-07 00:07:36] [Rank 0] Group 6 FTA: 0.0547
+[2025-07-07 00:07:36] [Rank 0] Group 7 FTA: 0.0599
+[2025-07-07 00:07:36] [Rank 0] Group 8 FTA: 0.0833
+[2025-07-07 00:07:36] [Rank 0] Group 9 FTA: 0.0664
+[2025-07-07 00:07:36] [Rank 0] Group 10 FTA: 0.0547
+[2025-07-07 00:07:36] [Rank 0] Group 11 FTA: 0.0537
+[2025-07-07 00:07:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 00:07:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 00:07:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 00:07:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 00:07:37] [Rank 0] step:501/10000 train_time:39321ms step_avg:78.49ms
+[2025-07-07 00:07:39] [Rank 0] step:521/10000 train_time:40789ms step_avg:78.29ms
+[2025-07-07 00:07:41] [Rank 0] step:541/10000 train_time:43138ms step_avg:79.74ms
+[2025-07-07 00:07:43] [Rank 0] step:561/10000 train_time:44742ms step_avg:79.75ms
+[2025-07-07 00:07:44] [Rank 0] step:581/10000 train_time:46203ms step_avg:79.52ms
+[2025-07-07 00:07:46] [Rank 0] step:601/10000 train_time:47666ms step_avg:79.31ms
+[2025-07-07 00:07:47] [Rank 0] step:621/10000 train_time:49130ms step_avg:79.11ms
+[2025-07-07 00:07:49] [Rank 0] step:641/10000 train_time:51237ms step_avg:79.93ms
+[2025-07-07 00:07:51] [Rank 0] step:661/10000 train_time:52701ms step_avg:79.73ms
+[2025-07-07 00:07:52] [Rank 0] step:681/10000 train_time:54165ms step_avg:79.54ms
+[2025-07-07 00:07:54] [Rank 0] step:701/10000 train_time:55631ms step_avg:79.36ms
+[2025-07-07 00:07:56] [Rank 0] step:721/10000 train_time:57350ms step_avg:79.54ms
+[2025-07-07 00:07:57] [Rank 0] step:741/10000 train_time:59205ms step_avg:79.90ms
+[2025-07-07 00:07:59] [Rank 0] step:761/10000 train_time:60681ms step_avg:79.74ms
+[2025-07-07 00:08:00] [Rank 0] step:781/10000 train_time:62164ms step_avg:79.59ms
+[2025-07-07 00:08:02] [Rank 0] step:801/10000 train_time:63635ms step_avg:79.44ms
+[2025-07-07 00:08:04] [Rank 0] step:821/10000 train_time:65767ms step_avg:80.11ms
+[2025-07-07 00:08:05] [Rank 0] step:841/10000 train_time:67244ms step_avg:79.96ms
+[2025-07-07 00:08:07] [Rank 0] step:861/10000 train_time:68723ms step_avg:79.82ms
+[2025-07-07 00:08:08] [Rank 0] step:881/10000 train_time:70199ms step_avg:79.68ms
+[2025-07-07 00:08:10] [Rank 0] step:901/10000 train_time:71733ms step_avg:79.62ms
+[2025-07-07 00:08:12] [Rank 0] step:921/10000 train_time:73808ms step_avg:80.14ms
+[2025-07-07 00:08:13] [Rank 0] step:941/10000 train_time:75287ms step_avg:80.01ms
+[2025-07-07 00:08:15] [Rank 0] step:961/10000 train_time:76766ms step_avg:79.88ms
+[2025-07-07 00:08:16] [Rank 0] step:981/10000 train_time:78246ms step_avg:79.76ms
+[2025-07-07 00:08:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:08:19] [Rank 0] PRINT: step:1000/10000 train_loss:2.9590 val_loss:2.0161 train_time:79963ms step_avg:79.96ms
+[2025-07-07 00:08:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:08:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:08:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:13:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:13:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:13:43] [Rank 0] Total Loss: 4.1181
+[2025-07-07 00:13:43] [Rank 0] Total FTA: 0.0971
+[2025-07-07 00:13:43] [Rank 0] Group 0 Loss: 4.3549
+[2025-07-07 00:13:43] [Rank 0] Group 1 Loss: 4.1957
+[2025-07-07 00:13:43] [Rank 0] Group 2 Loss: 3.9818
+[2025-07-07 00:13:43] [Rank 0] Group 3 Loss: 4.0364
+[2025-07-07 00:13:43] [Rank 0] Group 4 Loss: 4.1222
+[2025-07-07 00:13:43] [Rank 0] Group 5 Loss: 4.0387
+[2025-07-07 00:13:43] [Rank 0] Group 6 Loss: 4.0290
+[2025-07-07 00:13:43] [Rank 0] Group 7 Loss: 4.0982
+[2025-07-07 00:13:43] [Rank 0] Group 8 Loss: 4.0488
+[2025-07-07 00:13:43] [Rank 0] Group 9 Loss: 4.0789
+[2025-07-07 00:13:43] [Rank 0] Group 10 Loss: 4.1113
+[2025-07-07 00:13:43] [Rank 0] Group 11 Loss: 4.1009
+[2025-07-07 00:13:43] [Rank 0] Group 0 FTA: 0.1534
+[2025-07-07 00:13:43] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 00:13:43] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 00:13:43] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-07 00:13:43] [Rank 0] Group 4 FTA: 0.0495
+[2025-07-07 00:13:43] [Rank 0] Group 5 FTA: 0.0755
+[2025-07-07 00:13:43] [Rank 0] Group 6 FTA: 0.1172
+[2025-07-07 00:13:43] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 00:13:43] [Rank 0] Group 8 FTA: 0.1042
+[2025-07-07 00:13:43] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 00:13:43] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-07 00:13:43] [Rank 0] Group 11 FTA: 0.1055
+[2025-07-07 00:13:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 00:13:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 00:13:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 00:13:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 00:13:45] [Rank 0] step:1001/10000 train_time:79984ms step_avg:79.90ms
+[2025-07-07 00:13:46] [Rank 0] step:1021/10000 train_time:81466ms step_avg:79.79ms
+[2025-07-07 00:13:48] [Rank 0] step:1041/10000 train_time:82935ms step_avg:79.67ms
+[2025-07-07 00:13:49] [Rank 0] step:1061/10000 train_time:84409ms step_avg:79.56ms
+[2025-07-07 00:13:51] [Rank 0] step:1081/10000 train_time:86133ms step_avg:79.68ms
+[2025-07-07 00:13:53] [Rank 0] step:1101/10000 train_time:88011ms step_avg:79.94ms
+[2025-07-07 00:13:54] [Rank 0] step:1121/10000 train_time:89481ms step_avg:79.82ms
+[2025-07-07 00:13:56] [Rank 0] step:1141/10000 train_time:90951ms step_avg:79.71ms
+[2025-07-07 00:13:57] [Rank 0] step:1161/10000 train_time:92423ms step_avg:79.61ms
+[2025-07-07 00:13:59] [Rank 0] step:1181/10000 train_time:94136ms step_avg:79.71ms
+[2025-07-07 00:14:01] [Rank 0] step:1201/10000 train_time:95865ms step_avg:79.82ms
+[2025-07-07 00:14:02] [Rank 0] step:1221/10000 train_time:97436ms step_avg:79.80ms
+[2025-07-07 00:14:04] [Rank 0] step:1241/10000 train_time:98978ms step_avg:79.76ms
+[2025-07-07 00:14:05] [Rank 0] step:1261/10000 train_time:100453ms step_avg:79.66ms
+[2025-07-07 00:14:07] [Rank 0] step:1281/10000 train_time:102167ms step_avg:79.76ms
+[2025-07-07 00:14:08] [Rank 0] step:1301/10000 train_time:103644ms step_avg:79.66ms
+[2025-07-07 00:14:10] [Rank 0] step:1321/10000 train_time:105121ms step_avg:79.58ms
+[2025-07-07 00:14:11] [Rank 0] step:1341/10000 train_time:106597ms step_avg:79.49ms
+[2025-07-07 00:14:13] [Rank 0] step:1361/10000 train_time:108312ms step_avg:79.58ms
+[2025-07-07 00:14:15] [Rank 0] step:1381/10000 train_time:109789ms step_avg:79.50ms
+[2025-07-07 00:14:16] [Rank 0] step:1401/10000 train_time:111267ms step_avg:79.42ms
+[2025-07-07 00:14:18] [Rank 0] step:1421/10000 train_time:112743ms step_avg:79.34ms
+[2025-07-07 00:14:20] [Rank 0] step:1441/10000 train_time:114220ms step_avg:79.26ms
+[2025-07-07 00:14:21] [Rank 0] step:1461/10000 train_time:116339ms step_avg:79.63ms
+[2025-07-07 00:14:23] [Rank 0] step:1481/10000 train_time:117817ms step_avg:79.55ms
+[2025-07-07 00:14:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:14:25] [Rank 0] PRINT: step:1500/10000 train_loss:1.7519 val_loss:1.5999 train_time:119296ms step_avg:79.53ms
+[2025-07-07 00:14:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:14:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:14:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:19:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:19:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:19:50] [Rank 0] Total Loss: 4.1728
+[2025-07-07 00:19:50] [Rank 0] Total FTA: 0.1207
+[2025-07-07 00:19:50] [Rank 0] Group 0 Loss: 4.4793
+[2025-07-07 00:19:50] [Rank 0] Group 1 Loss: 4.2017
+[2025-07-07 00:19:50] [Rank 0] Group 2 Loss: 3.9623
+[2025-07-07 00:19:50] [Rank 0] Group 3 Loss: 4.1369
+[2025-07-07 00:19:50] [Rank 0] Group 4 Loss: 4.1210
+[2025-07-07 00:19:50] [Rank 0] Group 5 Loss: 4.0723
+[2025-07-07 00:19:50] [Rank 0] Group 6 Loss: 4.0720
+[2025-07-07 00:19:50] [Rank 0] Group 7 Loss: 4.1784
+[2025-07-07 00:19:50] [Rank 0] Group 8 Loss: 4.1699
+[2025-07-07 00:19:50] [Rank 0] Group 9 Loss: 4.1335
+[2025-07-07 00:19:50] [Rank 0] Group 10 Loss: 4.1398
+[2025-07-07 00:19:50] [Rank 0] Group 11 Loss: 4.1447
+[2025-07-07 00:19:50] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 00:19:50] [Rank 0] Group 1 FTA: 0.1432
+[2025-07-07 00:19:50] [Rank 0] Group 2 FTA: 0.1693
+[2025-07-07 00:19:50] [Rank 0] Group 3 FTA: 0.1250
+[2025-07-07 00:19:50] [Rank 0] Group 4 FTA: 0.0521
+[2025-07-07 00:19:50] [Rank 0] Group 5 FTA: 0.1198
+[2025-07-07 00:19:50] [Rank 0] Group 6 FTA: 0.1172
+[2025-07-07 00:19:50] [Rank 0] Group 7 FTA: 0.1016
+[2025-07-07 00:19:50] [Rank 0] Group 8 FTA: 0.1146
+[2025-07-07 00:19:50] [Rank 0] Group 9 FTA: 0.1328
+[2025-07-07 00:19:50] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 00:19:50] [Rank 0] Group 11 FTA: 0.1064
+[2025-07-07 00:19:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 00:19:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 00:19:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 00:19:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 00:19:51] [Rank 0] step:1501/10000 train_time:119318ms step_avg:79.49ms
+[2025-07-07 00:19:53] [Rank 0] step:1521/10000 train_time:120795ms step_avg:79.42ms
+[2025-07-07 00:19:55] [Rank 0] step:1541/10000 train_time:122912ms step_avg:79.76ms
+[2025-07-07 00:19:56] [Rank 0] step:1561/10000 train_time:124380ms step_avg:79.68ms
+[2025-07-07 00:19:58] [Rank 0] step:1581/10000 train_time:125851ms step_avg:79.60ms
+[2025-07-07 00:19:59] [Rank 0] step:1601/10000 train_time:127324ms step_avg:79.53ms
+[2025-07-07 00:20:02] [Rank 0] step:1621/10000 train_time:128797ms step_avg:79.46ms
+[2025-07-07 00:20:03] [Rank 0] step:1641/10000 train_time:130923ms step_avg:79.78ms
+[2025-07-07 00:20:05] [Rank 0] step:1661/10000 train_time:132395ms step_avg:79.71ms
+[2025-07-07 00:20:06] [Rank 0] step:1681/10000 train_time:133868ms step_avg:79.64ms
+[2025-07-07 00:20:07] [Rank 0] step:1701/10000 train_time:135345ms step_avg:79.57ms
+[2025-07-07 00:20:09] [Rank 0] step:1721/10000 train_time:137057ms step_avg:79.64ms
+[2025-07-07 00:20:11] [Rank 0] step:1741/10000 train_time:138535ms step_avg:79.57ms
+[2025-07-07 00:20:12] [Rank 0] step:1761/10000 train_time:140009ms step_avg:79.51ms
+[2025-07-07 00:20:14] [Rank 0] step:1781/10000 train_time:141484ms step_avg:79.44ms
+[2025-07-07 00:20:15] [Rank 0] step:1801/10000 train_time:143215ms step_avg:79.52ms
+[2025-07-07 00:20:17] [Rank 0] step:1821/10000 train_time:144674ms step_avg:79.45ms
+[2025-07-07 00:20:18] [Rank 0] step:1841/10000 train_time:146150ms step_avg:79.39ms
+[2025-07-07 00:20:20] [Rank 0] step:1861/10000 train_time:147902ms step_avg:79.47ms
+[2025-07-07 00:20:22] [Rank 0] step:1881/10000 train_time:149437ms step_avg:79.45ms
+[2025-07-07 00:20:24] [Rank 0] step:1901/10000 train_time:151587ms step_avg:79.74ms
+[2025-07-07 00:20:25] [Rank 0] step:1921/10000 train_time:153063ms step_avg:79.68ms
+[2025-07-07 00:20:27] [Rank 0] step:1941/10000 train_time:154541ms step_avg:79.62ms
+[2025-07-07 00:20:28] [Rank 0] step:1961/10000 train_time:156017ms step_avg:79.56ms
+[2025-07-07 00:20:30] [Rank 0] step:1981/10000 train_time:157751ms step_avg:79.63ms
+[2025-07-07 00:20:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:20:33] [Rank 0] PRINT: step:2000/10000 train_loss:1.5038 val_loss:1.4412 train_time:159623ms step_avg:79.81ms
+[2025-07-07 00:20:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:20:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:20:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:26:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:26:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:26:00] [Rank 0] Total Loss: 4.3637
+[2025-07-07 00:26:00] [Rank 0] Total FTA: 0.2349
+[2025-07-07 00:26:00] [Rank 0] Group 0 Loss: 4.7806
+[2025-07-07 00:26:00] [Rank 0] Group 1 Loss: 4.4008
+[2025-07-07 00:26:00] [Rank 0] Group 2 Loss: 4.2256
+[2025-07-07 00:26:00] [Rank 0] Group 3 Loss: 4.2891
+[2025-07-07 00:26:00] [Rank 0] Group 4 Loss: 4.2596
+[2025-07-07 00:26:00] [Rank 0] Group 5 Loss: 4.2662
+[2025-07-07 00:26:00] [Rank 0] Group 6 Loss: 4.2740
+[2025-07-07 00:26:00] [Rank 0] Group 7 Loss: 4.3688
+[2025-07-07 00:26:00] [Rank 0] Group 8 Loss: 4.3023
+[2025-07-07 00:26:00] [Rank 0] Group 9 Loss: 4.2338
+[2025-07-07 00:26:00] [Rank 0] Group 10 Loss: 4.3061
+[2025-07-07 00:26:00] [Rank 0] Group 11 Loss: 4.3078
+[2025-07-07 00:26:00] [Rank 0] Group 0 FTA: 0.3342
+[2025-07-07 00:26:00] [Rank 0] Group 1 FTA: 0.2995
+[2025-07-07 00:26:00] [Rank 0] Group 2 FTA: 0.3620
+[2025-07-07 00:26:00] [Rank 0] Group 3 FTA: 0.2396
+[2025-07-07 00:26:00] [Rank 0] Group 4 FTA: 0.1250
+[2025-07-07 00:26:00] [Rank 0] Group 5 FTA: 0.2109
+[2025-07-07 00:26:00] [Rank 0] Group 6 FTA: 0.1849
+[2025-07-07 00:26:00] [Rank 0] Group 7 FTA: 0.2240
+[2025-07-07 00:26:00] [Rank 0] Group 8 FTA: 0.2161
+[2025-07-07 00:26:00] [Rank 0] Group 9 FTA: 0.2109
+[2025-07-07 00:26:00] [Rank 0] Group 10 FTA: 0.2051
+[2025-07-07 00:26:00] [Rank 0] Group 11 FTA: 0.1875
+[2025-07-07 00:26:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 00:26:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 00:26:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 00:26:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 00:26:02] [Rank 0] step:2001/10000 train_time:159644ms step_avg:79.78ms
+[2025-07-07 00:26:03] [Rank 0] step:2021/10000 train_time:161142ms step_avg:79.73ms
+[2025-07-07 00:26:05] [Rank 0] step:2041/10000 train_time:162612ms step_avg:79.67ms
+[2025-07-07 00:26:06] [Rank 0] step:2061/10000 train_time:164080ms step_avg:79.61ms
+[2025-07-07 00:26:08] [Rank 0] step:2081/10000 train_time:166205ms step_avg:79.87ms
+[2025-07-07 00:26:10] [Rank 0] step:2101/10000 train_time:167676ms step_avg:79.81ms
+[2025-07-07 00:26:11] [Rank 0] step:2121/10000 train_time:169158ms step_avg:79.75ms
+[2025-07-07 00:26:13] [Rank 0] step:2141/10000 train_time:170627ms step_avg:79.69ms
+[2025-07-07 00:26:15] [Rank 0] step:2161/10000 train_time:172100ms step_avg:79.64ms
+[2025-07-07 00:26:16] [Rank 0] step:2181/10000 train_time:173811ms step_avg:79.69ms
+[2025-07-07 00:26:17] [Rank 0] step:2201/10000 train_time:175280ms step_avg:79.64ms
+[2025-07-07 00:26:19] [Rank 0] step:2221/10000 train_time:176751ms step_avg:79.58ms
+[2025-07-07 00:26:20] [Rank 0] step:2241/10000 train_time:178245ms step_avg:79.54ms
+[2025-07-07 00:26:23] [Rank 0] step:2261/10000 train_time:180395ms step_avg:79.79ms
+[2025-07-07 00:26:24] [Rank 0] step:2281/10000 train_time:181891ms step_avg:79.74ms
+[2025-07-07 00:26:26] [Rank 0] step:2301/10000 train_time:183389ms step_avg:79.70ms
+[2025-07-07 00:26:27] [Rank 0] step:2321/10000 train_time:184889ms step_avg:79.66ms
+[2025-07-07 00:26:29] [Rank 0] step:2341/10000 train_time:186389ms step_avg:79.62ms
+[2025-07-07 00:26:30] [Rank 0] step:2361/10000 train_time:188126ms step_avg:79.68ms
+[2025-07-07 00:26:32] [Rank 0] step:2381/10000 train_time:189624ms step_avg:79.64ms
+[2025-07-07 00:26:33] [Rank 0] step:2401/10000 train_time:191124ms step_avg:79.60ms
+[2025-07-07 00:26:35] [Rank 0] step:2421/10000 train_time:192625ms step_avg:79.56ms
+[2025-07-07 00:26:37] [Rank 0] step:2441/10000 train_time:194796ms step_avg:79.80ms
+[2025-07-07 00:26:39] [Rank 0] step:2461/10000 train_time:196351ms step_avg:79.79ms
+[2025-07-07 00:26:40] [Rank 0] step:2481/10000 train_time:197919ms step_avg:79.77ms
+[2025-07-07 00:26:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:26:43] [Rank 0] PRINT: step:2500/10000 train_loss:1.3695 val_loss:1.3074 train_time:199483ms step_avg:79.79ms
+[2025-07-07 00:26:43] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:26:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:26:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 00:26:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 00:32:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 00:32:08] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 00:32:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 00:32:08] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 00:32:08] [Rank 0] Total Loss: 4.6217 +[2025-07-07 00:32:08] [Rank 0] Total Loss: 4.6217 +[2025-07-07 00:32:08] [Rank 0] Total FTA: 0.3167 +[2025-07-07 00:32:08] [Rank 0] Total FTA: 0.3167 +[2025-07-07 00:32:08] [Rank 0] Group 0 Loss: 5.0825 +[2025-07-07 00:32:08] [Rank 0] Group 0 Loss: 5.0825 +[2025-07-07 00:32:08] [Rank 0] Group 1 Loss: 4.5429 +[2025-07-07 00:32:08] [Rank 0] Group 1 Loss: 4.5429 +[2025-07-07 00:32:08] [Rank 0] Group 2 Loss: 4.4026 +[2025-07-07 00:32:08] [Rank 0] Group 2 Loss: 4.4026 +[2025-07-07 00:32:08] [Rank 0] Group 3 Loss: 4.5866 +[2025-07-07 00:32:08] [Rank 0] Group 3 Loss: 4.5866 +[2025-07-07 00:32:08] [Rank 0] Group 4 Loss: 4.5493 +[2025-07-07 00:32:08] [Rank 0] Group 4 Loss: 4.5493 +[2025-07-07 00:32:08] [Rank 0] Group 5 Loss: 4.5388 +[2025-07-07 00:32:08] [Rank 0] Group 5 Loss: 4.5388 +[2025-07-07 00:32:08] [Rank 0] Group 6 Loss: 4.4955 +[2025-07-07 00:32:08] [Rank 0] Group 6 Loss: 4.4955 +[2025-07-07 00:32:08] [Rank 0] Group 7 Loss: 4.5656 +[2025-07-07 00:32:08] [Rank 0] Group 7 Loss: 4.5656 +[2025-07-07 00:32:08] [Rank 0] Group 8 Loss: 4.5416 +[2025-07-07 00:32:08] [Rank 0] Group 8 Loss: 4.5416 +[2025-07-07 00:32:08] [Rank 0] Group 9 Loss: 4.5443 +[2025-07-07 00:32:08] [Rank 0] Group 9 Loss: 4.5443 +[2025-07-07 00:32:08] [Rank 0] Group 10 Loss: 4.5657 +[2025-07-07 00:32:08] [Rank 0] Group 10 Loss: 4.5657 +[2025-07-07 00:32:08] [Rank 0] Group 11 Loss: 4.6047 +[2025-07-07 00:32:08] [Rank 0] Group 11 Loss: 4.6047 +[2025-07-07 00:32:08] [Rank 0] Group 0 FTA: 0.3043 +[2025-07-07 00:32:08] [Rank 0] Group 0 FTA: 0.3043 +[2025-07-07 00:32:08] [Rank 0] Group 1 FTA: 0.5000 +[2025-07-07 00:32:08] [Rank 0] Group 1 FTA: 0.5000 +[2025-07-07 00:32:08] [Rank 0] Group 2 FTA: 0.4766 +[2025-07-07 00:32:08] [Rank 0] Group 2 FTA: 0.4766 +[2025-07-07 00:32:08] [Rank 0] Group 3 FTA: 0.2552 +[2025-07-07 00:32:08] [Rank 0] Group 3 FTA: 0.2552 +[2025-07-07 00:32:08] [Rank 0] Group 4 FTA: 0.3021 +[2025-07-07 00:32:08] [Rank 0] Group 4 FTA: 0.3021 +[2025-07-07 00:32:08] [Rank 0] Group 5 FTA: 0.2891 +[2025-07-07 00:32:08] [Rank 0] Group 5 FTA: 0.2891 +[2025-07-07 00:32:08] [Rank 0] Group 6 FTA: 0.2396 +[2025-07-07 00:32:08] [Rank 0] Group 6 FTA: 0.2396 +[2025-07-07 00:32:08] [Rank 0] Group 7 FTA: 0.2917 +[2025-07-07 00:32:08] [Rank 0] Group 7 FTA: 0.2917 +[2025-07-07 00:32:08] [Rank 0] Group 8 FTA: 0.3203 +[2025-07-07 00:32:08] [Rank 0] Group 8 FTA: 0.3203 +[2025-07-07 00:32:08] [Rank 0] Group 9 FTA: 0.2656 +[2025-07-07 00:32:08] [Rank 0] Group 9 FTA: 0.2656 +[2025-07-07 00:32:08] [Rank 0] Group 10 FTA: 0.3066 +[2025-07-07 00:32:08] [Rank 0] Group 10 FTA: 0.3066 +[2025-07-07 00:32:08] [Rank 0] Group 11 FTA: 0.2910 +[2025-07-07 00:32:08] [Rank 0] Group 11 FTA: 0.2910 +[2025-07-07 00:32:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png +[2025-07-07 00:32:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png +[2025-07-07 00:32:09] [Rank 0] [✓] Per-Class FTA curve 
+[2025-07-07 00:32:09] [Rank 0] step:2501/10000 train_time:199503ms step_avg:79.77ms
+[2025-07-07 00:32:12] [Rank 0] step:2521/10000 train_time:200991ms step_avg:79.73ms
+[2025-07-07 00:32:13] [Rank 0] step:2541/10000 train_time:203150ms step_avg:79.95ms
+[2025-07-07 00:32:15] [Rank 0] step:2561/10000 train_time:204642ms step_avg:79.91ms
+[2025-07-07 00:32:16] [Rank 0] step:2581/10000 train_time:206135ms step_avg:79.87ms
+[2025-07-07 00:32:18] [Rank 0] step:2601/10000 train_time:207629ms step_avg:79.83ms
+[2025-07-07 00:32:20] [Rank 0] step:2621/10000 train_time:209794ms step_avg:80.04ms
+[2025-07-07 00:32:21] [Rank 0] step:2641/10000 train_time:211289ms step_avg:80.00ms
+[2025-07-07 00:32:23] [Rank 0] step:2661/10000 train_time:212785ms step_avg:79.96ms
+[2025-07-07 00:32:24] [Rank 0] step:2681/10000 train_time:214281ms step_avg:79.93ms
+[2025-07-07 00:32:26] [Rank 0] step:2701/10000 train_time:215777ms step_avg:79.89ms
+[2025-07-07 00:32:28] [Rank 0] step:2721/10000 train_time:217939ms step_avg:80.10ms
+[2025-07-07 00:32:29] [Rank 0] step:2741/10000 train_time:219434ms step_avg:80.06ms
+[2025-07-07 00:32:31] [Rank 0] step:2761/10000 train_time:220931ms step_avg:80.02ms
+[2025-07-07 00:32:32] [Rank 0] step:2781/10000 train_time:222429ms step_avg:79.98ms
+[2025-07-07 00:32:35] [Rank 0] step:2801/10000 train_time:224580ms step_avg:80.18ms
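The step_avg field is simply cumulative train_time divided by the step index, as the entries above confirm (224580 ms / 2801 ≈ 80.18 ms). A quick consistency check against two lines from this log (illustrative only):

    def step_avg_ms(train_time_ms: int, step: int) -> float:
        return train_time_ms / step

    assert round(step_avg_ms(224_580, 2801), 2) == 80.18   # step:2801 entry above
    assert round(step_avg_ms(199_483, 2500), 2) == 79.79   # step:2500 val line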
+[2025-07-07 00:32:36] [Rank 0] step:2821/10000 train_time:226079ms step_avg:80.14ms
+[2025-07-07 00:32:38] [Rank 0] step:2841/10000 train_time:227579ms step_avg:80.11ms
+[2025-07-07 00:32:39] [Rank 0] step:2861/10000 train_time:229077ms step_avg:80.07ms
+[2025-07-07 00:32:41] [Rank 0] step:2881/10000 train_time:231245ms step_avg:80.27ms
+[2025-07-07 00:32:43] [Rank 0] step:2901/10000 train_time:232726ms step_avg:80.22ms
+[2025-07-07 00:32:44] [Rank 0] step:2921/10000 train_time:234227ms step_avg:80.19ms
+[2025-07-07 00:32:46] [Rank 0] step:2941/10000 train_time:235727ms step_avg:80.15ms
+[2025-07-07 00:32:47] [Rank 0] step:2961/10000 train_time:237228ms step_avg:80.12ms
+[2025-07-07 00:32:49] [Rank 0] step:2981/10000 train_time:239398ms step_avg:80.31ms
+[2025-07-07 00:32:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:32:52] [Rank 0] PRINT: step:3000/10000 train_loss:1.2663 val_loss:1.2203 train_time:240896ms step_avg:80.30ms
+[2025-07-07 00:32:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:32:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:32:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:38:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:38:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:38:18] [Rank 0] Total Loss: 4.7250
+[2025-07-07 00:38:18] [Rank 0] Total FTA: 0.3465
+[2025-07-07 00:38:18] [Rank 0] Group 0 Loss: 4.9324
+[2025-07-07 00:38:18] [Rank 0] Group 1 Loss: 4.6246
+[2025-07-07 00:38:18] [Rank 0] Group 2 Loss: 4.6086
+[2025-07-07 00:38:18] [Rank 0] Group 3 Loss: 4.7661
+[2025-07-07 00:38:18] [Rank 0] Group 4 Loss: 4.7081
+[2025-07-07 00:38:18] [Rank 0] Group 5 Loss: 4.6364
+[2025-07-07 00:38:18] [Rank 0] Group 6 Loss: 4.6502
+[2025-07-07 00:38:18] [Rank 0] Group 7 Loss: 4.7506
+[2025-07-07 00:38:18] [Rank 0] Group 8 Loss: 4.6501
+[2025-07-07 00:38:18] [Rank 0] Group 9 Loss: 4.7098
+[2025-07-07 00:38:18] [Rank 0] Group 10 Loss: 4.6737
+[2025-07-07 00:38:18] [Rank 0] Group 11 Loss: 4.7508
+[2025-07-07 00:38:18] [Rank 0] Group 0 FTA: 0.3329
+[2025-07-07 00:38:18] [Rank 0] Group 1 FTA: 0.4818
+[2025-07-07 00:38:18] [Rank 0] Group 2 FTA: 0.3307
+[2025-07-07 00:38:18] [Rank 0] Group 3 FTA: 0.3073
+[2025-07-07 00:38:18] [Rank 0] Group 4 FTA: 0.2708
+[2025-07-07 00:38:18] [Rank 0] Group 5 FTA: 0.3724
+[2025-07-07 00:38:18] [Rank 0] Group 6 FTA: 0.3203
+[2025-07-07 00:38:18] [Rank 0] Group 7 FTA: 0.3802
+[2025-07-07 00:38:18] [Rank 0] Group 8 FTA: 0.3203
+[2025-07-07 00:38:18] [Rank 0] Group 9 FTA: 0.3633
+[2025-07-07 00:38:18] [Rank 0] Group 10 FTA: 0.3340
+[2025-07-07 00:38:18] [Rank 0] Group 11 FTA: 0.3545
+[2025-07-07 00:38:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 00:38:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 00:38:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 00:38:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 00:38:19] [Rank 0] step:3001/10000 train_time:240916ms step_avg:80.28ms
+[2025-07-07 00:38:21] [Rank 0] step:3021/10000 train_time:242424ms step_avg:80.25ms
+[2025-07-07 00:38:22] [Rank 0] step:3041/10000 train_time:243913ms step_avg:80.21ms
+[2025-07-07 00:38:24] [Rank 0] step:3061/10000 train_time:245457ms step_avg:80.19ms
+[2025-07-07 00:38:26] [Rank 0] step:3081/10000 train_time:247568ms step_avg:80.35ms
+[2025-07-07 00:38:27] [Rank 0] step:3101/10000 train_time:249061ms step_avg:80.32ms
+[2025-07-07 00:38:29] [Rank 0] step:3121/10000 train_time:250556ms step_avg:80.28ms
+[2025-07-07 00:38:30] [Rank 0] step:3141/10000 train_time:252051ms step_avg:80.25ms
+[2025-07-07 00:38:32] [Rank 0] step:3161/10000 train_time:253780ms step_avg:80.28ms
+[2025-07-07 00:38:34] [Rank 0] step:3181/10000 train_time:255275ms step_avg:80.25ms
+[2025-07-07 00:38:35] [Rank 0] step:3201/10000 train_time:256771ms step_avg:80.22ms
+[2025-07-07 00:38:37] [Rank 0] step:3221/10000 train_time:258268ms step_avg:80.18ms
+[2025-07-07 00:38:39] [Rank 0] step:3241/10000 train_time:260020ms step_avg:80.23ms
+[2025-07-07 00:38:40] [Rank 0] step:3261/10000 train_time:261906ms step_avg:80.31ms
+[2025-07-07 00:38:42] [Rank 0] step:3281/10000 train_time:263403ms step_avg:80.28ms
+[2025-07-07 00:38:43] [Rank 0] step:3301/10000 train_time:264901ms step_avg:80.25ms
+[2025-07-07 00:38:45] [Rank 0] step:3321/10000 train_time:266399ms step_avg:80.22ms
+[2025-07-07 00:38:47] [Rank 0] step:3341/10000 train_time:268547ms step_avg:80.38ms
+[2025-07-07 00:38:48] [Rank 0] step:3361/10000 train_time:270047ms step_avg:80.35ms
+[2025-07-07 00:38:50] [Rank 0] step:3381/10000 train_time:271546ms step_avg:80.32ms
+[2025-07-07 00:38:51] [Rank 0] step:3401/10000 train_time:273047ms step_avg:80.28ms
+[2025-07-07 00:38:53] [Rank 0] step:3421/10000 train_time:274802ms step_avg:80.33ms
+[2025-07-07 00:38:55] [Rank 0] step:3441/10000 train_time:276694ms step_avg:80.41ms
+[2025-07-07 00:38:56] [Rank 0] step:3461/10000 train_time:278196ms step_avg:80.38ms
+[2025-07-07 00:38:58] [Rank 0] step:3481/10000 train_time:279700ms step_avg:80.35ms
+[2025-07-07 00:38:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:39:00] [Rank 0] PRINT: step:3500/10000 train_loss:1.1937 val_loss:1.1586 train_time:281203ms step_avg:80.34ms
+[2025-07-07 00:39:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:39:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:39:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:44:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:44:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:44:23] [Rank 0] Total Loss: 4.7306
+[2025-07-07 00:44:23] [Rank 0] Total FTA: 0.4078
+[2025-07-07 00:44:23] [Rank 0] Group 0 Loss: 4.8159
+[2025-07-07 00:44:23] [Rank 0] Group 1 Loss: 4.6386
+[2025-07-07 00:44:23] [Rank 0] Group 2 Loss: 4.5326
+[2025-07-07 00:44:23] [Rank 0] Group 3 Loss: 4.6777
+[2025-07-07 00:44:23] [Rank 0] Group 4 Loss: 4.6608
+[2025-07-07 00:44:23] [Rank 0] Group 5 Loss: 4.8196
+[2025-07-07 00:44:23] [Rank 0] Group 6 Loss: 4.6792
+[2025-07-07 00:44:23] [Rank 0] Group 7 Loss: 4.7913
+[2025-07-07 00:44:23] [Rank 0] Group 8 Loss: 4.8043
+[2025-07-07 00:44:23] [Rank 0] Group 9 Loss: 4.7234
+[2025-07-07 00:44:23] [Rank 0] Group 10 Loss: 4.7644
+[2025-07-07 00:44:23] [Rank 0] Group 11 Loss: 4.7419
+[2025-07-07 00:44:23] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 00:44:23] [Rank 0] Group 1 FTA: 0.4818
+[2025-07-07 00:44:24] [Rank 0] Group 2 FTA: 0.6042
+[2025-07-07 00:44:24] [Rank 0] Group 3 FTA: 0.4115
+[2025-07-07 00:44:24] [Rank 0] Group 4 FTA: 0.3646
+[2025-07-07 00:44:24] [Rank 0] Group 5 FTA: 0.5052
+[2025-07-07 00:44:24] [Rank 0] Group 6 FTA: 0.3750
+[2025-07-07 00:44:24] [Rank 0] Group 7 FTA: 0.4505
+[2025-07-07 00:44:24] [Rank 0] Group 8 FTA: 0.4323
+[2025-07-07 00:44:24] [Rank 0] Group 9 FTA: 0.4570
+[2025-07-07 00:44:24] [Rank 0] Group 10 FTA: 0.4180
+[2025-07-07 00:44:24] [Rank 0] Group 11 FTA: 0.4336
+[2025-07-07 00:44:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 00:44:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 00:44:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 00:44:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 00:44:25] [Rank 0] step:3501/10000 train_time:281223ms step_avg:80.33ms
+[2025-07-07 00:44:27] [Rank 0] step:3521/10000 train_time:283360ms step_avg:80.48ms
+[2025-07-07 00:44:29] [Rank 0] step:3541/10000 train_time:284851ms step_avg:80.44ms
+[2025-07-07 00:44:30] [Rank 0] step:3561/10000 train_time:286345ms step_avg:80.41ms
+[2025-07-07 00:44:32] [Rank 0] step:3581/10000 train_time:287838ms step_avg:80.38ms
+[2025-07-07 00:44:33] [Rank 0] step:3601/10000 train_time:289431ms step_avg:80.38ms
+[2025-07-07 00:44:35] [Rank 0] step:3621/10000 train_time:291189ms step_avg:80.42ms
+[2025-07-07 00:44:37] [Rank 0] step:3641/10000 train_time:292749ms step_avg:80.40ms
+[2025-07-07 00:44:38] [Rank 0] step:3661/10000 train_time:294244ms step_avg:80.37ms
+[2025-07-07 00:44:40] [Rank 0] step:3681/10000 train_time:295740ms step_avg:80.34ms
+[2025-07-07 00:44:42] [Rank 0] step:3701/10000 train_time:297891ms step_avg:80.49ms
+[2025-07-07 00:44:43] [Rank 0] step:3721/10000 train_time:299386ms step_avg:80.46ms
+[2025-07-07 00:44:45] [Rank 0] step:3741/10000 train_time:300882ms step_avg:80.43ms
+[2025-07-07 00:44:46] [Rank 0] step:3761/10000 train_time:302377ms step_avg:80.40ms
+[2025-07-07 00:44:48] [Rank 0] step:3781/10000 train_time:304129ms step_avg:80.44ms
+[2025-07-07 00:44:49] [Rank 0] step:3801/10000 train_time:305607ms step_avg:80.40ms
+[2025-07-07 00:44:51] [Rank 0] step:3821/10000 train_time:307103ms step_avg:80.37ms
+[2025-07-07 00:44:52] [Rank 0] step:3841/10000 train_time:308601ms step_avg:80.34ms
+[2025-07-07 00:44:54] [Rank 0] step:3861/10000 train_time:310098ms step_avg:80.32ms
+[2025-07-07 00:44:56] [Rank 0] step:3881/10000 train_time:312261ms step_avg:80.46ms
+[2025-07-07 00:44:58] [Rank 0] step:3901/10000 train_time:313758ms step_avg:80.43ms
+[2025-07-07 00:44:59] [Rank 0] step:3921/10000 train_time:315257ms step_avg:80.40ms
+[2025-07-07 00:45:01] [Rank 0] step:3941/10000 train_time:316754ms step_avg:80.37ms
+[2025-07-07 00:45:02] [Rank 0] step:3961/10000 train_time:318510ms step_avg:80.41ms
+[2025-07-07 00:45:04] [Rank 0] step:3981/10000 train_time:319991ms step_avg:80.38ms
+[2025-07-07 00:45:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:45:06] [Rank 0] PRINT: step:4000/10000 train_loss:1.1413 val_loss:1.1156 train_time:321491ms step_avg:80.37ms
+[2025-07-07 00:45:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:45:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:45:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:50:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:50:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:50:31] [Rank 0] Total Loss: 4.7788
+[2025-07-07 00:50:31] [Rank 0] Total FTA: 0.4836
+[2025-07-07 00:50:31] [Rank 0] Group 0 Loss: 4.7473
+[2025-07-07 00:50:31] [Rank 0] Group 1 Loss: 4.5804
+[2025-07-07 00:50:31] [Rank 0] Group 2 Loss: 4.6229
+[2025-07-07 00:50:31] [Rank 0] Group 3 Loss: 4.7590
+[2025-07-07 00:50:31] [Rank 0] Group 4 Loss: 4.8205
+[2025-07-07 00:50:31] [Rank 0] Group 5 Loss: 4.8012
+[2025-07-07 00:50:31] [Rank 0] Group 6 Loss: 4.7742
+[2025-07-07 00:50:31] [Rank 0] Group 7 Loss: 4.8948
+[2025-07-07 00:50:31] [Rank 0] Group 8 Loss: 4.8246
+[2025-07-07 00:50:31] [Rank 0] Group 9 Loss: 4.8444
+[2025-07-07 00:50:31] [Rank 0] Group 10 Loss: 4.8485
+[2025-07-07 00:50:31] [Rank 0] Group 11 Loss: 4.8087
+[2025-07-07 00:50:31] [Rank 0] Group 0 FTA: 0.4954
+[2025-07-07 00:50:31] [Rank 0] Group 1 FTA: 0.5052
+[2025-07-07 00:50:31] [Rank 0] Group 2 FTA: 0.6198
+[2025-07-07 00:50:31] [Rank 0] Group 3 FTA: 0.5234
+[2025-07-07 00:50:31] [Rank 0] Group 4 FTA: 0.4844
+[2025-07-07 00:50:31] [Rank 0] Group 5 FTA: 0.4661
+[2025-07-07 00:50:31] [Rank 0] Group 6 FTA: 0.4167
+[2025-07-07 00:50:31] [Rank 0] Group 7 FTA: 0.4714
+[2025-07-07 00:50:31] [Rank 0] Group 8 FTA: 0.4453
+[2025-07-07 00:50:31] [Rank 0] Group 9 FTA: 0.4375
+[2025-07-07 00:50:31] [Rank 0] Group 10 FTA: 0.4785
+[2025-07-07 00:50:31] [Rank 0] Group 11 FTA: 0.4648
+[2025-07-07 00:50:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 00:50:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 00:50:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 00:50:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
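Note that the ~5.5-minute detailed-evaluation pause (00:45:06 to 00:50:31 above) is excluded from train_time, which advances only ~20 ms between step:4000 (321491ms) and step:4001 (321511ms); step_avg therefore measures pure training throughput. The bookkeeping presumably looks like the following sketch (a hypothetical structure, not the script's code):

    import time

    train_time_ms = 0.0
    t0 = time.perf_counter()
    # ... training steps run; at an eval boundary:
    train_time_ms += (time.perf_counter() - t0) * 1000   # stop the training clock
    # run_detailed_evaluation()  # hypothetical: the untimed ~5.5 min eval pass
    t0 = time.perf_counter()                             # restart before resuming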
+[2025-07-07 00:50:33] [Rank 0] step:4001/10000 train_time:321511ms step_avg:80.36ms
+[2025-07-07 00:50:34] [Rank 0] step:4021/10000 train_time:323020ms step_avg:80.33ms
+[2025-07-07 00:50:36] [Rank 0] step:4041/10000 train_time:324508ms step_avg:80.30ms
+[2025-07-07 00:50:38] [Rank 0] step:4061/10000 train_time:326656ms step_avg:80.44ms
+[2025-07-07 00:50:39] [Rank 0] step:4081/10000 train_time:328149ms step_avg:80.41ms
+[2025-07-07 00:50:41] [Rank 0] step:4101/10000 train_time:329643ms step_avg:80.38ms
+[2025-07-07 00:50:42] [Rank 0] step:4121/10000 train_time:331138ms step_avg:80.35ms
+[2025-07-07 00:50:44] [Rank 0] step:4141/10000 train_time:332632ms step_avg:80.33ms
+[2025-07-07 00:50:46] [Rank 0] step:4161/10000 train_time:334366ms step_avg:80.36ms
+[2025-07-07 00:50:47] [Rank 0] step:4181/10000 train_time:335860ms step_avg:80.33ms
+[2025-07-07 00:50:49] [Rank 0] step:4201/10000 train_time:337356ms step_avg:80.30ms
+[2025-07-07 00:50:50] [Rank 0] step:4221/10000 train_time:338853ms step_avg:80.28ms
+[2025-07-07 00:50:52] [Rank 0] step:4241/10000 train_time:340583ms step_avg:80.31ms
+[2025-07-07 00:50:53] [Rank 0] step:4261/10000 train_time:342211ms step_avg:80.31ms
+[2025-07-07 00:50:55] [Rank 0] step:4281/10000 train_time:343709ms step_avg:80.29ms
+[2025-07-07 00:50:56] [Rank 0] step:4301/10000 train_time:345265ms step_avg:80.28ms
+[2025-07-07 00:50:58] [Rank 0] step:4321/10000 train_time:346769ms step_avg:80.25ms
+[2025-07-07 00:51:00] [Rank 0] step:4341/10000 train_time:348507ms step_avg:80.28ms
+[2025-07-07 00:51:01] [Rank 0] step:4361/10000 train_time:350003ms step_avg:80.26ms
+[2025-07-07 00:51:03] [Rank 0] step:4381/10000 train_time:351502ms step_avg:80.23ms
+[2025-07-07 00:51:04] [Rank 0] step:4401/10000 train_time:353000ms step_avg:80.21ms
+[2025-07-07 00:51:06] [Rank 0] step:4421/10000 train_time:354738ms step_avg:80.24ms
+[2025-07-07 00:51:07] [Rank 0] step:4441/10000 train_time:356236ms step_avg:80.22ms
+[2025-07-07 00:51:09] [Rank 0] step:4461/10000 train_time:357733ms step_avg:80.19ms
+[2025-07-07 00:51:10] [Rank 0] step:4481/10000 train_time:359233ms step_avg:80.17ms
+[2025-07-07 00:51:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:51:13] [Rank 0] PRINT: step:4500/10000 train_loss:1.1026 val_loss:1.0815 train_time:360736ms step_avg:80.16ms
+[2025-07-07 00:51:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:51:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:51:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 00:56:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 00:56:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 00:56:39] [Rank 0] Total Loss: 4.9439
+[2025-07-07 00:56:39] [Rank 0] Total FTA: 0.5301
+[2025-07-07 00:56:39] [Rank 0] Group 0 Loss: 5.0883
+[2025-07-07 00:56:39] [Rank 0] Group 1 Loss: 4.8578
+[2025-07-07 00:56:39] [Rank 0] Group 2 Loss: 4.6506
+[2025-07-07 00:56:39] [Rank 0] Group 3 Loss: 5.0515
+[2025-07-07 00:56:39] [Rank 0] Group 4 Loss: 4.8917
+[2025-07-07 00:56:39] [Rank 0] Group 5 Loss: 4.9589
+[2025-07-07 00:56:39] [Rank 0] Group 6 Loss: 5.0210
+[2025-07-07 00:56:39] [Rank 0] Group 7 Loss: 4.9011
+[2025-07-07 00:56:39] [Rank 0] Group 8 Loss: 4.9224
+[2025-07-07 00:56:39] [Rank 0] Group 9 Loss: 4.9390
+[2025-07-07 00:56:39] [Rank 0] Group 10 Loss: 4.9137
+[2025-07-07 00:56:39] [Rank 0] Group 11 Loss: 4.9629
+[2025-07-07 00:56:39] [Rank 0] Group 0 FTA: 0.6775
+[2025-07-07 00:56:39] [Rank 0] Group 1 FTA: 0.3359
+[2025-07-07 00:56:39] [Rank 0] Group 2 FTA: 0.4818
+[2025-07-07 00:56:39] [Rank 0] Group 3 FTA: 0.4922
+[2025-07-07 00:56:39] [Rank 0] Group 4 FTA: 0.5026
+[2025-07-07 00:56:39] [Rank 0] Group 5 FTA: 0.5651
+[2025-07-07 00:56:39] [Rank 0] Group 6 FTA: 0.4870
+[2025-07-07 00:56:39] [Rank 0] Group 7 FTA: 0.5260
+[2025-07-07 00:56:39] [Rank 0] Group 8 FTA: 0.5286
+[2025-07-07 00:56:39] [Rank 0] Group 9 FTA: 0.5352
+[2025-07-07 00:56:39] [Rank 0] Group 10 FTA: 0.5215
+[2025-07-07 00:56:39] [Rank 0] Group 11 FTA: 0.5430
+[2025-07-07 00:56:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 00:56:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 00:56:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 00:56:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 00:56:41] [Rank 0] step:4501/10000 train_time:360763ms step_avg:80.15ms
+[2025-07-07 00:56:43] [Rank 0] step:4521/10000 train_time:362955ms step_avg:80.28ms
+[2025-07-07 00:56:44] [Rank 0] step:4541/10000 train_time:364448ms step_avg:80.26ms
+[2025-07-07 00:56:46] [Rank 0] step:4561/10000 train_time:365941ms step_avg:80.23ms
+[2025-07-07 00:56:47] [Rank 0] step:4581/10000 train_time:367436ms step_avg:80.21ms
+[2025-07-07 00:56:49] [Rank 0] step:4601/10000 train_time:369600ms step_avg:80.33ms
+[2025-07-07 00:56:51] [Rank 0] step:4621/10000 train_time:371092ms step_avg:80.31ms
+[2025-07-07 00:56:52] [Rank 0] step:4641/10000 train_time:372587ms step_avg:80.28ms
+[2025-07-07 00:56:54] [Rank 0] step:4661/10000 train_time:374083ms step_avg:80.26ms
+[2025-07-07 00:56:56] [Rank 0] step:4681/10000 train_time:375582ms step_avg:80.24ms
+[2025-07-07 00:56:58] [Rank 0] step:4701/10000 train_time:377745ms step_avg:80.35ms
+[2025-07-07 00:56:59] [Rank 0] step:4721/10000 train_time:379244ms step_avg:80.33ms
+[2025-07-07 00:57:01] [Rank 0] step:4741/10000 train_time:380743ms step_avg:80.31ms
+[2025-07-07 00:57:02] [Rank 0] step:4761/10000 train_time:382246ms step_avg:80.29ms
+[2025-07-07 00:57:04] [Rank 0] step:4781/10000 train_time:384383ms step_avg:80.40ms
+[2025-07-07 00:57:06] [Rank 0] step:4801/10000 train_time:385882ms step_avg:80.38ms
+[2025-07-07 00:57:07] [Rank 0] step:4821/10000 train_time:387381ms step_avg:80.35ms
+[2025-07-07 00:57:09] [Rank 0] step:4841/10000 train_time:388884ms step_avg:80.33ms
+[2025-07-07 00:57:10] [Rank 0] step:4861/10000 train_time:390386ms step_avg:80.31ms
+[2025-07-07 00:57:12] [Rank 0] step:4881/10000 train_time:392124ms step_avg:80.34ms
+[2025-07-07 00:57:14] [Rank 0] step:4901/10000 train_time:393868ms step_avg:80.36ms
+[2025-07-07 00:57:15] [Rank 0] step:4921/10000 train_time:395466ms step_avg:80.36ms
+[2025-07-07 00:57:17] [Rank 0] step:4941/10000 train_time:397016ms step_avg:80.35ms
+[2025-07-07 00:57:19] [Rank 0] step:4961/10000 train_time:399171ms step_avg:80.46ms
+[2025-07-07 00:57:21] [Rank 0] step:4981/10000 train_time:400669ms step_avg:80.44ms
+[2025-07-07 00:57:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 00:57:23] [Rank 0] PRINT: step:5000/10000 train_loss:1.0705 val_loss:1.0512 train_time:402169ms step_avg:80.43ms
+[2025-07-07 00:57:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 00:57:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 00:57:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:02:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:02:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:02:47] [Rank 0] Total Loss: 5.0666
+[2025-07-07 01:02:47] [Rank 0] Total FTA: 0.5880
+[2025-07-07 01:02:47] [Rank 0] Group 0 Loss: 5.1637
+[2025-07-07 01:02:47] [Rank 0] Group 1 Loss: 5.0507
+[2025-07-07 01:02:47] [Rank 0] Group 2 Loss: 4.8454
+[2025-07-07 01:02:47] [Rank 0] Group 3 Loss: 5.1170
+[2025-07-07 01:02:47] [Rank 0] Group 4 Loss: 5.0991
+[2025-07-07 01:02:47] [Rank 0] Group 5 Loss: 4.9959
+[2025-07-07 01:02:47] [Rank 0] Group 6 Loss: 5.0568
+[2025-07-07 01:02:47] [Rank 0] Group 7 Loss: 5.0955
+[2025-07-07 01:02:47] [Rank 0] Group 8 Loss: 5.0134
+[2025-07-07 01:02:47] [Rank 0] Group 9 Loss: 5.0554
+[2025-07-07 01:02:47] [Rank 0] Group 10 Loss: 5.0842
+[2025-07-07 01:02:47] [Rank 0] Group 11 Loss: 5.0849
+[2025-07-07 01:02:47] [Rank 0] Group 0 FTA: 0.6619
+[2025-07-07 01:02:47] [Rank 0] Group 1 FTA: 0.6536
+[2025-07-07 01:02:47] [Rank 0] Group 2 FTA: 0.6484
+[2025-07-07 01:02:47] [Rank 0] Group 3 FTA: 0.5729
+[2025-07-07 01:02:47] [Rank 0] Group 4 FTA: 0.4297
+[2025-07-07 01:02:47] [Rank 0] Group 5 FTA: 0.6615
+[2025-07-07 01:02:47] [Rank 0] Group 6 FTA: 0.5339
+[2025-07-07 01:02:47] [Rank 0] Group 7 FTA: 0.5026
+[2025-07-07 01:02:47] [Rank 0] Group 8 FTA: 0.5391
+[2025-07-07 01:02:47] [Rank 0] Group 9 FTA: 0.5898
+[2025-07-07 01:02:47] [Rank 0] Group 10 FTA: 0.5957
+[2025-07-07 01:02:47] [Rank 0] Group 11 FTA: 0.5889
+[2025-07-07 01:02:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:02:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:02:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:02:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:02:48] [Rank 0] step:5001/10000 train_time:402190ms step_avg:80.42ms
+[2025-07-07 01:02:50] [Rank 0] step:5021/10000 train_time:403685ms step_avg:80.40ms
+[2025-07-07 01:02:52] [Rank 0] step:5041/10000 train_time:405846ms step_avg:80.51ms
+[2025-07-07 01:02:54] [Rank 0] step:5061/10000 train_time:407317ms step_avg:80.48ms
+[2025-07-07 01:02:55] [Rank 0] step:5081/10000 train_time:408811ms step_avg:80.46ms
+[2025-07-07 01:02:57] [Rank 0] step:5101/10000 train_time:410307ms step_avg:80.44ms
+[2025-07-07 01:02:58] [Rank 0] step:5121/10000 train_time:411802ms step_avg:80.41ms
+[2025-07-07 01:03:00] [Rank 0] step:5141/10000 train_time:413963ms step_avg:80.52ms
+[2025-07-07 01:03:02] [Rank 0] step:5161/10000 train_time:415458ms step_avg:80.50ms
+[2025-07-07 01:03:03] [Rank 0] step:5181/10000 train_time:416954ms step_avg:80.48ms
+[2025-07-07 01:03:05] [Rank 0] step:5201/10000 train_time:418451ms step_avg:80.46ms
+[2025-07-07 01:03:07] [Rank 0] step:5221/10000 train_time:419947ms step_avg:80.43ms
+[2025-07-07 01:03:08] [Rank 0] step:5241/10000 train_time:422106ms step_avg:80.54ms
+[2025-07-07 01:03:10] [Rank 0] step:5261/10000 train_time:423604ms step_avg:80.52ms
+[2025-07-07 01:03:11] [Rank 0] step:5281/10000 train_time:425104ms step_avg:80.50ms
+[2025-07-07 01:03:13] [Rank 0] step:5301/10000 train_time:426602ms step_avg:80.48ms
+[2025-07-07 01:03:15] [Rank 0] step:5321/10000 train_time:428745ms step_avg:80.58ms
+[2025-07-07 01:03:17] [Rank 0] step:5341/10000 train_time:430246ms step_avg:80.56ms
+[2025-07-07 01:03:18] [Rank 0] step:5361/10000 train_time:431746ms step_avg:80.53ms
+[2025-07-07 01:03:20] [Rank 0] step:5381/10000 train_time:433249ms step_avg:80.51ms
+[2025-07-07 01:03:22] [Rank 0] step:5401/10000 train_time:434805ms step_avg:80.50ms
+[2025-07-07 01:03:23] [Rank 0] step:5421/10000 train_time:436896ms step_avg:80.59ms
+[2025-07-07 01:03:25] [Rank 0] step:5441/10000 train_time:438398ms step_avg:80.57ms
+[2025-07-07 01:03:26] [Rank 0] step:5461/10000 train_time:439900ms step_avg:80.55ms
+[2025-07-07 01:03:28] [Rank 0] step:5481/10000 train_time:441403ms step_avg:80.53ms
+[2025-07-07 01:03:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:03:31] [Rank 0] PRINT: step:5500/10000 train_loss:1.0414 val_loss:1.0244 train_time:443563ms step_avg:80.65ms
+[2025-07-07 01:03:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:03:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:03:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:08:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:08:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:08:55] [Rank 0] Total Loss: 5.1601
+[2025-07-07 01:08:55] [Rank 0] Total FTA: 0.6650
+[2025-07-07 01:08:55] [Rank 0] Group 0 Loss: 5.3860
+[2025-07-07 01:08:55] [Rank 0] Group 1 Loss: 5.1803
+[2025-07-07 01:08:55] [Rank 0] Group 2 Loss: 4.8898
+[2025-07-07 01:08:55] [Rank 0] Group 3 Loss: 5.2118
+[2025-07-07 01:08:55] [Rank 0] Group 4 Loss: 5.0771
+[2025-07-07 01:08:55] [Rank 0] Group 5 Loss: 5.0966
+[2025-07-07 01:08:55] [Rank 0] Group 6 Loss: 5.1015
+[2025-07-07 01:08:55] [Rank 0] Group 7 Loss: 5.1903
+[2025-07-07 01:08:55] [Rank 0] Group 8 Loss: 5.1374
+[2025-07-07 01:08:55] [Rank 0] Group 9 Loss: 5.0672
+[2025-07-07 01:08:55] [Rank 0] Group 10 Loss: 5.1300
+[2025-07-07 01:08:55] [Rank 0] Group 11 Loss: 5.1775
+[2025-07-07 01:08:55] [Rank 0] Group 0 FTA: 0.8244
+[2025-07-07 01:08:55] [Rank 0] Group 1 FTA: 0.4740
+[2025-07-07 01:08:55] [Rank 0] Group 2 FTA: 0.7240
+[2025-07-07 01:08:55] [Rank 0] Group 3 FTA: 0.5729
+[2025-07-07 01:08:55] [Rank 0] Group 4 FTA: 0.6068
+[2025-07-07 01:08:55] [Rank 0] Group 5 FTA: 0.6693
+[2025-07-07 01:08:55] [Rank 0] Group 6 FTA: 0.5964
+[2025-07-07 01:08:55] [Rank 0] Group 7 FTA: 0.6589
+[2025-07-07 01:08:55] [Rank 0] Group 8 FTA: 0.6224
+[2025-07-07 01:08:55] [Rank 0] Group 9 FTA: 0.6719
+[2025-07-07 01:08:55] [Rank 0] Group 10 FTA: 0.6699
+[2025-07-07 01:08:55] [Rank 0] Group 11 FTA: 0.6895
+[2025-07-07 01:08:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:08:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:08:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:08:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
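Across the seven evaluations in this span, the streamed validation loss falls monotonically (1.3074 → 1.2203 → 1.1586 → 1.1156 → 1.0815 → 1.0512 → 1.0244) and Total FTA climbs (0.3167 → 0.6650), while the detailed-evaluation Total Loss drifts upward (4.6217 → 5.1601); the detailed loss is computed on the stratified QA sample rather than the validation stream, so the two series are not directly comparable. A small parser for extracting these curves from the log (the regexes assume the line formats shown above):

    import re

    VAL = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")
    FTA = re.compile(r"Total FTA: ([\d.]+)")

    def parse_log(path):
        vals, ftas = [], []
        for line in open(path):
            if m := VAL.search(line):
                vals.append((int(m[1]), float(m[2]), float(m[3])))
            elif m := FTA.search(line):
                ftas.append(float(m[1]))
        return vals, ftas   # e.g. (5500, 1.0414, 1.0244) and 0.6650 last in this span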
updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png +[2025-07-07 01:08:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png +[2025-07-07 01:08:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png +[2025-07-07 01:08:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png +[2025-07-07 01:08:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png +[2025-07-07 01:08:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png +[2025-07-07 01:08:57] [Rank 0] step:5501/10000 train_time:443584ms step_avg:80.64ms +[2025-07-07 01:08:57] [Rank 0] step:5501/10000 train_time:443584ms step_avg:80.64ms +[2025-07-07 01:08:58] [Rank 0] step:5521/10000 train_time:445081ms step_avg:80.62ms +[2025-07-07 01:08:58] [Rank 0] step:5521/10000 train_time:445081ms step_avg:80.62ms +[2025-07-07 01:09:00] [Rank 0] step:5541/10000 train_time:446578ms step_avg:80.60ms +[2025-07-07 01:09:00] [Rank 0] step:5541/10000 train_time:446578ms step_avg:80.60ms +[2025-07-07 01:09:01] [Rank 0] step:5561/10000 train_time:448076ms step_avg:80.57ms +[2025-07-07 01:09:01] [Rank 0] step:5561/10000 train_time:448076ms step_avg:80.57ms +[2025-07-07 01:09:03] [Rank 0] step:5581/10000 train_time:449836ms step_avg:80.60ms +[2025-07-07 01:09:03] [Rank 0] step:5581/10000 train_time:449836ms step_avg:80.60ms +[2025-07-07 01:09:04] [Rank 0] step:5601/10000 train_time:451413ms step_avg:80.60ms +[2025-07-07 01:09:04] [Rank 0] step:5601/10000 train_time:451413ms step_avg:80.60ms +[2025-07-07 01:09:06] [Rank 0] step:5621/10000 train_time:452910ms step_avg:80.57ms +[2025-07-07 01:09:06] [Rank 0] step:5621/10000 train_time:452910ms step_avg:80.57ms +[2025-07-07 01:09:07] [Rank 0] step:5641/10000 train_time:454405ms step_avg:80.55ms +[2025-07-07 01:09:07] [Rank 0] step:5641/10000 train_time:454405ms step_avg:80.55ms +[2025-07-07 01:09:09] [Rank 0] step:5661/10000 train_time:455900ms step_avg:80.53ms +[2025-07-07 01:09:09] [Rank 0] step:5661/10000 train_time:455900ms step_avg:80.53ms +[2025-07-07 01:09:11] [Rank 0] step:5681/10000 train_time:458060ms step_avg:80.63ms +[2025-07-07 01:09:11] [Rank 0] step:5681/10000 train_time:458060ms step_avg:80.63ms +[2025-07-07 01:09:12] [Rank 0] step:5701/10000 train_time:459555ms step_avg:80.61ms +[2025-07-07 01:09:12] [Rank 0] step:5701/10000 train_time:459555ms step_avg:80.61ms +[2025-07-07 01:09:14] [Rank 0] step:5721/10000 train_time:461051ms step_avg:80.59ms +[2025-07-07 01:09:14] [Rank 0] step:5721/10000 train_time:461051ms step_avg:80.59ms +[2025-07-07 01:09:15] [Rank 0] step:5741/10000 train_time:462550ms step_avg:80.57ms +[2025-07-07 01:09:15] [Rank 0] step:5741/10000 train_time:462550ms step_avg:80.57ms +[2025-07-07 01:09:17] [Rank 0] step:5761/10000 train_time:464048ms step_avg:80.55ms +[2025-07-07 01:09:17] [Rank 0] step:5761/10000 train_time:464048ms step_avg:80.55ms +[2025-07-07 01:09:19] [Rank 0] step:5781/10000 train_time:465785ms step_avg:80.57ms +[2025-07-07 01:09:19] [Rank 0] step:5781/10000 train_time:465785ms step_avg:80.57ms +[2025-07-07 01:09:20] [Rank 0] step:5801/10000 train_time:467282ms step_avg:80.55ms +[2025-07-07 01:09:20] [Rank 
+[2025-07-07 01:09:22] [Rank 0] step:5821/10000 train_time:468782ms step_avg:80.53ms
+[2025-07-07 01:09:23] [Rank 0] step:5841/10000 train_time:470281ms step_avg:80.51ms
+[2025-07-07 01:09:25] [Rank 0] step:5861/10000 train_time:472429ms step_avg:80.61ms
+[2025-07-07 01:09:27] [Rank 0] step:5881/10000 train_time:473926ms step_avg:80.59ms
+[2025-07-07 01:09:28] [Rank 0] step:5901/10000 train_time:475430ms step_avg:80.57ms
+[2025-07-07 01:09:30] [Rank 0] step:5921/10000 train_time:476932ms step_avg:80.55ms
+[2025-07-07 01:09:32] [Rank 0] step:5941/10000 train_time:478481ms step_avg:80.54ms
+[2025-07-07 01:09:34] [Rank 0] step:5961/10000 train_time:480575ms step_avg:80.62ms
+[2025-07-07 01:09:35] [Rank 0] step:5981/10000 train_time:482073ms step_avg:80.60ms
+[2025-07-07 01:09:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:09:37] [Rank 0] PRINT: step:6000/10000 train_loss:1.0166 val_loss:1.0028 train_time:483575ms step_avg:80.60ms
+[2025-07-07 01:09:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:09:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:09:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:15:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:15:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:15:07] [Rank 0] Total Loss: 5.1968
+[2025-07-07 01:15:07] [Rank 0] Total FTA: 0.7502
+[2025-07-07 01:15:07] [Rank 0] Group 0 Loss: 5.3404
+[2025-07-07 01:15:07] [Rank 0] Group 1 Loss: 5.0211
+[2025-07-07 01:15:07] [Rank 0] Group 2 Loss: 4.8890
+[2025-07-07 01:15:07] [Rank 0] Group 3 Loss: 5.0664
+[2025-07-07 01:15:07] [Rank 0] Group 4 Loss: 5.1113
+[2025-07-07 01:15:07] [Rank 0] Group 5 Loss: 5.1469
+[2025-07-07 01:15:07] [Rank 0] Group 6 Loss: 5.1902
+[2025-07-07 01:15:07] [Rank 0] Group 7 Loss: 5.2111
+[2025-07-07 01:15:07] [Rank 0] Group 8 Loss: 5.2055
+[2025-07-07 01:15:07] [Rank 0] Group 9 Loss: 5.2410
+[2025-07-07 01:15:07] [Rank 0] Group 10 Loss: 5.3379
+[2025-07-07 01:15:07] [Rank 0] Group 11 Loss: 5.2822
+[2025-07-07 01:15:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 01:15:07] [Rank 0] Group 1 FTA: 0.8490
+[2025-07-07 01:15:07] [Rank 0] Group 2 FTA: 0.6328
+[2025-07-07 01:15:07] [Rank 0] Group 3 FTA: 0.8125
+[2025-07-07 01:15:07] [Rank 0] Group 4 FTA: 0.7344
+[2025-07-07 01:15:07] [Rank 0] Group 5 FTA: 0.6901
+[2025-07-07 01:15:07] [Rank 0] Group 6 FTA: 0.6589
+[2025-07-07 01:15:07] [Rank 0] Group 7 FTA: 0.6953
+[2025-07-07 01:15:07] [Rank 0] Group 8 FTA: 0.7083
+[2025-07-07 01:15:07] [Rank 0] Group 9 FTA: 0.6758
+[2025-07-07 01:15:07] [Rank 0] Group 10 FTA: 0.6797
+[2025-07-07 01:15:07] [Rank 0] Group 11 FTA: 0.6992
+[2025-07-07 01:15:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:15:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:15:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:15:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:15:09] [Rank 0] step:6001/10000 train_time:483597ms step_avg:80.59ms
+[2025-07-07 01:15:10] [Rank 0] step:6021/10000 train_time:485197ms step_avg:80.58ms
+[2025-07-07 01:15:12] [Rank 0] step:6041/10000 train_time:487365ms step_avg:80.68ms
+[2025-07-07 01:15:14] [Rank 0] step:6061/10000 train_time:488857ms step_avg:80.66ms
+[2025-07-07 01:15:15] [Rank 0] step:6081/10000 train_time:490352ms step_avg:80.64ms
+[2025-07-07 01:15:17] [Rank 0] step:6101/10000 train_time:491848ms step_avg:80.62ms
+[2025-07-07 01:15:19] [Rank 0] step:6121/10000 train_time:493345ms step_avg:80.60ms
+[2025-07-07 01:15:21] [Rank 0] step:6141/10000 train_time:495508ms step_avg:80.69ms
+[2025-07-07 01:15:22] [Rank 0] step:6161/10000 train_time:497006ms step_avg:80.67ms
+[2025-07-07 01:15:24] [Rank 0] step:6181/10000 train_time:498504ms step_avg:80.65ms
+[2025-07-07 01:15:25] [Rank 0] step:6201/10000 train_time:500006ms step_avg:80.63ms
+[2025-07-07 01:15:27] [Rank 0] step:6221/10000 train_time:502151ms step_avg:80.72ms
+[2025-07-07 01:15:29] [Rank 0] step:6241/10000 train_time:503647ms step_avg:80.70ms
+[2025-07-07 01:15:30] [Rank 0] step:6261/10000 train_time:505148ms step_avg:80.68ms
+[2025-07-07 01:15:32] [Rank 0] step:6281/10000 train_time:506648ms step_avg:80.66ms
+[2025-07-07 01:15:34] [Rank 0] step:6301/10000 train_time:508147ms step_avg:80.65ms
+[2025-07-07 01:15:35] [Rank 0] step:6321/10000 train_time:510294ms step_avg:80.73ms
+[2025-07-07 01:15:37] [Rank 0] step:6341/10000 train_time:511794ms step_avg:80.71ms
+[2025-07-07 01:15:38] [Rank 0] step:6361/10000 train_time:513296ms step_avg:80.69ms
+[2025-07-07 01:15:40] [Rank 0] step:6381/10000 train_time:514797ms step_avg:80.68ms
+[2025-07-07 01:15:42] [Rank 0] step:6401/10000 train_time:516965ms step_avg:80.76ms
+[2025-07-07 01:15:44] [Rank 0] step:6421/10000 train_time:518465ms step_avg:80.75ms
+[2025-07-07 01:15:45] [Rank 0] step:6441/10000 train_time:519966ms step_avg:80.73ms
+[2025-07-07 01:15:47] [Rank 0] step:6461/10000 train_time:521468ms step_avg:80.71ms
+[2025-07-07 01:15:49] [Rank 0] step:6481/10000 train_time:522970ms step_avg:80.69ms
+[2025-07-07 01:15:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:15:51] [Rank 0] PRINT: step:6500/10000 train_loss:0.9968 val_loss:0.9863 train_time:525114ms step_avg:80.79ms
+[2025-07-07 01:15:51] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:15:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:15:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:21:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:21:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:21:18] [Rank 0] Total Loss: 5.2868
+[2025-07-07 01:21:18] [Rank 0] Total FTA: 0.7738
+[2025-07-07 01:21:18] [Rank 0] Group 0 Loss: 5.5303
+[2025-07-07 01:21:18] [Rank 0] Group 1 Loss: 5.0854
+[2025-07-07 01:21:18] [Rank 0] Group 2 Loss: 5.0656
+[2025-07-07 01:21:18] [Rank 0] Group 3 Loss: 5.2672
+[2025-07-07 01:21:18] [Rank 0] Group 4 Loss: 5.2482
+[2025-07-07 01:21:18] [Rank 0] Group 5 Loss: 5.2735
+[2025-07-07 01:21:18] [Rank 0] Group 6 Loss: 5.2023
+[2025-07-07 01:21:19] [Rank 0] Group 7 Loss: 5.3172
+[2025-07-07 01:21:19] [Rank 0] Group 8 Loss: 5.2942
+[2025-07-07 01:21:19] [Rank 0] Group 9 Loss: 5.3136
+[2025-07-07 01:21:19] [Rank 0] Group 10 Loss: 5.3430
+[2025-07-07 01:21:19] [Rank 0] Group 11 Loss: 5.2720
+[2025-07-07 01:21:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 01:21:19] [Rank 0] Group 1 FTA: 0.8359
+[2025-07-07 01:21:19] [Rank 0] Group 2 FTA: 0.7943
+[2025-07-07 01:21:19] [Rank 0] Group 3 FTA: 0.7135
+[2025-07-07 01:21:19] [Rank 0] Group 4 FTA: 0.7005
+[2025-07-07 01:21:19] [Rank 0] Group 5 FTA: 0.7266
+[2025-07-07 01:21:19] [Rank 0] Group 6 FTA: 0.6849
+[2025-07-07 01:21:19] [Rank 0] Group 7 FTA: 0.7526
+[2025-07-07 01:21:19] [Rank 0] Group 8 FTA: 0.7448
+[2025-07-07 01:21:19] [Rank 0] Group 9 FTA: 0.7109
+[2025-07-07 01:21:19] [Rank 0] Group 10 FTA: 0.7539
+[2025-07-07 01:21:19] [Rank 0] Group 11 FTA: 0.7188
+[2025-07-07 01:21:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:21:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:21:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:21:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:21:20] [Rank 0] step:6501/10000 train_time:525135ms step_avg:80.78ms
+[2025-07-07 01:21:22] [Rank 0] step:6521/10000 train_time:526635ms step_avg:80.76ms
+[2025-07-07 01:21:23] [Rank 0] step:6541/10000 train_time:528128ms step_avg:80.74ms
+[2025-07-07 01:21:25] [Rank 0] step:6561/10000 train_time:529623ms step_avg:80.72ms
+[2025-07-07 01:21:27] [Rank 0] step:6581/10000 train_time:531952ms step_avg:80.83ms
+[2025-07-07 01:21:28] [Rank 0] step:6601/10000 train_time:533529ms step_avg:80.83ms
+[2025-07-07 01:21:30] [Rank 0] step:6621/10000 train_time:535086ms step_avg:80.82ms
+[2025-07-07 01:21:32] [Rank 0] step:6641/10000 train_time:536583ms step_avg:80.80ms
+[2025-07-07 01:21:34] [Rank 0] step:6661/10000 train_time:538080ms step_avg:80.78ms
+[2025-07-07 01:21:35] [Rank 0] step:6681/10000 train_time:540221ms step_avg:80.86ms
+[2025-07-07 01:21:37] [Rank 0] step:6701/10000 train_time:541719ms step_avg:80.84ms
+[2025-07-07 01:21:38] [Rank 0] step:6721/10000 train_time:543216ms step_avg:80.82ms
+[2025-07-07 01:21:40] [Rank 0] step:6741/10000 train_time:544715ms step_avg:80.81ms
+[2025-07-07 01:21:42] [Rank 0] step:6761/10000 train_time:546863ms step_avg:80.88ms
+[2025-07-07 01:21:43] [Rank 0] step:6781/10000 train_time:548360ms step_avg:80.87ms
+[2025-07-07 01:21:45] [Rank 0] step:6801/10000 train_time:549858ms step_avg:80.85ms
+[2025-07-07 01:21:46] [Rank 0] step:6821/10000 train_time:551359ms step_avg:80.83ms
+[2025-07-07 01:21:48] [Rank 0] step:6841/10000 train_time:553121ms step_avg:80.85ms
+[2025-07-07 01:21:50] [Rank 0] step:6861/10000 train_time:555013ms step_avg:80.89ms
+[2025-07-07 01:21:51] [Rank 0] step:6881/10000 train_time:556514ms step_avg:80.88ms
+[2025-07-07 01:21:53] [Rank 0] step:6901/10000 train_time:558015ms step_avg:80.86ms
+[2025-07-07 01:21:54] [Rank 0] step:6921/10000 train_time:559517ms step_avg:80.84ms
+[2025-07-07 01:21:57] [Rank 0] step:6941/10000 train_time:561681ms step_avg:80.92ms
+[2025-07-07 01:21:58] [Rank 0] step:6961/10000 train_time:563182ms step_avg:80.91ms
+[2025-07-07 01:22:00] [Rank 0] step:6981/10000 train_time:564684ms step_avg:80.89ms
+[2025-07-07 01:22:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:22:02] [Rank 0] PRINT: step:7000/10000 train_loss:0.9805 val_loss:0.9721 train_time:566186ms step_avg:80.88ms
+[2025-07-07 01:22:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:22:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:22:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:27:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:27:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:27:29] [Rank 0] Total Loss: 5.2646
+[2025-07-07 01:27:29] [Rank 0] Total FTA: 0.7838
+[2025-07-07 01:27:29] [Rank 0] Group 0 Loss: 5.4118
+[2025-07-07 01:27:29] [Rank 0] Group 1 Loss: 4.9563
+[2025-07-07 01:27:29] [Rank 0] Group 2 Loss: 4.9211
+[2025-07-07 01:27:29] [Rank 0] Group 3 Loss: 5.3449
+[2025-07-07 01:27:29] [Rank 0] Group 4 Loss: 5.1976
+[2025-07-07 01:27:29] [Rank 0] Group 5 Loss: 5.2868
+[2025-07-07 01:27:29] [Rank 0] Group 6 Loss: 5.3079
+[2025-07-07 01:27:29] [Rank 0] Group 7 Loss: 5.2641
+[2025-07-07 01:27:29] [Rank 0] Group 8 Loss: 5.3006
+[2025-07-07 01:27:29] [Rank 0] Group 9 Loss: 5.2934
+[2025-07-07 01:27:29] [Rank 0] Group 10 Loss: 5.3043
+[2025-07-07 01:27:29] [Rank 0] Group 11 Loss: 5.3289
+[2025-07-07 01:27:29] [Rank 0] Group 0 FTA: 0.8440
+[2025-07-07 01:27:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 01:27:29] [Rank 0] Group 2 FTA: 0.6484
+[2025-07-07 01:27:29] [Rank 0] Group 3 FTA: 0.8307
+[2025-07-07 01:27:29] [Rank 0] Group 4 FTA: 0.8646
+[2025-07-07 01:27:29] [Rank 0] Group 5 FTA: 0.7578
+[2025-07-07 01:27:29] [Rank 0] Group 6 FTA: 0.7083
+[2025-07-07 01:27:29] [Rank 0] Group 7 FTA: 0.7578
+[2025-07-07 01:27:29] [Rank 0] Group 8 FTA: 0.7161
+[2025-07-07 01:27:29] [Rank 0] Group 9 FTA: 0.7500
+[2025-07-07 01:27:29] [Rank 0] Group 10 FTA: 0.7227
+[2025-07-07 01:27:29] [Rank 0] Group 11 FTA: 0.7725
+[2025-07-07 01:27:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:27:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:27:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:27:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:27:30] [Rank 0] step:7001/10000 train_time:566208ms step_avg:80.88ms
+[2025-07-07 01:27:32] [Rank 0] step:7021/10000 train_time:567762ms step_avg:80.87ms
+[2025-07-07 01:27:34] [Rank 0] step:7041/10000 train_time:569871ms step_avg:80.94ms
+[2025-07-07 01:27:35] [Rank 0] step:7061/10000 train_time:571366ms step_avg:80.92ms
+[2025-07-07 01:27:37] [Rank 0] step:7081/10000 train_time:572860ms step_avg:80.90ms
+[2025-07-07 01:27:38] [Rank 0] step:7101/10000 train_time:574354ms step_avg:80.88ms
+[2025-07-07 01:27:41] [Rank 0] step:7121/10000 train_time:576516ms step_avg:80.96ms
+[2025-07-07 01:27:42] [Rank 0] step:7141/10000 train_time:578010ms step_avg:80.94ms
+[2025-07-07 01:27:43] [Rank 0] step:7161/10000 train_time:579506ms step_avg:80.93ms
+[2025-07-07 01:27:45] [Rank 0] step:7181/10000 train_time:581004ms step_avg:80.91ms
+[2025-07-07 01:27:47] [Rank 0] step:7201/10000 train_time:582552ms step_avg:80.90ms
+[2025-07-07 01:27:48] [Rank 0] step:7221/10000 train_time:584333ms step_avg:80.92ms
+[2025-07-07 01:27:50] [Rank 0] step:7241/10000 train_time:585897ms step_avg:80.91ms
+[2025-07-07 01:27:51] [Rank 0] step:7261/10000 train_time:587394ms step_avg:80.90ms
+[2025-07-07 01:27:53] [Rank 0] step:7281/10000 train_time:588893ms step_avg:80.88ms
+[2025-07-07 01:27:55] [Rank 0] step:7301/10000 train_time:591045ms step_avg:80.95ms
+[2025-07-07 01:27:57] [Rank 0] step:7321/10000 train_time:592543ms step_avg:80.94ms
+[2025-07-07 01:27:58] [Rank 0] step:7341/10000 train_time:594044ms step_avg:80.92ms
+[2025-07-07 01:28:00] [Rank 0] step:7361/10000 train_time:595548ms step_avg:80.91ms
+[2025-07-07 01:28:01] [Rank 0] step:7381/10000 train_time:597053ms step_avg:80.89ms
+[2025-07-07 01:28:03] [Rank 0] step:7401/10000 train_time:598801ms step_avg:80.91ms
+[2025-07-07 01:28:04] [Rank 0] step:7421/10000 train_time:600307ms step_avg:80.89ms
+[2025-07-07 01:28:06] [Rank 0] step:7441/10000 train_time:601812ms step_avg:80.88ms
+[2025-07-07 01:28:07] [Rank 0] step:7461/10000 train_time:603317ms step_avg:80.86ms
+[2025-07-07 01:28:09] [Rank 0] step:7481/10000 train_time:605478ms step_avg:80.94ms
+[2025-07-07 01:28:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:28:12] [Rank 0] PRINT: step:7500/10000 train_loss:0.9660 val_loss:0.9590 train_time:606978ms step_avg:80.93ms
+[2025-07-07 01:28:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:28:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:28:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:33:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:33:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:33:38] [Rank 0] Total Loss: 5.3567
+[2025-07-07 01:33:38] [Rank 0] Total FTA: 0.8498
+[2025-07-07 01:33:38] [Rank 0] Group 0 Loss: 5.5750
+[2025-07-07 01:33:38] [Rank 0] Group 1 Loss: 5.1498
+[2025-07-07 01:33:38] [Rank 0] Group 2 Loss: 5.0203
+[2025-07-07 01:33:38] [Rank 0] Group 3 Loss: 5.3564
+[2025-07-07 01:33:38] [Rank 0] Group 4 Loss: 5.2552
+[2025-07-07 01:33:38] [Rank 0] Group 5 Loss: 5.2757
+[2025-07-07 01:33:38] [Rank 0] Group 6 Loss: 5.3975
+[2025-07-07 01:33:38] [Rank 0] Group 7 Loss: 5.3587
+[2025-07-07 01:33:38] [Rank 0] Group 8 Loss: 5.3497
+[2025-07-07 01:33:38] [Rank 0] Group 9 Loss: 5.3756
+[2025-07-07 01:33:38] [Rank 0] Group 10 Loss: 5.4213
+[2025-07-07 01:33:38] [Rank 0] Group 11 Loss: 5.4146
+[2025-07-07 01:33:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 01:33:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 01:33:38] [Rank 0] Group 2 FTA: 0.7448
+[2025-07-07 01:33:38] [Rank 0] Group 3 FTA: 0.9219
+[2025-07-07 01:33:38] [Rank 0] Group 4 FTA: 0.8698
+[2025-07-07 01:33:38] [Rank 0] Group 5 FTA: 0.8047
+[2025-07-07 01:33:38] [Rank 0] Group 6 FTA: 0.7839
+[2025-07-07 01:33:38] [Rank 0] Group 7 FTA: 0.8229
+[2025-07-07 01:33:38] [Rank 0] Group 8 FTA: 0.8281
+[2025-07-07 01:33:38] [Rank 0] Group 9 FTA: 0.7617
+[2025-07-07 01:33:38] [Rank 0] Group 10 FTA: 0.8027
+[2025-07-07 01:33:38] [Rank 0] Group 11 FTA: 0.7910
+[2025-07-07 01:33:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:33:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:33:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:33:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:33:40] [Rank 0] step:7501/10000 train_time:607000ms step_avg:80.92ms
+[2025-07-07 01:33:41] [Rank 0] step:7521/10000 train_time:608503ms step_avg:80.91ms
+[2025-07-07 01:33:43] [Rank 0] step:7541/10000 train_time:609996ms step_avg:80.89ms
+[2025-07-07 01:33:45] [Rank 0] step:7561/10000 train_time:611751ms step_avg:80.91ms
+[2025-07-07 01:33:46] [Rank 0] step:7581/10000 train_time:613658ms step_avg:80.95ms
+[2025-07-07 01:33:48] [Rank 0] step:7601/10000 train_time:615154ms step_avg:80.93ms
+[2025-07-07 01:33:49] [Rank 0] step:7621/10000 train_time:616651ms step_avg:80.91ms
+[2025-07-07 01:33:51] [Rank 0] step:7641/10000 train_time:618147ms step_avg:80.90ms
+[2025-07-07 01:33:53] [Rank 0] step:7661/10000 train_time:620302ms step_avg:80.97ms
+[2025-07-07 01:33:54] [Rank 0] step:7681/10000 train_time:621799ms step_avg:80.95ms
+[2025-07-07 01:33:56] [Rank 0] step:7701/10000 train_time:623297ms step_avg:80.94ms
+[2025-07-07 01:33:57] [Rank 0] step:7721/10000 train_time:624796ms step_avg:80.92ms
+[2025-07-07 01:33:59] [Rank 0] step:7741/10000 train_time:626297ms step_avg:80.91ms
+[2025-07-07 01:34:01] [Rank 0] step:7761/10000 train_time:628037ms step_avg:80.92ms
+[2025-07-07 01:34:02] [Rank 0] step:7781/10000 train_time:629536ms step_avg:80.91ms
+[2025-07-07 01:34:04] [Rank 0] step:7801/10000 train_time:631039ms step_avg:80.89ms
+[2025-07-07 01:34:05] [Rank 0] step:7821/10000 train_time:632542ms step_avg:80.88ms
+[2025-07-07 01:34:07] [Rank 0] step:7841/10000 train_time:634360ms step_avg:80.90ms
+[2025-07-07 01:34:08] [Rank 0] step:7861/10000 train_time:635860ms step_avg:80.89ms
+[2025-07-07 01:34:10] [Rank 0] step:7881/10000 train_time:637455ms step_avg:80.89ms
+[2025-07-07 01:34:11] [Rank 0] step:7901/10000 train_time:638960ms step_avg:80.87ms
+[2025-07-07 01:34:13] [Rank 0] step:7921/10000 train_time:640517ms step_avg:80.86ms
+[2025-07-07 01:34:15] [Rank 0] step:7941/10000 train_time:642203ms step_avg:80.87ms
+[2025-07-07 01:34:16] [Rank 0] step:7961/10000 train_time:643711ms step_avg:80.86ms
+[2025-07-07 01:34:18] [Rank 0] step:7981/10000 train_time:645219ms step_avg:80.84ms
+[2025-07-07 01:34:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:34:20] [Rank 0] PRINT: step:8000/10000 train_loss:0.9533 val_loss:0.9476 train_time:646727ms step_avg:80.84ms
+[2025-07-07 01:34:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:34:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 01:34:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:39:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:39:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:39:42] [Rank 0] Total Loss: 5.4580
+[2025-07-07 01:39:42] [Rank 0] Total FTA: 0.8457
+[2025-07-07 01:39:42] [Rank 0] Group 0 Loss: 5.5790
+[2025-07-07 01:39:42] [Rank 0] Group 1 Loss: 5.1950
+[2025-07-07 01:39:42] [Rank 0] Group 2 Loss: 5.2900
+[2025-07-07 01:39:42] [Rank 0] Group 3 Loss: 5.6563
+[2025-07-07 01:39:42] [Rank 0] Group 4 Loss: 5.2587
+[2025-07-07 01:39:42] [Rank 0] Group 5 Loss: 5.3754
+[2025-07-07 01:39:42] [Rank 0] Group 6 Loss: 5.4046
+[2025-07-07 01:39:42] [Rank 0] Group 7 Loss: 5.4519
+[2025-07-07 01:39:42] [Rank 0] Group 8 Loss: 5.5140
+[2025-07-07 01:39:42] [Rank 0] Group 9 Loss: 5.5888
+[2025-07-07 01:39:42] [Rank 0] Group 10 Loss: 5.4689
+[2025-07-07 01:39:42] [Rank 0] Group 11 Loss: 5.5234
+[2025-07-07 01:39:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 01:39:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 01:39:42] [Rank 0] Group 2 FTA: 0.7370
+[2025-07-07 01:39:42] [Rank 0] Group 3 FTA: 0.8594
+[2025-07-07 01:39:42] [Rank 0] Group 4 FTA: 0.9062
+[2025-07-07 01:39:42] [Rank 0] Group 5 FTA: 0.7865
+[2025-07-07 01:39:42] [Rank 0] Group 6 FTA: 0.7656
+[2025-07-07 01:39:42] [Rank 0] Group 7 FTA: 0.8203
+[2025-07-07 01:39:42] [Rank 0] Group 8 FTA: 0.8307
+[2025-07-07 01:39:42] [Rank 0] Group 9 FTA: 0.8047
+[2025-07-07 01:39:42] [Rank 0] Group 10 FTA: 0.7832
+[2025-07-07 01:39:42] [Rank 0] Group 11 FTA: 0.7939
+[2025-07-07 01:39:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:39:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:39:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:39:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:39:43] [Rank 0] step:8001/10000 train_time:646748ms step_avg:80.83ms
+[2025-07-07 01:39:45] [Rank 0] step:8021/10000 train_time:648892ms step_avg:80.90ms
+[2025-07-07 01:39:47] [Rank 0] step:8041/10000 train_time:650387ms step_avg:80.88ms
+[2025-07-07 01:39:48] [Rank 0] step:8061/10000 train_time:651883ms step_avg:80.87ms
+[2025-07-07 01:39:50] [Rank 0] step:8081/10000 train_time:653381ms step_avg:80.85ms
+[2025-07-07 01:39:52] [Rank 0] step:8101/10000 train_time:654879ms step_avg:80.84ms
+[2025-07-07 01:39:53] [Rank 0] step:8121/10000 train_time:656616ms step_avg:80.85ms
+[2025-07-07 01:39:55] [Rank 0] step:8141/10000 train_time:658114ms step_avg:80.84ms
+[2025-07-07 01:39:56] [Rank 0] step:8161/10000 train_time:659612ms step_avg:80.82ms
+[2025-07-07 01:39:58] [Rank 0] step:8181/10000 train_time:661112ms step_avg:80.81ms
+[2025-07-07 01:40:00] [Rank 0] step:8201/10000 train_time:663265ms step_avg:80.88ms
+[2025-07-07 01:40:01] [Rank 0] step:8221/10000 train_time:664766ms step_avg:80.86ms
+[2025-07-07 01:40:03] [Rank 0] step:8241/10000 train_time:666269ms step_avg:80.85ms
+[2025-07-07 01:40:04] [Rank 0] step:8261/10000 train_time:667770ms step_avg:80.83ms
+[2025-07-07 01:40:06] [Rank 0] step:8281/10000 train_time:669324ms step_avg:80.83ms
+[2025-07-07 01:40:08] [Rank 0] step:8301/10000 train_time:671446ms step_avg:80.89ms
0] step:8301/10000 train_time:671446ms step_avg:80.89ms +[2025-07-07 01:40:09] [Rank 0] step:8321/10000 train_time:672946ms step_avg:80.87ms +[2025-07-07 01:40:09] [Rank 0] step:8321/10000 train_time:672946ms step_avg:80.87ms +[2025-07-07 01:40:11] [Rank 0] step:8341/10000 train_time:674449ms step_avg:80.86ms +[2025-07-07 01:40:11] [Rank 0] step:8341/10000 train_time:674449ms step_avg:80.86ms +[2025-07-07 01:40:12] [Rank 0] step:8361/10000 train_time:675950ms step_avg:80.85ms +[2025-07-07 01:40:12] [Rank 0] step:8361/10000 train_time:675950ms step_avg:80.85ms +[2025-07-07 01:40:15] [Rank 0] step:8381/10000 train_time:678114ms step_avg:80.91ms +[2025-07-07 01:40:15] [Rank 0] step:8381/10000 train_time:678114ms step_avg:80.91ms +[2025-07-07 01:40:16] [Rank 0] step:8401/10000 train_time:679613ms step_avg:80.90ms +[2025-07-07 01:40:16] [Rank 0] step:8401/10000 train_time:679613ms step_avg:80.90ms +[2025-07-07 01:40:18] [Rank 0] step:8421/10000 train_time:681115ms step_avg:80.88ms +[2025-07-07 01:40:18] [Rank 0] step:8421/10000 train_time:681115ms step_avg:80.88ms +[2025-07-07 01:40:19] [Rank 0] step:8441/10000 train_time:682617ms step_avg:80.87ms +[2025-07-07 01:40:19] [Rank 0] step:8441/10000 train_time:682617ms step_avg:80.87ms +[2025-07-07 01:40:21] [Rank 0] step:8461/10000 train_time:684120ms step_avg:80.86ms +[2025-07-07 01:40:21] [Rank 0] step:8461/10000 train_time:684120ms step_avg:80.86ms +[2025-07-07 01:40:22] [Rank 0] step:8481/10000 train_time:685860ms step_avg:80.87ms +[2025-07-07 01:40:22] [Rank 0] step:8481/10000 train_time:685860ms step_avg:80.87ms +[2025-07-07 01:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 01:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 01:40:25] [Rank 0] PRINT: step:8500/10000 train_loss:0.9428 val_loss:0.9386 train_time:687362ms step_avg:80.87ms +[2025-07-07 01:40:25] [Rank 0] PRINT: step:8500/10000 train_loss:0.9428 val_loss:0.9386 train_time:687362ms step_avg:80.87ms +[2025-07-07 01:40:25] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 01:40:25] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 01:40:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 01:40:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 01:40:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:45:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:45:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:45:52] [Rank 0] Total Loss: 5.4764
+[2025-07-07 01:45:52] [Rank 0] Total FTA: 0.8850
+[2025-07-07 01:45:52] [Rank 0] Group 0 Loss: 5.7430
+[2025-07-07 01:45:52] [Rank 0] Group 1 Loss: 5.3931
+[2025-07-07 01:45:52] [Rank 0] Group 2 Loss: 5.1138
+[2025-07-07 01:45:52] [Rank 0] Group 3 Loss: 5.5281
+[2025-07-07 01:45:52] [Rank 0] Group 4 Loss: 5.4363
+[2025-07-07 01:45:52] [Rank 0] Group 5 Loss: 5.3647
+[2025-07-07 01:45:52] [Rank 0] Group 6 Loss: 5.4804
+[2025-07-07 01:45:52] [Rank 0] Group 7 Loss: 5.4415
+[2025-07-07 01:45:52] [Rank 0] Group 8 Loss: 5.5048
+[2025-07-07 01:45:52] [Rank 0] Group 9 Loss: 5.5828
+[2025-07-07 01:45:52] [Rank 0] Group 10 Loss: 5.4974
+[2025-07-07 01:45:52] [Rank 0] Group 11 Loss: 5.4446
+[2025-07-07 01:45:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 01:45:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 01:45:52] [Rank 0] Group 2 FTA: 0.8464
+[2025-07-07 01:45:52] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 01:45:52] [Rank 0] Group 4 FTA: 0.8750
+[2025-07-07 01:45:52] [Rank 0] Group 5 FTA: 0.8724
+[2025-07-07 01:45:52] [Rank 0] Group 6 FTA: 0.8594
+[2025-07-07 01:45:52] [Rank 0] Group 7 FTA: 0.8438
+[2025-07-07 01:45:52] [Rank 0] Group 8 FTA: 0.8203
+[2025-07-07 01:45:52] [Rank 0] Group 9 FTA: 0.7891
+[2025-07-07 01:45:52] [Rank 0] Group 10 FTA: 0.8477
+[2025-07-07 01:45:52] [Rank 0] Group 11 FTA: 0.8271
+[2025-07-07 01:45:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:45:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:45:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:45:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:45:54] [Rank 0] step:8501/10000 train_time:687383ms step_avg:80.86ms
+[2025-07-07 01:45:55] [Rank 0] step:8521/10000 train_time:688889ms step_avg:80.85ms
+[2025-07-07 01:45:57] [Rank 0] step:8541/10000 train_time:690383ms step_avg:80.83ms
+[2025-07-07 01:45:59] [Rank 0] step:8561/10000 train_time:692537ms step_avg:80.89ms
+[2025-07-07 01:46:00] [Rank 0] step:8581/10000 train_time:694032ms step_avg:80.88ms
+[2025-07-07 01:46:02] [Rank 0] step:8601/10000 train_time:695529ms step_avg:80.87ms
+[2025-07-07 01:46:03] [Rank 0] step:8621/10000 train_time:697028ms step_avg:80.85ms
+[2025-07-07 01:46:05] [Rank 0] step:8641/10000 train_time:698783ms step_avg:80.87ms
+[2025-07-07 01:46:07] [Rank 0] step:8661/10000 train_time:700682ms step_avg:80.90ms
+[2025-07-07 01:46:08] [Rank 0] step:8681/10000 train_time:702179ms step_avg:80.89ms
+[2025-07-07 01:46:10] [Rank 0] step:8701/10000 train_time:703681ms step_avg:80.87ms
+[2025-07-07 01:46:11] [Rank 0] step:8721/10000 train_time:705183ms step_avg:80.86ms
+[2025-07-07 01:46:14] [Rank 0] step:8741/10000 train_time:707324ms step_avg:80.92ms
+[2025-07-07 01:46:15] [Rank 0] step:8761/10000 train_time:708831ms step_avg:80.91ms
+[2025-07-07 01:46:17] [Rank 0] step:8781/10000 train_time:710334ms step_avg:80.89ms
+[2025-07-07 01:46:18] [Rank 0] step:8801/10000 train_time:711839ms step_avg:80.88ms
+[2025-07-07 01:46:20] [Rank 0] step:8821/10000 train_time:713345ms step_avg:80.87ms
+[2025-07-07 01:46:21] [Rank 0] step:8841/10000 train_time:715087ms step_avg:80.88ms
+[2025-07-07 01:46:23] [Rank 0] step:8861/10000 train_time:716588ms step_avg:80.87ms
+[2025-07-07 01:46:24] [Rank 0] step:8881/10000 train_time:718091ms step_avg:80.86ms
+[2025-07-07 01:46:26] [Rank 0] step:8901/10000 train_time:719594ms step_avg:80.84ms
+[2025-07-07 01:46:28] [Rank 0] step:8921/10000 train_time:721331ms step_avg:80.86ms
+[2025-07-07 01:46:29] [Rank 0] step:8941/10000 train_time:722834ms step_avg:80.84ms
+[2025-07-07 01:46:31] [Rank 0] step:8961/10000 train_time:724337ms step_avg:80.83ms
+[2025-07-07 01:46:32] [Rank 0] step:8981/10000 train_time:725841ms step_avg:80.82ms
+[2025-07-07 01:46:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:46:35] [Rank 0] PRINT: step:9000/10000 train_loss:0.9342 val_loss:0.9318 train_time:727346ms step_avg:80.82ms
+[2025-07-07 01:46:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:46:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
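The recurring val_tokens warning above can be checked by hand: 1966080 / 262144 = 7.5, so a validation loop that only consumes whole batches scores 7 * 262144 = 1835008 tokens and skips the remaining 131072. A minimal sketch of that arithmetic (variable names hypothetical, values taken from the warning itself):

    val_tokens, val_batch_size = 1966080, 262144
    full_batches = val_tokens // val_batch_size      # 7 whole batches
    evaluated = full_batches * val_batch_size        # 1835008 tokens actually scored
    missed = val_tokens - evaluated                  # 131072 tokens never evaluated
    assert val_tokens % val_batch_size != 0          # the condition behind the warning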
+[2025-07-07 01:46:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:51:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:51:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:51:57] [Rank 0] Total Loss: 5.5095
+[2025-07-07 01:51:57] [Rank 0] Total FTA: 0.8921
+[2025-07-07 01:51:57] [Rank 0] Group 0 Loss: 5.7536
+[2025-07-07 01:51:57] [Rank 0] Group 1 Loss: 5.2893
+[2025-07-07 01:51:57] [Rank 0] Group 2 Loss: 5.2538
+[2025-07-07 01:51:57] [Rank 0] Group 3 Loss: 5.6495
+[2025-07-07 01:51:57] [Rank 0] Group 4 Loss: 5.4098
+[2025-07-07 01:51:57] [Rank 0] Group 5 Loss: 5.4205
+[2025-07-07 01:51:57] [Rank 0] Group 6 Loss: 5.4951
+[2025-07-07 01:51:57] [Rank 0] Group 7 Loss: 5.5264
+[2025-07-07 01:51:57] [Rank 0] Group 8 Loss: 5.4999
+[2025-07-07 01:51:57] [Rank 0] Group 9 Loss: 5.4914
+[2025-07-07 01:51:57] [Rank 0] Group 10 Loss: 5.5377
+[2025-07-07 01:51:57] [Rank 0] Group 11 Loss: 5.5162
+[2025-07-07 01:51:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 01:51:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 01:51:57] [Rank 0] Group 2 FTA: 0.8568
+[2025-07-07 01:51:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 01:51:57] [Rank 0] Group 4 FTA: 0.8724
+[2025-07-07 01:51:57] [Rank 0] Group 5 FTA: 0.8255
+[2025-07-07 01:51:57] [Rank 0] Group 6 FTA: 0.8802
+[2025-07-07 01:51:57] [Rank 0] Group 7 FTA: 0.8646
+[2025-07-07 01:51:57] [Rank 0] Group 8 FTA: 0.8490
+[2025-07-07 01:51:57] [Rank 0] Group 9 FTA: 0.8320
+[2025-07-07 01:51:57] [Rank 0] Group 10 FTA: 0.8379
+[2025-07-07 01:51:57] [Rank 0] Group 11 FTA: 0.8486
+[2025-07-07 01:51:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:51:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:52:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:52:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:52:02] [Rank 0] step:9001/10000 train_time:727375ms step_avg:80.81ms
+[2025-07-07 01:52:03] [Rank 0] step:9021/10000 train_time:729635ms step_avg:80.88ms
+[2025-07-07 01:52:05] [Rank 0] step:9041/10000 train_time:731285ms step_avg:80.89ms
+[2025-07-07 01:52:06] [Rank 0] step:9061/10000 train_time:732782ms step_avg:80.87ms
+[2025-07-07 01:52:08] [Rank 0] step:9081/10000 train_time:734278ms step_avg:80.86ms
+[2025-07-07 01:52:10] [Rank 0] step:9101/10000 train_time:736443ms step_avg:80.92ms
+[2025-07-07 01:52:11] [Rank 0] step:9121/10000 train_time:737939ms step_avg:80.91ms
+[2025-07-07 01:52:13] [Rank 0] step:9141/10000 train_time:739436ms step_avg:80.89ms
+[2025-07-07 01:52:14] [Rank 0] step:9161/10000 train_time:740935ms step_avg:80.88ms
+[2025-07-07 01:52:16] [Rank 0] step:9181/10000 train_time:742438ms step_avg:80.87ms
+[2025-07-07 01:52:18] [Rank 0] step:9201/10000 train_time:744174ms step_avg:80.88ms
+[2025-07-07 01:52:19] [Rank 0] step:9221/10000 train_time:745673ms step_avg:80.87ms
+[2025-07-07 01:52:21] [Rank 0] step:9241/10000 train_time:747173ms step_avg:80.85ms
+[2025-07-07 01:52:22] [Rank 0] step:9261/10000 train_time:748674ms step_avg:80.84ms
+[2025-07-07 01:52:24] [Rank 0] step:9281/10000 train_time:750842ms step_avg:80.90ms
+[2025-07-07 01:52:26] [Rank 0] step:9301/10000 train_time:752345ms step_avg:80.89ms
+[2025-07-07 01:52:27] [Rank 0] step:9321/10000 train_time:753849ms step_avg:80.88ms
+[2025-07-07 01:52:29] [Rank 0] step:9341/10000 train_time:755354ms step_avg:80.86ms
+[2025-07-07 01:52:31] [Rank 0] step:9361/10000 train_time:756859ms step_avg:80.85ms
+[2025-07-07 01:52:33] [Rank 0] step:9381/10000 train_time:759023ms step_avg:80.91ms
+[2025-07-07 01:52:34] [Rank 0] step:9401/10000 train_time:760528ms step_avg:80.90ms
+[2025-07-07 01:52:36] [Rank 0] step:9421/10000 train_time:762032ms step_avg:80.89ms
+[2025-07-07 01:52:37] [Rank 0] step:9441/10000 train_time:763537ms step_avg:80.87ms
+[2025-07-07 01:52:39] [Rank 0] step:9461/10000 train_time:765690ms step_avg:80.93ms
+[2025-07-07 01:52:41] [Rank 0] step:9481/10000 train_time:767192ms step_avg:80.92ms
+[2025-07-07 01:52:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:52:43] [Rank 0] PRINT: step:9500/10000 train_loss:0.9272 val_loss:0.9264 train_time:768694ms step_avg:80.92ms
+[2025-07-07 01:52:43] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:52:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
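FTA in these reports is first-token accuracy: the logits at the last prompt position are argmaxed and the result is compared against the first token of the reference answer, which is encoded with a leading space to match how answers follow questions in the training text. A minimal sketch of the core check, assuming logits of shape [seq_len, vocab_size] as produced by the evaluation code logged below:

    import torch

    def first_token_correct(logits: torch.Tensor, prompt_len: int, expected_token: int) -> bool:
        # position prompt_len - 1 is the last prompt token; its logits
        # predict the first token of the answer
        predicted = torch.argmax(logits[prompt_len - 1, :]).item()
        return predicted == expected_token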
+[2025-07-07 01:52:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 01:58:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 01:58:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 01:58:09] [Rank 0] Total Loss: 5.5885
+[2025-07-07 01:58:09] [Rank 0] Total FTA: 0.9116
+[2025-07-07 01:58:09] [Rank 0] Group 0 Loss: 5.7535
+[2025-07-07 01:58:09] [Rank 0] Group 1 Loss: 5.4396
+[2025-07-07 01:58:09] [Rank 0] Group 2 Loss: 5.2651
+[2025-07-07 01:58:09] [Rank 0] Group 3 Loss: 5.7042
+[2025-07-07 01:58:09] [Rank 0] Group 4 Loss: 5.4265
+[2025-07-07 01:58:09] [Rank 0] Group 5 Loss: 5.4345
+[2025-07-07 01:58:09] [Rank 0] Group 6 Loss: 5.5521
+[2025-07-07 01:58:09] [Rank 0] Group 7 Loss: 5.6897
+[2025-07-07 01:58:09] [Rank 0] Group 8 Loss: 5.6212
+[2025-07-07 01:58:10] [Rank 0] Group 9 Loss: 5.7147
+[2025-07-07 01:58:10] [Rank 0] Group 10 Loss: 5.6620
+[2025-07-07 01:58:10] [Rank 0] Group 11 Loss: 5.6119
+[2025-07-07 01:58:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 01:58:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 01:58:10] [Rank 0] Group 2 FTA: 0.8333
+[2025-07-07 01:58:10] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 01:58:10] [Rank 0] Group 4 FTA: 0.9062
+[2025-07-07 01:58:10] [Rank 0] Group 5 FTA: 0.9141
+[2025-07-07 01:58:10] [Rank 0] Group 6 FTA: 0.8828
+[2025-07-07 01:58:10] [Rank 0] Group 7 FTA: 0.9062
+[2025-07-07 01:58:10] [Rank 0] Group 8 FTA: 0.8568
+[2025-07-07 01:58:10] [Rank 0] Group 9 FTA: 0.8555
+[2025-07-07 01:58:10] [Rank 0] Group 10 FTA: 0.8770
+[2025-07-07 01:58:10] [Rank 0] Group 11 FTA: 0.8740
+[2025-07-07 01:58:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 01:58:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 01:58:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 01:58:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 01:58:11] [Rank 0] step:9501/10000 train_time:768715ms step_avg:80.91ms
+[2025-07-07 01:58:13] [Rank 0] step:9521/10000 train_time:770211ms step_avg:80.90ms
+[2025-07-07 01:58:15] [Rank 0] step:9541/10000 train_time:772394ms step_avg:80.96ms
+[2025-07-07 01:58:16] [Rank 0] step:9561/10000 train_time:773870ms step_avg:80.94ms
+[2025-07-07 01:58:18] [Rank 0] step:9581/10000 train_time:775363ms step_avg:80.93ms
+[2025-07-07 01:58:19] [Rank 0] step:9601/10000 train_time:776859ms step_avg:80.91ms
+[2025-07-07 01:58:21] [Rank 0] step:9621/10000 train_time:778497ms step_avg:80.92ms
+[2025-07-07 01:58:23] [Rank 0] step:9641/10000 train_time:780254ms step_avg:80.93ms
+[2025-07-07 01:58:24] [Rank 0] step:9661/10000 train_time:781874ms step_avg:80.93ms
+[2025-07-07 01:58:26] [Rank 0] step:9681/10000 train_time:783371ms step_avg:80.92ms
+[2025-07-07 01:58:27] [Rank 0] step:9701/10000 train_time:784870ms step_avg:80.91ms
+[2025-07-07 01:58:29] [Rank 0] step:9721/10000 train_time:786370ms step_avg:80.89ms
+[2025-07-07 01:58:30] [Rank 0] step:9741/10000 train_time:788109ms step_avg:80.91ms
+[2025-07-07 01:58:32] [Rank 0] step:9761/10000 train_time:789610ms step_avg:80.89ms
+[2025-07-07 01:58:33] [Rank 0] step:9781/10000 train_time:791112ms step_avg:80.88ms
+[2025-07-07 01:58:35] [Rank 0] step:9801/10000 train_time:792614ms step_avg:80.87ms
+[2025-07-07 01:58:37] [Rank 0] step:9821/10000 train_time:794787ms step_avg:80.93ms
+[2025-07-07 01:58:39] [Rank 0] step:9841/10000 train_time:796291ms step_avg:80.92ms
+[2025-07-07 01:58:40] [Rank 0] step:9861/10000 train_time:797795ms step_avg:80.90ms
+[2025-07-07 01:58:42] [Rank 0] step:9881/10000 train_time:799303ms step_avg:80.89ms
+[2025-07-07 01:58:44] [Rank 0] step:9901/10000 train_time:800807ms step_avg:80.88ms
+[2025-07-07 01:58:45] [Rank 0] step:9921/10000 train_time:802970ms step_avg:80.94ms
+[2025-07-07 01:58:47] [Rank 0] step:9941/10000 train_time:804476ms step_avg:80.93ms
+[2025-07-07 01:58:48] [Rank 0] step:9961/10000 train_time:805982ms step_avg:80.91ms
+[2025-07-07 01:58:50] [Rank 0] step:9981/10000 train_time:807488ms step_avg:80.90ms
+[2025-07-07 01:58:52] [Rank 0] step:10000/10000 train_time:809561ms step_avg:80.96ms
+[2025-07-07 01:58:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 01:58:53] [Rank 0] PRINT: step:10000/10000 train_loss:0.9220 val_loss:0.9222 train_time:809642ms step_avg:80.96ms
+[2025-07-07 01:58:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 01:58:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
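The group indices 0-11 in these reports come from the power-law class construction in the script logged below (generate_powerlaw_selection_counts): group 0 holds a single class, each group g >= 1 holds 2^(g-1) classes, and every class in group g receives 2^(m-g) training samples, so higher-numbered groups contain rarer classes, which lines up with the generally lower FTA of the later groups. A worked check, assuming m = 11 to match the twelve groups reported here (the actual m_val is not visible in this section):

    def generate_powerlaw_selection_counts(m):
        # same construction as in the training script logged below
        selection_counts, class_groups, class_id = {}, [], 0
        for group_id in range(m + 1):
            num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
            samples_per_class = 2 ** (m - group_id)
            for _ in range(num_classes):
                selection_counts[class_id] = samples_per_class
                class_groups.append(group_id)
                class_id += 1
        return selection_counts, class_groups

    counts, groups = generate_powerlaw_selection_counts(11)
    print(len(counts))  # 2048 classes in total
    print(max(groups))  # 11 -> groups 0..11, as reported above
    print(counts[0])    # 2048 samples for the lone group-0 class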
+[2025-07-07 01:58:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 02:04:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 02:04:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 02:04:18] [Rank 0] Total Loss: 5.6089
+[2025-07-07 02:04:18] [Rank 0] Total FTA: 0.9159
+[2025-07-07 02:04:18] [Rank 0] Group 0 Loss: 5.8468
+[2025-07-07 02:04:18] [Rank 0] Group 1 Loss: 5.3405
+[2025-07-07 02:04:18] [Rank 0] Group 2 Loss: 5.3159
+[2025-07-07 02:04:18] [Rank 0] Group 3 Loss: 5.7932
+[2025-07-07 02:04:18] [Rank 0] Group 4 Loss: 5.5393
+[2025-07-07 02:04:18] [Rank 0] Group 5 Loss: 5.4638
+[2025-07-07 02:04:18] [Rank 0] Group 6 Loss: 5.6049
+[2025-07-07 02:04:18] [Rank 0] Group 7 Loss: 5.5495
+[2025-07-07 02:04:18] [Rank 0] Group 8 Loss: 5.5591
+[2025-07-07 02:04:18] [Rank 0] Group 9 Loss: 5.6607
+[2025-07-07 02:04:18] [Rank 0] Group 10 Loss: 5.6281
+[2025-07-07 02:04:18] [Rank 0] Group 11 Loss: 5.6719
+[2025-07-07 02:04:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 02:04:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 02:04:18] [Rank 0] Group 2 FTA: 0.8880
+[2025-07-07 02:04:18] [Rank 0] Group 3 FTA: 0.9167
+[2025-07-07 02:04:18] [Rank 0] Group 4 FTA: 0.9349
+[2025-07-07 02:04:18] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-07 02:04:18] [Rank 0] Group 6 FTA: 0.8880
+[2025-07-07 02:04:18] [Rank 0] Group 7 FTA: 0.9010
+[2025-07-07 02:04:18] [Rank 0] Group 8 FTA: 0.8880
+[2025-07-07 02:04:18] [Rank 0] Group 9 FTA: 0.8438
+[2025-07-07 02:04:18] [Rank 0] Group 10 FTA: 0.8535
+[2025-07-07 02:04:18] [Rank 0] Group 11 FTA: 0.8955
+[2025-07-07 02:04:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 02:04:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 02:04:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 02:04:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 02:04:19] [Rank 0] step:10001/10000 train_time:809663ms step_avg:80.96ms
+[2025-07-07 02:04:19] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 02:04:19 2025 ---
+[2025-07-07 02:04:19] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/config.json b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..e4bae7217a9a59f217a1ce5b0c96b834478196c7
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "03cfa763-ee4f-4d30-af00-9334706775a2",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..e9bb5916ee46081b379713ecb19eac5575d052a9
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d7edad12d93ef65ef65bc54f56fc86ccb12f474d3a001cb2b26f8b7a3c83030
+size 434084
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..aff3735c5fc5e90b9eba7ee6a76f4ae639b5caa5
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a3d0f09cb2918f11ea79214fbdabc33961ce74d0f0b5047e8404d40223e882cd
+size 354087
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..24b3ac4b9780d4043727801f338da7f3ac30c733
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c681cbb294cb9866ed4316d131aa4c4e923bccb9a932a2bd6fb9fa7ea24d8f4
+size 113966
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..96138bfff59095c82f00ca6f765f1773039b5c28
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60685ff27ebf6d54c89b2559db96f5b6babdb1a8bdc5308a93171006ba6e47ad
+size 103695
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/training_log_03cfa763-ee4f-4d30-af00-9334706775a2.txt b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/training_log_03cfa763-ee4f-4d30-af00-9334706775a2.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5eb3ab46dfe5bdb2239d2a722f559b2744e0575e
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/training_log_03cfa763-ee4f-4d30-af00-9334706775a2.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 08:42:45] [Rank 0] PRINT: --- Script Start: Sun Jul 6 08:42:45 2025 ---
+[2025-07-06 08:42:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001)
+[2025-07-06 08:42:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 08:42:45] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-06 08:42:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42
+[2025-07-06 08:42:45] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # cycling the shards enables multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # (attributes below carry no type annotations, so @dataclass registers no
+    # fields; the class acts as a plain namespace, which is why its repr is the
+    # bare "Hyperparameters()" seen in the log above)
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000  #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500  # Original: 125
+    save_checkpoint = False  # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank)  # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
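+# A minimal writer for the .bin shard format consumed by _load_data_shard above,
+# sketched from that function's asserts (not part of the original script): a
+# 256-word int32 header with header[0] = 20240520 (magic), header[1] = 1
+# (version) and header[2] = num_tokens, followed by the tokens as uint16.
+#
+#     import numpy as np
+#     def write_data_shard(path, tokens):
+#         header = np.zeros(256, dtype=np.int32)
+#         header[0], header[1], header[2] = 20240520, 1, len(tokens)
+#         with open(path, "wb") as f:
+#             f.write(header.tobytes())
+#             f.write(np.asarray(tokens, dtype=np.uint16).tobytes())
+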
+base_log_dir = Path("logs_bios/qa_0704")  # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        if logfile:
+            # Append each message to the log file exactly once
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
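+        # Worked example (illustrative only, not taken from the run): for the
+        # cleaned text "What university did X attend? Stanford University", the
+        # prompt is "What university did X attend?" and the answer is
+        # "Stanford University". Below, the answer is encoded with a leading
+        # space (' ' + answer) because GPT-2 BPE assigns different token ids to
+        # " Stanford" and "Stanford", and in the training text the answer
+        # always follows the question after a space.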
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # Create the class_id -> group_id mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
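+
+# Worked example (illustration only): generate_powerlaw_selection_counts(2) returns
+# selection_counts = {0: 4, 1: 2, 2: 1, 3: 1} and class_groups = [0, 1, 2, 2]: group 0
+# is one class with 4 samples, group 1 one class with 2 samples, and group 2 two
+# classes with 1 sample each. The evaluations above use the same construction with
+# m_val = 11.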
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curves from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        step_data = loss_history[group_id_str]
+        steps = sorted([int(s) for s in step_data.keys()])
+        losses = [step_data[str(s)] for s in steps]
+        ax.plot(steps, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+
+########################################
+#    Construct model and optimizer     #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks) # target_seq=None
+        model.train()
+
+        if isinstance(result,
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
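+# Note (illustration, using the configured num_iterations=10000 and cooldown_frac=0.8):
+# get_lr above holds the multiplier at 1.0 until step 2000 and then decays it linearly
+# to 0.1, e.g. get_lr(6000) = 0.55 and get_lr(10000) = 0.1. train_loss_sum and
+# train_step_count accumulate the running training loss between validation reports.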
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 08:42:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle allows multi-epoch training over the shards
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
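+# Example (illustration): with the CLI defaults above (optimizer_mode=0,
+# model_parameterization="whole", adam_lr=0.001, seed=42) the run directory
+# resolves to .../logs_bios/qa_0704/mode_0_param_whole_lr_0.001_seed_42.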
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # The model can return a (loss, logits) tuple; keep the logits (last element),
+            # matching how compute_first_token_accuracy unpacks the result.
+            if isinstance(logits, tuple): logits = logits[-1]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function for per-class and total metric histories."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(s) for s in step_data.keys()])
+            values = [step_data[str(s)] for s in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(s) for s in history.keys()])
+        values = [history[str(s)] for s in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    # The history dicts are keyed by training step, so label the x-axis "Step".
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # Create the class_id -> group_id mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curves from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        step_data = loss_history[group_id_str]
+        steps = sorted([int(s) for s in step_data.keys()])
+        losses = [step_data[str(s)] for s in steps]
+        ax.plot(steps, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+
+########################################
+#    Construct model and optimizer     #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks) # target_seq=None
+        model.train()
+
+        if isinstance(result,
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
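+# ----------------------------------------------------------------------------
+# Worked example (a sketch for intuition only; values assume this run's config,
+# num_iterations=10000 and cooldown_frac=0.8, and the schedules defined above):
+#
+#   get_lr(step) LR multiplier:
+#     step  1000: x=0.10 < 1-0.8=0.20          -> 1.00 (stable phase)
+#     step  6000: x=0.60, w=(1-0.60)/0.8=0.50  -> 0.50*1.0 + 0.50*0.1 = 0.55
+#     step 10000: x=1.00, w=0.00               -> 0.10 (end of cooldown)
+#
+#   get_window_size_blocks(step) attention window:
+#     step     0: max(128, next_multiple_of_n(0, n=128))    = 128 tokens  (1 block)
+#     step 10000: max(128, next_multiple_of_n(1728, n=128)) = 1792 tokens (14 blocks)
+#
+#   Muon momentum warmup (see the training loop below):
+#     momentum = (1 - min(step/300, 1)) * 0.85 + min(step/300, 1) * 0.95,
+#     i.e. 0.85 at step 0, 0.90 at step 150, and 0.95 from step 300 onward.
+# ----------------------------------------------------------------------------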
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 08:42:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 08:42:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-06 08:42:46] [Rank 0] PRINT: Constructing model... +[2025-07-06 08:42:46] [Rank 0] PRINT: Constructing model... +[2025-07-06 08:42:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 08:42:48] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 08:42:48] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 08:42:48] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 08:42:48] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 08:42:48] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 08:42:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 08:42:49] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 08:42:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 08:42:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 08:42:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 08:42:49] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 08:42:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 08:42:49] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 08:42:49] [Rank 0] PRINT: Model returns: +[2025-07-06 08:42:49] [Rank 0] PRINT: Model returns: +[2025-07-06 08:42:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 08:42:49] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 08:42:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 08:42:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 08:42:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 08:42:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 08:42:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 08:42:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 08:42:49] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 08:42:49] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 08:42:49] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 08:42:49] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 08:42:49] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 08:42:49] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 08:42:49] [Rank 0] PRINT: Starting warmup... +[2025-07-06 08:42:49] [Rank 0] PRINT: Starting warmup... +[2025-07-06 08:44:30] [Rank 0] PRINT: Warmup complete. +[2025-07-06 08:44:30] [Rank 0] PRINT: Warmup complete. +[2025-07-06 08:44:30] [Rank 0] PRINT: Starting training... +[2025-07-06 08:44:30] [Rank 0] PRINT: Starting training... +[2025-07-06 08:44:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:44:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 08:44:38] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 08:44:38] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 08:44:40] [Rank 0] step:21/10000 train_time:1758ms step_avg:83.69ms +[2025-07-06 08:44:40] [Rank 0] step:21/10000 train_time:1758ms step_avg:83.69ms +[2025-07-06 08:44:42] [Rank 0] step:41/10000 train_time:3211ms step_avg:78.31ms +[2025-07-06 08:44:42] [Rank 0] step:41/10000 train_time:3211ms step_avg:78.31ms +[2025-07-06 08:44:43] [Rank 0] step:61/10000 train_time:4666ms step_avg:76.49ms +[2025-07-06 08:44:43] [Rank 0] step:61/10000 train_time:4666ms step_avg:76.49ms +[2025-07-06 08:44:45] [Rank 0] step:81/10000 train_time:6120ms step_avg:75.55ms +[2025-07-06 08:44:45] [Rank 0] step:81/10000 train_time:6120ms step_avg:75.55ms +[2025-07-06 08:44:47] [Rank 0] step:101/10000 train_time:8238ms step_avg:81.57ms +[2025-07-06 08:44:47] [Rank 0] step:101/10000 train_time:8238ms step_avg:81.57ms +[2025-07-06 08:44:48] [Rank 0] step:121/10000 train_time:9908ms step_avg:81.88ms +[2025-07-06 08:44:48] [Rank 0] step:121/10000 train_time:9908ms step_avg:81.88ms +[2025-07-06 08:44:50] [Rank 0] step:141/10000 train_time:11368ms step_avg:80.62ms +[2025-07-06 08:44:50] [Rank 0] step:141/10000 train_time:11368ms step_avg:80.62ms +[2025-07-06 08:44:51] [Rank 0] step:161/10000 train_time:12829ms step_avg:79.68ms +[2025-07-06 08:44:51] [Rank 0] step:161/10000 train_time:12829ms step_avg:79.68ms +[2025-07-06 08:44:53] [Rank 0] step:181/10000 train_time:14289ms step_avg:78.94ms +[2025-07-06 08:44:53] [Rank 0] step:181/10000 train_time:14289ms step_avg:78.94ms +[2025-07-06 08:44:55] [Rank 0] step:201/10000 train_time:16393ms step_avg:81.56ms +[2025-07-06 08:44:55] [Rank 0] step:201/10000 train_time:16393ms step_avg:81.56ms +[2025-07-06 08:44:56] [Rank 0] step:221/10000 train_time:17855ms step_avg:80.79ms +[2025-07-06 08:44:56] [Rank 0] step:221/10000 train_time:17855ms step_avg:80.79ms +[2025-07-06 08:44:58] [Rank 0] step:241/10000 train_time:19320ms step_avg:80.16ms +[2025-07-06 08:44:58] [Rank 0] step:241/10000 train_time:19320ms step_avg:80.16ms +[2025-07-06 08:44:59] [Rank 0] step:261/10000 train_time:20791ms step_avg:79.66ms +[2025-07-06 08:44:59] [Rank 0] step:261/10000 train_time:20791ms step_avg:79.66ms +[2025-07-06 08:45:01] [Rank 0] step:281/10000 train_time:22923ms step_avg:81.58ms +[2025-07-06 08:45:01] [Rank 0] step:281/10000 train_time:22923ms step_avg:81.58ms +[2025-07-06 08:45:03] [Rank 0] step:301/10000 train_time:24393ms step_avg:81.04ms +[2025-07-06 08:45:03] [Rank 0] step:301/10000 train_time:24393ms step_avg:81.04ms +[2025-07-06 08:45:04] [Rank 0] step:321/10000 train_time:25858ms step_avg:80.56ms +[2025-07-06 08:45:04] [Rank 0] step:321/10000 train_time:25858ms step_avg:80.56ms +[2025-07-06 08:45:06] [Rank 0] step:341/10000 train_time:27328ms step_avg:80.14ms +[2025-07-06 08:45:06] [Rank 0] step:341/10000 train_time:27328ms step_avg:80.14ms +[2025-07-06 08:45:08] [Rank 0] step:361/10000 train_time:29477ms step_avg:81.65ms +[2025-07-06 08:45:08] [Rank 0] step:361/10000 train_time:29477ms step_avg:81.65ms +[2025-07-06 08:45:09] [Rank 0] step:381/10000 train_time:30923ms step_avg:81.16ms +[2025-07-06 08:45:09] [Rank 0] step:381/10000 train_time:30923ms step_avg:81.16ms +[2025-07-06 08:45:11] [Rank 0] step:401/10000 train_time:32390ms step_avg:80.77ms +[2025-07-06 08:45:11] [Rank 0] step:401/10000 train_time:32390ms step_avg:80.77ms +[2025-07-06 08:45:12] [Rank 0] step:421/10000 train_time:33859ms step_avg:80.42ms 
+[2025-07-06 08:45:12] [Rank 0] step:421/10000 train_time:33859ms step_avg:80.42ms +[2025-07-06 08:45:14] [Rank 0] step:441/10000 train_time:35328ms step_avg:80.11ms +[2025-07-06 08:45:14] [Rank 0] step:441/10000 train_time:35328ms step_avg:80.11ms +[2025-07-06 08:45:16] [Rank 0] step:461/10000 train_time:37454ms step_avg:81.25ms +[2025-07-06 08:45:16] [Rank 0] step:461/10000 train_time:37454ms step_avg:81.25ms +[2025-07-06 08:45:17] [Rank 0] step:481/10000 train_time:38922ms step_avg:80.92ms +[2025-07-06 08:45:17] [Rank 0] step:481/10000 train_time:38922ms step_avg:80.92ms +[2025-07-06 08:45:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:45:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:45:20] [Rank 0] PRINT: step:500/10000 train_loss:4.9401 val_loss:2.0529 train_time:40389ms step_avg:80.78ms +[2025-07-06 08:45:20] [Rank 0] PRINT: step:500/10000 train_loss:4.9401 val_loss:2.0529 train_time:40389ms step_avg:80.78ms +[2025-07-06 08:45:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:45:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:45:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:45:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:45:20] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:45:20] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:50:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:50:43] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:50:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:50:43] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:50:43] [Rank 0] Total Loss: 4.0781 +[2025-07-06 08:50:43] [Rank 0] Total Loss: 4.0781 +[2025-07-06 08:50:43] [Rank 0] Total FTA: 0.0847 +[2025-07-06 08:50:43] [Rank 0] Total FTA: 0.0847 +[2025-07-06 08:50:43] [Rank 0] Group 0 Loss: 4.3282 +[2025-07-06 08:50:43] [Rank 0] Group 0 Loss: 4.3282 +[2025-07-06 08:50:43] [Rank 0] Group 1 Loss: 4.0499 +[2025-07-06 08:50:43] [Rank 0] Group 1 Loss: 4.0499 +[2025-07-06 08:50:43] [Rank 0] Group 2 Loss: 4.0020 +[2025-07-06 08:50:43] [Rank 0] Group 2 Loss: 4.0020 +[2025-07-06 08:50:43] [Rank 0] Group 3 Loss: 4.0118 +[2025-07-06 08:50:43] [Rank 0] Group 3 Loss: 4.0118 +[2025-07-06 08:50:43] [Rank 0] Group 4 Loss: 4.0285 +[2025-07-06 08:50:43] [Rank 0] Group 4 Loss: 4.0285 +[2025-07-06 08:50:43] [Rank 0] Group 5 Loss: 4.0046 +[2025-07-06 08:50:43] [Rank 0] Group 5 Loss: 4.0046 +[2025-07-06 08:50:43] [Rank 0] Group 6 Loss: 3.9708 +[2025-07-06 08:50:43] [Rank 0] Group 6 Loss: 3.9708 +[2025-07-06 08:50:43] [Rank 0] Group 7 Loss: 4.0756 +[2025-07-06 08:50:43] [Rank 0] Group 7 Loss: 4.0756 +[2025-07-06 08:50:43] [Rank 0] Group 8 Loss: 4.0478 +[2025-07-06 08:50:43] [Rank 0] Group 8 Loss: 4.0478 +[2025-07-06 08:50:43] [Rank 0] Group 9 Loss: 4.0028 +[2025-07-06 08:50:43] [Rank 0] Group 9 Loss: 4.0028 +[2025-07-06 08:50:43] [Rank 0] Group 10 Loss: 4.0398 +[2025-07-06 08:50:43] [Rank 0] Group 10 Loss: 4.0398 +[2025-07-06 08:50:43] [Rank 0] Group 11 Loss: 4.0911 +[2025-07-06 08:50:43] [Rank 0] Group 11 Loss: 4.0911 +[2025-07-06 08:50:43] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-06 08:50:43] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-06 
08:50:43] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 08:50:43] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 08:50:43] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-06 08:50:43] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-06 08:50:43] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-06 08:50:43] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-06 08:50:43] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-06 08:50:43] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-06 08:50:43] [Rank 0] Group 5 FTA: 0.0729 +[2025-07-06 08:50:43] [Rank 0] Group 5 FTA: 0.0729 +[2025-07-06 08:50:43] [Rank 0] Group 6 FTA: 0.0807 +[2025-07-06 08:50:43] [Rank 0] Group 6 FTA: 0.0807 +[2025-07-06 08:50:43] [Rank 0] Group 7 FTA: 0.1120 +[2025-07-06 08:50:43] [Rank 0] Group 7 FTA: 0.1120 +[2025-07-06 08:50:43] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-06 08:50:43] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-06 08:50:43] [Rank 0] Group 9 FTA: 0.0781 +[2025-07-06 08:50:43] [Rank 0] Group 9 FTA: 0.0781 +[2025-07-06 08:50:43] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-06 08:50:43] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-06 08:50:43] [Rank 0] Group 11 FTA: 0.0850 +[2025-07-06 08:50:43] [Rank 0] Group 11 FTA: 0.0850 +[2025-07-06 08:50:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png +[2025-07-06 08:50:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png +[2025-07-06 08:50:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png +[2025-07-06 08:50:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png +[2025-07-06 08:50:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png +[2025-07-06 08:50:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png +[2025-07-06 08:50:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png +[2025-07-06 08:50:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png +[2025-07-06 08:50:45] [Rank 0] step:501/10000 train_time:40411ms step_avg:80.66ms +[2025-07-06 08:50:45] [Rank 0] step:501/10000 train_time:40411ms step_avg:80.66ms +[2025-07-06 08:50:46] [Rank 0] step:521/10000 train_time:41886ms step_avg:80.40ms +[2025-07-06 08:50:46] [Rank 0] step:521/10000 train_time:41886ms step_avg:80.40ms +[2025-07-06 08:50:48] [Rank 0] step:541/10000 train_time:43345ms step_avg:80.12ms +[2025-07-06 08:50:48] [Rank 0] step:541/10000 train_time:43345ms step_avg:80.12ms +[2025-07-06 08:50:50] [Rank 0] step:561/10000 train_time:45452ms step_avg:81.02ms +[2025-07-06 08:50:50] [Rank 0] step:561/10000 train_time:45452ms step_avg:81.02ms +[2025-07-06 08:50:51] [Rank 0] step:581/10000 train_time:46912ms step_avg:80.74ms +[2025-07-06 08:50:51] [Rank 0] step:581/10000 train_time:46912ms step_avg:80.74ms +[2025-07-06 08:50:52] [Rank 0] step:601/10000 train_time:48370ms step_avg:80.48ms +[2025-07-06 08:50:52] [Rank 0] step:601/10000 train_time:48370ms step_avg:80.48ms +[2025-07-06 08:50:54] [Rank 0] step:621/10000 train_time:49830ms step_avg:80.24ms +[2025-07-06 08:50:54] [Rank 0] step:621/10000 train_time:49830ms step_avg:80.24ms 
+[2025-07-06 08:50:56] [Rank 0] step:641/10000 train_time:51953ms step_avg:81.05ms +[2025-07-06 08:50:56] [Rank 0] step:641/10000 train_time:51953ms step_avg:81.05ms +[2025-07-06 08:50:58] [Rank 0] step:661/10000 train_time:53414ms step_avg:80.81ms +[2025-07-06 08:50:58] [Rank 0] step:661/10000 train_time:53414ms step_avg:80.81ms +[2025-07-06 08:50:59] [Rank 0] step:681/10000 train_time:54875ms step_avg:80.58ms +[2025-07-06 08:50:59] [Rank 0] step:681/10000 train_time:54875ms step_avg:80.58ms +[2025-07-06 08:51:00] [Rank 0] step:701/10000 train_time:56337ms step_avg:80.37ms +[2025-07-06 08:51:00] [Rank 0] step:701/10000 train_time:56337ms step_avg:80.37ms +[2025-07-06 08:51:03] [Rank 0] step:721/10000 train_time:58057ms step_avg:80.52ms +[2025-07-06 08:51:03] [Rank 0] step:721/10000 train_time:58057ms step_avg:80.52ms +[2025-07-06 08:51:04] [Rank 0] step:741/10000 train_time:59921ms step_avg:80.86ms +[2025-07-06 08:51:04] [Rank 0] step:741/10000 train_time:59921ms step_avg:80.86ms +[2025-07-06 08:51:06] [Rank 0] step:761/10000 train_time:61389ms step_avg:80.67ms +[2025-07-06 08:51:06] [Rank 0] step:761/10000 train_time:61389ms step_avg:80.67ms +[2025-07-06 08:51:07] [Rank 0] step:781/10000 train_time:62975ms step_avg:80.63ms +[2025-07-06 08:51:07] [Rank 0] step:781/10000 train_time:62975ms step_avg:80.63ms +[2025-07-06 08:51:09] [Rank 0] step:801/10000 train_time:64583ms step_avg:80.63ms +[2025-07-06 08:51:09] [Rank 0] step:801/10000 train_time:64583ms step_avg:80.63ms +[2025-07-06 08:51:10] [Rank 0] step:821/10000 train_time:66296ms step_avg:80.75ms +[2025-07-06 08:51:10] [Rank 0] step:821/10000 train_time:66296ms step_avg:80.75ms +[2025-07-06 08:51:12] [Rank 0] step:841/10000 train_time:67773ms step_avg:80.59ms +[2025-07-06 08:51:12] [Rank 0] step:841/10000 train_time:67773ms step_avg:80.59ms +[2025-07-06 08:51:13] [Rank 0] step:861/10000 train_time:69246ms step_avg:80.42ms +[2025-07-06 08:51:13] [Rank 0] step:861/10000 train_time:69246ms step_avg:80.42ms +[2025-07-06 08:51:15] [Rank 0] step:881/10000 train_time:70722ms step_avg:80.27ms +[2025-07-06 08:51:15] [Rank 0] step:881/10000 train_time:70722ms step_avg:80.27ms +[2025-07-06 08:51:17] [Rank 0] step:901/10000 train_time:72249ms step_avg:80.19ms +[2025-07-06 08:51:17] [Rank 0] step:901/10000 train_time:72249ms step_avg:80.19ms +[2025-07-06 08:51:18] [Rank 0] step:921/10000 train_time:74329ms step_avg:80.70ms +[2025-07-06 08:51:18] [Rank 0] step:921/10000 train_time:74329ms step_avg:80.70ms +[2025-07-06 08:51:20] [Rank 0] step:941/10000 train_time:75799ms step_avg:80.55ms +[2025-07-06 08:51:20] [Rank 0] step:941/10000 train_time:75799ms step_avg:80.55ms +[2025-07-06 08:51:21] [Rank 0] step:961/10000 train_time:77272ms step_avg:80.41ms +[2025-07-06 08:51:21] [Rank 0] step:961/10000 train_time:77272ms step_avg:80.41ms +[2025-07-06 08:51:23] [Rank 0] step:981/10000 train_time:78747ms step_avg:80.27ms +[2025-07-06 08:51:23] [Rank 0] step:981/10000 train_time:78747ms step_avg:80.27ms +[2025-07-06 08:51:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:51:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 08:51:26] [Rank 0] PRINT: step:1000/10000 train_loss:1.6990 val_loss:1.5348 train_time:80983ms step_avg:80.98ms +[2025-07-06 08:51:26] [Rank 0] PRINT: step:1000/10000 train_loss:1.6990 val_loss:1.5348 train_time:80983ms step_avg:80.98ms +[2025-07-06 08:51:26] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:51:26] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:51:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:51:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:51:26] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:51:26] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:56:51] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:56:51] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:56:51] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:56:51] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:56:51] [Rank 0] Total Loss: 4.1722 +[2025-07-06 08:56:51] [Rank 0] Total Loss: 4.1722 +[2025-07-06 08:56:51] [Rank 0] Total FTA: 0.1168 +[2025-07-06 08:56:51] [Rank 0] Total FTA: 0.1168 +[2025-07-06 08:56:51] [Rank 0] Group 0 Loss: 4.4262 +[2025-07-06 08:56:51] [Rank 0] Group 0 Loss: 4.4262 +[2025-07-06 08:56:51] [Rank 0] Group 1 Loss: 4.0948 +[2025-07-06 08:56:51] [Rank 0] Group 1 Loss: 4.0948 +[2025-07-06 08:56:51] [Rank 0] Group 2 Loss: 4.0236 +[2025-07-06 08:56:51] [Rank 0] Group 2 Loss: 4.0236 +[2025-07-06 08:56:52] [Rank 0] Group 3 Loss: 4.0523 +[2025-07-06 08:56:52] [Rank 0] Group 3 Loss: 4.0523 +[2025-07-06 08:56:52] [Rank 0] Group 4 Loss: 4.1469 +[2025-07-06 08:56:52] [Rank 0] Group 4 Loss: 4.1469 +[2025-07-06 08:56:52] [Rank 0] Group 5 Loss: 4.0909 +[2025-07-06 08:56:52] [Rank 0] Group 5 Loss: 4.0909 +[2025-07-06 08:56:52] [Rank 0] Group 6 Loss: 4.1225 +[2025-07-06 08:56:52] [Rank 0] Group 6 Loss: 4.1225 +[2025-07-06 08:56:52] [Rank 0] Group 7 Loss: 4.1762 +[2025-07-06 08:56:52] [Rank 0] Group 7 Loss: 4.1762 +[2025-07-06 08:56:52] [Rank 0] Group 8 Loss: 4.1613 +[2025-07-06 08:56:52] [Rank 0] Group 8 Loss: 4.1613 +[2025-07-06 08:56:52] [Rank 0] Group 9 Loss: 4.0814 +[2025-07-06 08:56:52] [Rank 0] Group 9 Loss: 4.0814 +[2025-07-06 08:56:52] [Rank 0] Group 10 Loss: 4.1524 +[2025-07-06 08:56:52] [Rank 0] Group 10 Loss: 4.1524 +[2025-07-06 08:56:52] [Rank 0] Group 11 Loss: 4.2052 +[2025-07-06 08:56:52] [Rank 0] Group 11 Loss: 4.2052 +[2025-07-06 08:56:52] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-06 08:56:52] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-06 08:56:52] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-06 08:56:52] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-06 08:56:52] [Rank 0] Group 2 FTA: 0.1667 +[2025-07-06 08:56:52] [Rank 0] Group 2 FTA: 0.1667 +[2025-07-06 08:56:52] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-06 08:56:52] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-06 08:56:52] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-06 08:56:52] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-06 08:56:52] [Rank 0] Group 5 FTA: 0.1172 +[2025-07-06 08:56:52] [Rank 0] Group 5 FTA: 0.1172 +[2025-07-06 08:56:52] [Rank 0] Group 6 FTA: 0.1302 +[2025-07-06 08:56:52] [Rank 0] Group 6 FTA: 0.1302 +[2025-07-06 08:56:52] [Rank 0] Group 7 FTA: 0.1068 +[2025-07-06 08:56:52] [Rank 0] Group 7 FTA: 0.1068 +[2025-07-06 08:56:52] [Rank 0] Group 8 FTA: 0.1380 +[2025-07-06 08:56:52] [Rank 0] Group 8 FTA: 0.1380 +[2025-07-06 08:56:52] [Rank 0] Group 9 FTA: 0.1055 
+[2025-07-06 08:56:52] [Rank 0] Group 9 FTA: 0.1055 +[2025-07-06 08:56:52] [Rank 0] Group 10 FTA: 0.0918 +[2025-07-06 08:56:52] [Rank 0] Group 10 FTA: 0.0918 +[2025-07-06 08:56:52] [Rank 0] Group 11 FTA: 0.1113 +[2025-07-06 08:56:52] [Rank 0] Group 11 FTA: 0.1113 +[2025-07-06 08:56:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png +[2025-07-06 08:56:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png +[2025-07-06 08:56:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png +[2025-07-06 08:56:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png +[2025-07-06 08:56:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png +[2025-07-06 08:56:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png +[2025-07-06 08:56:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png +[2025-07-06 08:56:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png +[2025-07-06 08:56:53] [Rank 0] step:1001/10000 train_time:81005ms step_avg:80.92ms +[2025-07-06 08:56:53] [Rank 0] step:1001/10000 train_time:81005ms step_avg:80.92ms +[2025-07-06 08:56:55] [Rank 0] step:1021/10000 train_time:82470ms step_avg:80.77ms +[2025-07-06 08:56:55] [Rank 0] step:1021/10000 train_time:82470ms step_avg:80.77ms +[2025-07-06 08:56:56] [Rank 0] step:1041/10000 train_time:83935ms step_avg:80.63ms +[2025-07-06 08:56:56] [Rank 0] step:1041/10000 train_time:83935ms step_avg:80.63ms +[2025-07-06 08:56:57] [Rank 0] step:1061/10000 train_time:85403ms step_avg:80.49ms +[2025-07-06 08:56:57] [Rank 0] step:1061/10000 train_time:85403ms step_avg:80.49ms +[2025-07-06 08:57:00] [Rank 0] step:1081/10000 train_time:86872ms step_avg:80.36ms +[2025-07-06 08:57:00] [Rank 0] step:1081/10000 train_time:86872ms step_avg:80.36ms +[2025-07-06 08:57:01] [Rank 0] step:1101/10000 train_time:89013ms step_avg:80.85ms +[2025-07-06 08:57:01] [Rank 0] step:1101/10000 train_time:89013ms step_avg:80.85ms +[2025-07-06 08:57:03] [Rank 0] step:1121/10000 train_time:90479ms step_avg:80.71ms +[2025-07-06 08:57:03] [Rank 0] step:1121/10000 train_time:90479ms step_avg:80.71ms +[2025-07-06 08:57:04] [Rank 0] step:1141/10000 train_time:91947ms step_avg:80.58ms +[2025-07-06 08:57:04] [Rank 0] step:1141/10000 train_time:91947ms step_avg:80.58ms +[2025-07-06 08:57:05] [Rank 0] step:1161/10000 train_time:93417ms step_avg:80.46ms +[2025-07-06 08:57:05] [Rank 0] step:1161/10000 train_time:93417ms step_avg:80.46ms +[2025-07-06 08:57:08] [Rank 0] step:1181/10000 train_time:95538ms step_avg:80.90ms +[2025-07-06 08:57:08] [Rank 0] step:1181/10000 train_time:95538ms step_avg:80.90ms +[2025-07-06 08:57:09] [Rank 0] step:1201/10000 train_time:97005ms step_avg:80.77ms +[2025-07-06 08:57:09] [Rank 0] step:1201/10000 train_time:97005ms step_avg:80.77ms +[2025-07-06 08:57:11] [Rank 0] step:1221/10000 train_time:98475ms step_avg:80.65ms +[2025-07-06 08:57:11] [Rank 0] step:1221/10000 train_time:98475ms step_avg:80.65ms +[2025-07-06 
08:57:12] [Rank 0] step:1241/10000 train_time:99946ms step_avg:80.54ms +[2025-07-06 08:57:12] [Rank 0] step:1241/10000 train_time:99946ms step_avg:80.54ms +[2025-07-06 08:57:14] [Rank 0] step:1261/10000 train_time:102090ms step_avg:80.96ms +[2025-07-06 08:57:14] [Rank 0] step:1261/10000 train_time:102090ms step_avg:80.96ms +[2025-07-06 08:57:16] [Rank 0] step:1281/10000 train_time:103539ms step_avg:80.83ms +[2025-07-06 08:57:16] [Rank 0] step:1281/10000 train_time:103539ms step_avg:80.83ms +[2025-07-06 08:57:17] [Rank 0] step:1301/10000 train_time:105008ms step_avg:80.71ms +[2025-07-06 08:57:17] [Rank 0] step:1301/10000 train_time:105008ms step_avg:80.71ms +[2025-07-06 08:57:19] [Rank 0] step:1321/10000 train_time:106479ms step_avg:80.60ms +[2025-07-06 08:57:19] [Rank 0] step:1321/10000 train_time:106479ms step_avg:80.60ms +[2025-07-06 08:57:20] [Rank 0] step:1341/10000 train_time:107947ms step_avg:80.50ms +[2025-07-06 08:57:20] [Rank 0] step:1341/10000 train_time:107947ms step_avg:80.50ms +[2025-07-06 08:57:22] [Rank 0] step:1361/10000 train_time:110068ms step_avg:80.87ms +[2025-07-06 08:57:22] [Rank 0] step:1361/10000 train_time:110068ms step_avg:80.87ms +[2025-07-06 08:57:24] [Rank 0] step:1381/10000 train_time:111537ms step_avg:80.77ms +[2025-07-06 08:57:24] [Rank 0] step:1381/10000 train_time:111537ms step_avg:80.77ms +[2025-07-06 08:57:25] [Rank 0] step:1401/10000 train_time:113007ms step_avg:80.66ms +[2025-07-06 08:57:25] [Rank 0] step:1401/10000 train_time:113007ms step_avg:80.66ms +[2025-07-06 08:57:27] [Rank 0] step:1421/10000 train_time:114634ms step_avg:80.67ms +[2025-07-06 08:57:27] [Rank 0] step:1421/10000 train_time:114634ms step_avg:80.67ms +[2025-07-06 08:57:28] [Rank 0] step:1441/10000 train_time:116454ms step_avg:80.81ms +[2025-07-06 08:57:28] [Rank 0] step:1441/10000 train_time:116454ms step_avg:80.81ms +[2025-07-06 08:57:30] [Rank 0] step:1461/10000 train_time:117903ms step_avg:80.70ms +[2025-07-06 08:57:30] [Rank 0] step:1461/10000 train_time:117903ms step_avg:80.70ms +[2025-07-06 08:57:31] [Rank 0] step:1481/10000 train_time:119374ms step_avg:80.60ms +[2025-07-06 08:57:31] [Rank 0] step:1481/10000 train_time:119374ms step_avg:80.60ms +[2025-07-06 08:57:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:57:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:57:34] [Rank 0] PRINT: step:1500/10000 train_loss:1.4229 val_loss:1.3308 train_time:120841ms step_avg:80.56ms +[2025-07-06 08:57:34] [Rank 0] PRINT: step:1500/10000 train_loss:1.4229 val_loss:1.3308 train_time:120841ms step_avg:80.56ms +[2025-07-06 08:57:34] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:57:34] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:57:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:57:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 08:57:34] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:57:34] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 09:03:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 09:03:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 09:03:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 09:03:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 09:03:00] [Rank 0] Total Loss: 4.3309 +[2025-07-06 09:03:00] [Rank 0] Total Loss: 4.3309 +[2025-07-06 09:03:00] [Rank 0] Total FTA: 0.1979 +[2025-07-06 09:03:00] [Rank 0] Total FTA: 0.1979 +[2025-07-06 09:03:00] [Rank 0] Group 0 Loss: 4.5473 +[2025-07-06 09:03:00] [Rank 0] Group 0 Loss: 4.5473 +[2025-07-06 09:03:00] [Rank 0] Group 1 Loss: 4.2200 +[2025-07-06 09:03:00] [Rank 0] Group 1 Loss: 4.2200 +[2025-07-06 09:03:00] [Rank 0] Group 2 Loss: 4.1008 +[2025-07-06 09:03:00] [Rank 0] Group 2 Loss: 4.1008 +[2025-07-06 09:03:00] [Rank 0] Group 3 Loss: 4.3883 +[2025-07-06 09:03:00] [Rank 0] Group 3 Loss: 4.3883 +[2025-07-06 09:03:00] [Rank 0] Group 4 Loss: 4.3027 +[2025-07-06 09:03:00] [Rank 0] Group 4 Loss: 4.3027 +[2025-07-06 09:03:00] [Rank 0] Group 5 Loss: 4.2807 +[2025-07-06 09:03:00] [Rank 0] Group 5 Loss: 4.2807 +[2025-07-06 09:03:00] [Rank 0] Group 6 Loss: 4.2353 +[2025-07-06 09:03:00] [Rank 0] Group 6 Loss: 4.2353 +[2025-07-06 09:03:00] [Rank 0] Group 7 Loss: 4.3303 +[2025-07-06 09:03:00] [Rank 0] Group 7 Loss: 4.3303 +[2025-07-06 09:03:00] [Rank 0] Group 8 Loss: 4.3365 +[2025-07-06 09:03:00] [Rank 0] Group 8 Loss: 4.3365 +[2025-07-06 09:03:00] [Rank 0] Group 9 Loss: 4.2753 +[2025-07-06 09:03:00] [Rank 0] Group 9 Loss: 4.2753 +[2025-07-06 09:03:00] [Rank 0] Group 10 Loss: 4.3466 +[2025-07-06 09:03:00] [Rank 0] Group 10 Loss: 4.3466 +[2025-07-06 09:03:00] [Rank 0] Group 11 Loss: 4.3442 +[2025-07-06 09:03:00] [Rank 0] Group 11 Loss: 4.3442 +[2025-07-06 09:03:00] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-06 09:03:00] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-06 09:03:00] [Rank 0] Group 1 FTA: 0.3854 +[2025-07-06 09:03:00] [Rank 0] Group 1 FTA: 0.3854 +[2025-07-06 09:03:00] [Rank 0] Group 2 FTA: 0.2943 +[2025-07-06 09:03:00] [Rank 0] Group 2 FTA: 0.2943 +[2025-07-06 09:03:00] [Rank 0] Group 3 FTA: 0.0990 +[2025-07-06 09:03:00] [Rank 0] Group 3 FTA: 0.0990 +[2025-07-06 09:03:00] [Rank 0] Group 4 FTA: 0.1901 +[2025-07-06 09:03:00] [Rank 0] Group 4 FTA: 0.1901 +[2025-07-06 09:03:00] [Rank 0] Group 5 FTA: 0.2500 +[2025-07-06 09:03:00] [Rank 0] Group 5 FTA: 0.2500 +[2025-07-06 09:03:00] [Rank 0] Group 6 FTA: 0.2292 +[2025-07-06 09:03:00] [Rank 0] Group 6 FTA: 0.2292 +[2025-07-06 09:03:00] [Rank 0] Group 7 FTA: 0.1875 +[2025-07-06 09:03:00] [Rank 0] Group 7 FTA: 0.1875 +[2025-07-06 09:03:00] [Rank 0] Group 8 FTA: 0.2161 +[2025-07-06 09:03:00] [Rank 0] Group 8 FTA: 0.2161 +[2025-07-06 09:03:00] [Rank 0] Group 9 FTA: 0.2266 +[2025-07-06 09:03:00] [Rank 0] Group 9 FTA: 0.2266 +[2025-07-06 09:03:00] [Rank 0] Group 10 FTA: 0.2305 +[2025-07-06 09:03:00] [Rank 0] Group 10 FTA: 0.2305 +[2025-07-06 09:03:00] [Rank 0] Group 11 FTA: 0.2227 +[2025-07-06 09:03:00] [Rank 0] Group 11 FTA: 0.2227 +[2025-07-06 09:03:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png +[2025-07-06 09:03:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png +[2025-07-06 09:03:01] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png +[2025-07-06 09:03:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png +[2025-07-06 09:03:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png +[2025-07-06 09:03:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png +[2025-07-06 09:03:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png +[2025-07-06 09:03:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png +[2025-07-06 09:03:01] [Rank 0] step:1501/10000 train_time:120865ms step_avg:80.52ms +[2025-07-06 09:03:01] [Rank 0] step:1501/10000 train_time:120865ms step_avg:80.52ms +[2025-07-06 09:03:03] [Rank 0] step:1521/10000 train_time:122350ms step_avg:80.44ms +[2025-07-06 09:03:03] [Rank 0] step:1521/10000 train_time:122350ms step_avg:80.44ms +[2025-07-06 09:03:05] [Rank 0] step:1541/10000 train_time:124475ms step_avg:80.78ms +[2025-07-06 09:03:05] [Rank 0] step:1541/10000 train_time:124475ms step_avg:80.78ms +[2025-07-06 09:03:06] [Rank 0] step:1561/10000 train_time:125943ms step_avg:80.68ms +[2025-07-06 09:03:06] [Rank 0] step:1561/10000 train_time:125943ms step_avg:80.68ms +[2025-07-06 09:03:08] [Rank 0] step:1581/10000 train_time:127409ms step_avg:80.59ms +[2025-07-06 09:03:08] [Rank 0] step:1581/10000 train_time:127409ms step_avg:80.59ms +[2025-07-06 09:03:09] [Rank 0] step:1601/10000 train_time:128875ms step_avg:80.50ms +[2025-07-06 09:03:09] [Rank 0] step:1601/10000 train_time:128875ms step_avg:80.50ms +[2025-07-06 09:03:11] [Rank 0] step:1621/10000 train_time:130395ms step_avg:80.44ms +[2025-07-06 09:03:11] [Rank 0] step:1621/10000 train_time:130395ms step_avg:80.44ms +[2025-07-06 09:03:13] [Rank 0] step:1641/10000 train_time:132475ms step_avg:80.73ms +[2025-07-06 09:03:13] [Rank 0] step:1641/10000 train_time:132475ms step_avg:80.73ms +[2025-07-06 09:03:14] [Rank 0] step:1661/10000 train_time:133941ms step_avg:80.64ms +[2025-07-06 09:03:14] [Rank 0] step:1661/10000 train_time:133941ms step_avg:80.64ms +[2025-07-06 09:03:16] [Rank 0] step:1681/10000 train_time:135408ms step_avg:80.55ms +[2025-07-06 09:03:16] [Rank 0] step:1681/10000 train_time:135408ms step_avg:80.55ms +[2025-07-06 09:03:17] [Rank 0] step:1701/10000 train_time:136878ms step_avg:80.47ms +[2025-07-06 09:03:17] [Rank 0] step:1701/10000 train_time:136878ms step_avg:80.47ms +[2025-07-06 09:03:19] [Rank 0] step:1721/10000 train_time:139003ms step_avg:80.77ms +[2025-07-06 09:03:19] [Rank 0] step:1721/10000 train_time:139003ms step_avg:80.77ms +[2025-07-06 09:03:21] [Rank 0] step:1741/10000 train_time:140470ms step_avg:80.68ms +[2025-07-06 09:03:21] [Rank 0] step:1741/10000 train_time:140470ms step_avg:80.68ms +[2025-07-06 09:03:22] [Rank 0] step:1761/10000 train_time:141940ms step_avg:80.60ms +[2025-07-06 09:03:22] [Rank 0] step:1761/10000 train_time:141940ms step_avg:80.60ms +[2025-07-06 09:03:24] [Rank 0] step:1781/10000 train_time:143410ms step_avg:80.52ms +[2025-07-06 09:03:24] [Rank 0] step:1781/10000 train_time:143410ms step_avg:80.52ms +[2025-07-06 09:03:26] [Rank 0] step:1801/10000 train_time:145555ms step_avg:80.82ms +[2025-07-06 09:03:26] [Rank 0] 
step:1801/10000 train_time:145555ms step_avg:80.82ms +[2025-07-06 09:03:27] [Rank 0] step:1821/10000 train_time:147006ms step_avg:80.73ms +[2025-07-06 09:03:27] [Rank 0] step:1821/10000 train_time:147006ms step_avg:80.73ms +[2025-07-06 09:03:29] [Rank 0] step:1841/10000 train_time:148474ms step_avg:80.65ms +[2025-07-06 09:03:29] [Rank 0] step:1841/10000 train_time:148474ms step_avg:80.65ms +[2025-07-06 09:03:30] [Rank 0] step:1861/10000 train_time:149945ms step_avg:80.57ms +[2025-07-06 09:03:30] [Rank 0] step:1861/10000 train_time:149945ms step_avg:80.57ms +[2025-07-06 09:03:32] [Rank 0] step:1881/10000 train_time:151414ms step_avg:80.50ms +[2025-07-06 09:03:32] [Rank 0] step:1881/10000 train_time:151414ms step_avg:80.50ms +[2025-07-06 09:03:33] [Rank 0] step:1901/10000 train_time:153022ms step_avg:80.50ms +[2025-07-06 09:03:33] [Rank 0] step:1901/10000 train_time:153022ms step_avg:80.50ms +[2025-07-06 09:03:35] [Rank 0] step:1921/10000 train_time:154490ms step_avg:80.42ms +[2025-07-06 09:03:35] [Rank 0] step:1921/10000 train_time:154490ms step_avg:80.42ms +[2025-07-06 09:03:36] [Rank 0] step:1941/10000 train_time:155961ms step_avg:80.35ms +[2025-07-06 09:03:36] [Rank 0] step:1941/10000 train_time:155961ms step_avg:80.35ms +[2025-07-06 09:03:38] [Rank 0] step:1961/10000 train_time:157432ms step_avg:80.28ms +[2025-07-06 09:03:38] [Rank 0] step:1961/10000 train_time:157432ms step_avg:80.28ms +[2025-07-06 09:03:40] [Rank 0] step:1981/10000 train_time:159156ms step_avg:80.34ms +[2025-07-06 09:03:40] [Rank 0] step:1981/10000 train_time:159156ms step_avg:80.34ms +[2025-07-06 09:03:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 09:03:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 09:03:42] [Rank 0] PRINT: step:2000/10000 train_loss:1.2418 val_loss:1.2164 train_time:161023ms step_avg:80.51ms +[2025-07-06 09:03:42] [Rank 0] PRINT: step:2000/10000 train_loss:1.2418 val_loss:1.2164 train_time:161023ms step_avg:80.51ms +[2025-07-06 09:03:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 09:03:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 09:03:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 09:03:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 09:03:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:09:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:09:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:09:08] [Rank 0] Total Loss: 4.5269
+[2025-07-06 09:09:08] [Rank 0] Total FTA: 0.2934
+[2025-07-06 09:09:08] [Rank 0] Group 0 Loss: 4.8935
+[2025-07-06 09:09:08] [Rank 0] Group 1 Loss: 4.2664
+[2025-07-06 09:09:08] [Rank 0] Group 2 Loss: 4.2926
+[2025-07-06 09:09:08] [Rank 0] Group 3 Loss: 4.5016
+[2025-07-06 09:09:08] [Rank 0] Group 4 Loss: 4.4795
+[2025-07-06 09:09:08] [Rank 0] Group 5 Loss: 4.4454
+[2025-07-06 09:09:08] [Rank 0] Group 6 Loss: 4.4302
+[2025-07-06 09:09:08] [Rank 0] Group 7 Loss: 4.5375
+[2025-07-06 09:09:08] [Rank 0] Group 8 Loss: 4.5256
+[2025-07-06 09:09:08] [Rank 0] Group 9 Loss: 4.4744
+[2025-07-06 09:09:08] [Rank 0] Group 10 Loss: 4.5170
+[2025-07-06 09:09:08] [Rank 0] Group 11 Loss: 4.5456
+[2025-07-06 09:09:08] [Rank 0] Group 0 FTA: 0.3316
+[2025-07-06 09:09:08] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-06 09:09:08] [Rank 0] Group 2 FTA: 0.2630
+[2025-07-06 09:09:08] [Rank 0] Group 3 FTA: 0.2135
+[2025-07-06 09:09:08] [Rank 0] Group 4 FTA: 0.2865
+[2025-07-06 09:09:08] [Rank 0] Group 5 FTA: 0.3203
+[2025-07-06 09:09:08] [Rank 0] Group 6 FTA: 0.3047
+[2025-07-06 09:09:08] [Rank 0] Group 7 FTA: 0.3203
+[2025-07-06 09:09:08] [Rank 0] Group 8 FTA: 0.3047
+[2025-07-06 09:09:08] [Rank 0] Group 9 FTA: 0.3242
+[2025-07-06 09:09:08] [Rank 0] Group 10 FTA: 0.3281
+[2025-07-06 09:09:08] [Rank 0] Group 11 FTA: 0.3096
+[2025-07-06 09:09:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:09:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 09:09:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:09:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:09:10] [Rank 0] step:2001/10000 train_time:161044ms step_avg:80.48ms
+[2025-07-06 09:09:11] [Rank 0] step:2021/10000 train_time:162622ms step_avg:80.47ms
+[2025-07-06 09:09:13] [Rank 0] step:2041/10000 train_time:164087ms step_avg:80.40ms
+[2025-07-06 09:09:14] [Rank 0] step:2061/10000 train_time:165554ms step_avg:80.33ms
+[2025-07-06 09:09:16] [Rank 0] step:2081/10000 train_time:167681ms step_avg:80.58ms
+[2025-07-06 09:09:18] [Rank 0] step:2101/10000 train_time:169146ms step_avg:80.51ms
+[2025-07-06 09:09:19] [Rank 0] step:2121/10000 train_time:170612ms step_avg:80.44ms
+[2025-07-06 09:09:21] [Rank 0] step:2141/10000 train_time:172079ms step_avg:80.37ms
+[2025-07-06 09:09:23] [Rank 0] step:2161/10000 train_time:173597ms step_avg:80.33ms
+[2025-07-06 09:09:24] [Rank 0] step:2181/10000 train_time:175681ms step_avg:80.55ms
+[2025-07-06 09:09:26] [Rank 0] step:2201/10000 train_time:177149ms step_avg:80.49ms
+[2025-07-06 09:09:27] [Rank 0] step:2221/10000 train_time:178617ms step_avg:80.42ms
+[2025-07-06 09:09:29] [Rank 0] step:2241/10000 train_time:180107ms step_avg:80.37ms
+[2025-07-06 09:09:31] [Rank 0] step:2261/10000 train_time:181839ms step_avg:80.42ms
+[2025-07-06 09:09:32] [Rank 0] step:2281/10000 train_time:183333ms step_avg:80.37ms
+[2025-07-06 09:09:34] [Rank 0] step:2301/10000 train_time:184829ms step_avg:80.33ms
+[2025-07-06 09:09:35] [Rank 0] step:2321/10000 train_time:186423ms step_avg:80.32ms
+[2025-07-06 09:09:37] [Rank 0] step:2341/10000 train_time:188601ms step_avg:80.56ms
+[2025-07-06 09:09:39] [Rank 0] step:2361/10000 train_time:190076ms step_avg:80.51ms
+[2025-07-06 09:09:40] [Rank 0] step:2381/10000 train_time:191572ms step_avg:80.46ms
+[2025-07-06 09:09:42] [Rank 0] step:2401/10000 train_time:193068ms step_avg:80.41ms
+[2025-07-06 09:09:43] [Rank 0] step:2421/10000 train_time:194564ms step_avg:80.37ms
+[2025-07-06 09:09:46] [Rank 0] step:2441/10000 train_time:196720ms step_avg:80.59ms
+[2025-07-06 09:09:47] [Rank 0] step:2461/10000 train_time:198316ms step_avg:80.58ms
+[2025-07-06 09:09:49] [Rank 0] step:2481/10000 train_time:199814ms step_avg:80.54ms
+[2025-07-06 09:09:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:09:51] [Rank 0] PRINT: step:2500/10000 train_loss:1.1638 val_loss:1.1215 train_time:201310ms step_avg:80.52ms
+[2025-07-06 09:09:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:09:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
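Note: the recurring divisibility warning above is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches run and the final 131072 tokens are never scored. A sketch of the check; whether val_batch_size is val_seq_len (65536) times the data-parallel world size is an assumption:

    val_tokens = 1966080
    val_batch_size = 262144  # perhaps val_seq_len * world_size; an assumption

    full_batches, leftover = divmod(val_tokens, val_batch_size)  # 7, 131072
    if leftover:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")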
+[2025-07-06 09:09:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:15:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:15:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:15:14] [Rank 0] Total Loss: 4.8554
+[2025-07-06 09:15:14] [Rank 0] Total FTA: 0.4165
+[2025-07-06 09:15:14] [Rank 0] Group 0 Loss: 5.6147
+[2025-07-06 09:15:14] [Rank 0] Group 1 Loss: 4.5009
+[2025-07-06 09:15:14] [Rank 0] Group 2 Loss: 4.6231
+[2025-07-06 09:15:14] [Rank 0] Group 3 Loss: 4.6808
+[2025-07-06 09:15:14] [Rank 0] Group 4 Loss: 4.7330
+[2025-07-06 09:15:14] [Rank 0] Group 5 Loss: 4.7899
+[2025-07-06 09:15:14] [Rank 0] Group 6 Loss: 4.7125
+[2025-07-06 09:15:14] [Rank 0] Group 7 Loss: 4.7269
+[2025-07-06 09:15:14] [Rank 0] Group 8 Loss: 4.8093
+[2025-07-06 09:15:14] [Rank 0] Group 9 Loss: 4.7980
+[2025-07-06 09:15:14] [Rank 0] Group 10 Loss: 4.7966
+[2025-07-06 09:15:14] [Rank 0] Group 11 Loss: 4.8041
+[2025-07-06 09:15:14] [Rank 0] Group 0 FTA: 0.3524
+[2025-07-06 09:15:14] [Rank 0] Group 1 FTA: 0.3177
+[2025-07-06 09:15:14] [Rank 0] Group 2 FTA: 0.5625
+[2025-07-06 09:15:14] [Rank 0] Group 3 FTA: 0.3984
+[2025-07-06 09:15:14] [Rank 0] Group 4 FTA: 0.3620
+[2025-07-06 09:15:14] [Rank 0] Group 5 FTA: 0.4427
+[2025-07-06 09:15:14] [Rank 0] Group 6 FTA: 0.3828
+[2025-07-06 09:15:14] [Rank 0] Group 7 FTA: 0.3880
+[2025-07-06 09:15:14] [Rank 0] Group 8 FTA: 0.4505
+[2025-07-06 09:15:14] [Rank 0] Group 9 FTA: 0.4258
+[2025-07-06 09:15:14] [Rank 0] Group 10 FTA: 0.4551
+[2025-07-06 09:15:14] [Rank 0] Group 11 FTA: 0.4531
+[2025-07-06 09:15:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:15:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
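Note: "FTA" is never expanded in the log; it is read here as first-token accuracy, which is an assumption. The Total figures are computed over all sampled examples, so when group sizes differ the Total FTA need not equal the unweighted mean of the twelve group FTAs (at step 2000 that mean is ~0.288 versus a Total of 0.2934). A minimal aggregation sketch under those assumptions:

    from collections import defaultdict

    def aggregate(examples):
        # examples: iterable of (group_id, loss, first_token_correct)
        by_group = defaultdict(list)
        for gid, loss, correct in examples:
            by_group[gid].append((loss, correct))
        n = sum(len(g) for g in by_group.values())
        report = {
            "total_loss": sum(l for g in by_group.values() for l, _ in g) / n,
            "total_fta": sum(c for g in by_group.values() for _, c in g) / n,
        }
        for gid, g in sorted(by_group.items()):
            report[f"group_{gid}_loss"] = sum(l for l, _ in g) / len(g)
            report[f"group_{gid}_fta"] = sum(c for _, c in g) / len(g)
        return report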
+[2025-07-06 09:15:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:15:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:15:16] [Rank 0] step:2501/10000 train_time:201332ms step_avg:80.50ms
+[2025-07-06 09:15:18] [Rank 0] step:2521/10000 train_time:203077ms step_avg:80.55ms
+[2025-07-06 09:15:19] [Rank 0] step:2541/10000 train_time:204966ms step_avg:80.66ms
+[2025-07-06 09:15:21] [Rank 0] step:2561/10000 train_time:206456ms step_avg:80.62ms
+[2025-07-06 09:15:23] [Rank 0] step:2581/10000 train_time:208212ms step_avg:80.67ms
+[2025-07-06 09:15:24] [Rank 0] step:2601/10000 train_time:209702ms step_avg:80.62ms
+[2025-07-06 09:15:26] [Rank 0] step:2621/10000 train_time:211965ms step_avg:80.87ms
+[2025-07-06 09:15:28] [Rank 0] step:2641/10000 train_time:213455ms step_avg:80.82ms
+[2025-07-06 09:15:29] [Rank 0] step:2661/10000 train_time:214946ms step_avg:80.78ms
+[2025-07-06 09:15:31] [Rank 0] step:2681/10000 train_time:216441ms step_avg:80.73ms
+[2025-07-06 09:15:33] [Rank 0] step:2701/10000 train_time:217935ms step_avg:80.69ms
+[2025-07-06 09:15:34] [Rank 0] step:2721/10000 train_time:219663ms step_avg:80.73ms
+[2025-07-06 09:15:36] [Rank 0] step:2741/10000 train_time:221157ms step_avg:80.68ms
+[2025-07-06 09:15:37] [Rank 0] step:2761/10000 train_time:222652ms step_avg:80.64ms
+[2025-07-06 09:15:39] [Rank 0] step:2781/10000 train_time:224147ms step_avg:80.60ms
+[2025-07-06 09:15:41] [Rank 0] step:2801/10000 train_time:226303ms step_avg:80.79ms
+[2025-07-06 09:15:42] [Rank 0] step:2821/10000 train_time:227899ms step_avg:80.79ms
+[2025-07-06 09:15:44] [Rank 0] step:2841/10000 train_time:229396ms step_avg:80.74ms
+[2025-07-06 09:15:45] [Rank 0] step:2861/10000 train_time:230892ms step_avg:80.70ms
+[2025-07-06 09:15:48] [Rank 0] step:2881/10000 train_time:232644ms step_avg:80.75ms
+[2025-07-06 09:15:49] [Rank 0] step:2901/10000 train_time:234540ms step_avg:80.85ms
+[2025-07-06 09:15:51] [Rank 0] step:2921/10000 train_time:236038ms step_avg:80.81ms
+[2025-07-06 09:15:52] [Rank 0] step:2941/10000 train_time:237535ms step_avg:80.77ms
+[2025-07-06 09:15:54] [Rank 0] step:2961/10000 train_time:239031ms step_avg:80.73ms
+[2025-07-06 09:15:56] [Rank 0] step:2981/10000 train_time:241182ms step_avg:80.91ms
+[2025-07-06 09:15:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:15:58] [Rank 0] PRINT: step:3000/10000 train_loss:1.0970 val_loss:1.0554 train_time:242679ms step_avg:80.89ms
+[2025-07-06 09:15:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:15:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 09:15:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:21:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:21:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:21:21] [Rank 0] Total Loss: 5.0084
+[2025-07-06 09:21:21] [Rank 0] Total FTA: 0.5544
+[2025-07-06 09:21:21] [Rank 0] Group 0 Loss: 5.2634
+[2025-07-06 09:21:22] [Rank 0] Group 1 Loss: 4.7813
+[2025-07-06 09:21:22] [Rank 0] Group 2 Loss: 4.7386
+[2025-07-06 09:21:22] [Rank 0] Group 3 Loss: 5.0160
+[2025-07-06 09:21:22] [Rank 0] Group 4 Loss: 5.0277
+[2025-07-06 09:21:22] [Rank 0] Group 5 Loss: 4.9708
+[2025-07-06 09:21:22] [Rank 0] Group 6 Loss: 4.9299
+[2025-07-06 09:21:22] [Rank 0] Group 7 Loss: 5.0491
+[2025-07-06 09:21:22] [Rank 0] Group 8 Loss: 5.0238
+[2025-07-06 09:21:22] [Rank 0] Group 9 Loss: 5.0182
+[2025-07-06 09:21:22] [Rank 0] Group 10 Loss: 5.0027
+[2025-07-06 09:21:22] [Rank 0] Group 11 Loss: 5.0162
+[2025-07-06 09:21:22] [Rank 0] Group 0 FTA: 0.5345
+[2025-07-06 09:21:22] [Rank 0] Group 1 FTA: 0.3411
+[2025-07-06 09:21:22] [Rank 0] Group 2 FTA: 0.6719
+[2025-07-06 09:21:22] [Rank 0] Group 3 FTA: 0.5521
+[2025-07-06 09:21:22] [Rank 0] Group 4 FTA: 0.5365
+[2025-07-06 09:21:22] [Rank 0] Group 5 FTA: 0.5964
+[2025-07-06 09:21:22] [Rank 0] Group 6 FTA: 0.5078
+[2025-07-06 09:21:22] [Rank 0] Group 7 FTA: 0.6042
+[2025-07-06 09:21:22] [Rank 0] Group 8 FTA: 0.5573
+[2025-07-06 09:21:22] [Rank 0] Group 9 FTA: 0.5508
+[2025-07-06 09:21:22] [Rank 0] Group 10 FTA: 0.5762
+[2025-07-06 09:21:22] [Rank 0] Group 11 FTA: 0.5850
+[2025-07-06 09:21:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:21:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
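Note: after each detailed evaluation the same four PNGs are redrawn and overwritten in place, which is why the [✓] messages repeat identical paths. A minimal sketch of that update-and-save pattern, assuming matplotlib and a simple step/value history (the function name and layout are assumptions):

    import matplotlib.pyplot as plt

    def update_total_curve(steps, values, ylabel, out_path):
        # Redraw the whole curve from history and overwrite the PNG so the
        # file on disk always reflects the run so far.
        fig, ax = plt.subplots()
        ax.plot(steps, values)
        ax.set_xlabel("step")
        ax.set_ylabel(ylabel)
        fig.savefig(out_path)
        plt.close(fig)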
+[2025-07-06 09:21:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:21:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:21:23] [Rank 0] step:3001/10000 train_time:242702ms step_avg:80.87ms
+[2025-07-06 09:21:25] [Rank 0] step:3021/10000 train_time:244208ms step_avg:80.84ms
+[2025-07-06 09:21:26] [Rank 0] step:3041/10000 train_time:245699ms step_avg:80.80ms
+[2025-07-06 09:21:28] [Rank 0] step:3061/10000 train_time:247243ms step_avg:80.77ms
+[2025-07-06 09:21:30] [Rank 0] step:3081/10000 train_time:249334ms step_avg:80.93ms
+[2025-07-06 09:21:31] [Rank 0] step:3101/10000 train_time:250924ms step_avg:80.92ms
+[2025-07-06 09:21:33] [Rank 0] step:3121/10000 train_time:252418ms step_avg:80.88ms
+[2025-07-06 09:21:34] [Rank 0] step:3141/10000 train_time:253911ms step_avg:80.84ms
+[2025-07-06 09:21:37] [Rank 0] step:3161/10000 train_time:256169ms step_avg:81.04ms
+[2025-07-06 09:21:38] [Rank 0] step:3181/10000 train_time:257662ms step_avg:81.00ms
+[2025-07-06 09:21:40] [Rank 0] step:3201/10000 train_time:259156ms step_avg:80.96ms
+[2025-07-06 09:21:41] [Rank 0] step:3221/10000 train_time:260863ms step_avg:80.99ms
+[2025-07-06 09:21:43] [Rank 0] step:3241/10000 train_time:262409ms step_avg:80.97ms
+[2025-07-06 09:21:45] [Rank 0] step:3261/10000 train_time:264091ms step_avg:80.98ms
+[2025-07-06 09:21:46] [Rank 0] step:3281/10000 train_time:265587ms step_avg:80.95ms
+[2025-07-06 09:21:48] [Rank 0] step:3301/10000 train_time:267085ms step_avg:80.91ms
+[2025-07-06 09:21:49] [Rank 0] step:3321/10000 train_time:268581ms step_avg:80.87ms
+[2025-07-06 09:21:51] [Rank 0] step:3341/10000 train_time:270313ms step_avg:80.91ms
+[2025-07-06 09:21:52] [Rank 0] step:3361/10000 train_time:271907ms step_avg:80.90ms
+[2025-07-06 09:21:54] [Rank 0] step:3381/10000 train_time:273402ms step_avg:80.86ms
+[2025-07-06 09:21:55] [Rank 0] step:3401/10000 train_time:274899ms step_avg:80.83ms
+[2025-07-06 09:21:58] [Rank 0] step:3421/10000 train_time:276395ms step_avg:80.79ms
+[2025-07-06 09:21:59] [Rank 0] step:3441/10000 train_time:278657ms step_avg:80.98ms
+[2025-07-06 09:22:01] [Rank 0] step:3461/10000 train_time:280155ms step_avg:80.95ms
+[2025-07-06 09:22:02] [Rank 0] step:3481/10000 train_time:281652ms step_avg:80.91ms
+[2025-07-06 09:22:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:22:04] [Rank 0] PRINT: step:3500/10000 train_loss:1.0347 val_loss:0.9984 train_time:283148ms step_avg:80.90ms
+[2025-07-06 09:22:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:22:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 09:22:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:27:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:27:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:27:28] [Rank 0] Total Loss: 5.0762
+[2025-07-06 09:27:28] [Rank 0] Total FTA: 0.7435
+[2025-07-06 09:27:28] [Rank 0] Group 0 Loss: 5.4848
+[2025-07-06 09:27:28] [Rank 0] Group 1 Loss: 4.8720
+[2025-07-06 09:27:28] [Rank 0] Group 2 Loss: 4.8499
+[2025-07-06 09:27:28] [Rank 0] Group 3 Loss: 4.9769
+[2025-07-06 09:27:28] [Rank 0] Group 4 Loss: 5.0526
+[2025-07-06 09:27:28] [Rank 0] Group 5 Loss: 5.0324
+[2025-07-06 09:27:28] [Rank 0] Group 6 Loss: 5.0051
+[2025-07-06 09:27:28] [Rank 0] Group 7 Loss: 5.0619
+[2025-07-06 09:27:28] [Rank 0] Group 8 Loss: 5.0719
+[2025-07-06 09:27:28] [Rank 0] Group 9 Loss: 5.0283
+[2025-07-06 09:27:28] [Rank 0] Group 10 Loss: 5.0357
+[2025-07-06 09:27:28] [Rank 0] Group 11 Loss: 5.0594
+[2025-07-06 09:27:28] [Rank 0] Group 0 FTA: 0.8296
+[2025-07-06 09:27:28] [Rank 0] Group 1 FTA: 0.6771
+[2025-07-06 09:27:28] [Rank 0] Group 2 FTA: 0.6979
+[2025-07-06 09:27:28] [Rank 0] Group 3 FTA: 0.7891
+[2025-07-06 09:27:28] [Rank 0] Group 4 FTA: 0.6589
+[2025-07-06 09:27:28] [Rank 0] Group 5 FTA: 0.7812
+[2025-07-06 09:27:28] [Rank 0] Group 6 FTA: 0.6823
+[2025-07-06 09:27:28] [Rank 0] Group 7 FTA: 0.7786
+[2025-07-06 09:27:28] [Rank 0] Group 8 FTA: 0.7135
+[2025-07-06 09:27:28] [Rank 0] Group 9 FTA: 0.7812
+[2025-07-06 09:27:28] [Rank 0] Group 10 FTA: 0.7207
+[2025-07-06 09:27:28] [Rank 0] Group 11 FTA: 0.7441
+[2025-07-06 09:27:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:27:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 09:27:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:27:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:27:30] [Rank 0] step:3501/10000 train_time:283170ms step_avg:80.88ms
+[2025-07-06 09:27:32] [Rank 0] step:3521/10000 train_time:285339ms step_avg:81.04ms
+[2025-07-06 09:27:34] [Rank 0] step:3541/10000 train_time:286827ms step_avg:81.00ms
+[2025-07-06 09:27:35] [Rank 0] step:3561/10000 train_time:288318ms step_avg:80.97ms
+[2025-07-06 09:27:37] [Rank 0] step:3581/10000 train_time:289809ms step_avg:80.93ms
+[2025-07-06 09:27:39] [Rank 0] step:3601/10000 train_time:291558ms step_avg:80.97ms
+[2025-07-06 09:27:40] [Rank 0] step:3621/10000 train_time:293449ms step_avg:81.04ms
+[2025-07-06 09:27:42] [Rank 0] step:3641/10000 train_time:294941ms step_avg:81.01ms
+[2025-07-06 09:27:43] [Rank 0] step:3661/10000 train_time:296432ms step_avg:80.97ms
+[2025-07-06 09:27:45] [Rank 0] step:3681/10000 train_time:297924ms step_avg:80.94ms
+[2025-07-06 09:27:47] [Rank 0] step:3701/10000 train_time:300063ms step_avg:81.08ms
+[2025-07-06 09:27:48] [Rank 0] step:3721/10000 train_time:301555ms step_avg:81.04ms
+[2025-07-06 09:27:50] [Rank 0] step:3741/10000 train_time:303049ms step_avg:81.01ms
+[2025-07-06 09:27:51] [Rank 0] step:3761/10000 train_time:304543ms step_avg:80.97ms
+[2025-07-06 09:27:53] [Rank 0] step:3781/10000 train_time:306698ms step_avg:81.12ms
+[2025-07-06 09:27:55] [Rank 0] step:3801/10000 train_time:308175ms step_avg:81.08ms
+[2025-07-06 09:27:56] [Rank 0] step:3821/10000 train_time:309669ms step_avg:81.04ms
+[2025-07-06 09:27:58] [Rank 0] step:3841/10000 train_time:311166ms step_avg:81.01ms
+[2025-07-06 09:27:59] [Rank 0] step:3861/10000 train_time:312661ms step_avg:80.98ms
+[2025-07-06 09:28:01] [Rank 0] step:3881/10000 train_time:314733ms step_avg:81.10ms
+[2025-07-06 09:28:03] [Rank 0] step:3901/10000 train_time:316228ms step_avg:81.06ms
+[2025-07-06 09:28:04] [Rank 0] step:3921/10000 train_time:317724ms step_avg:81.03ms
+[2025-07-06 09:28:06] [Rank 0] step:3941/10000 train_time:319223ms step_avg:81.00ms
+[2025-07-06 09:28:08] [Rank 0] step:3961/10000 train_time:320720ms step_avg:80.97ms
+[2025-07-06 09:28:10] [Rank 0] step:3981/10000 train_time:322862ms step_avg:81.10ms
+[2025-07-06 09:28:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:28:12] [Rank 0] PRINT: step:4000/10000 train_loss:0.9776 val_loss:0.9478 train_time:324358ms step_avg:81.09ms
+[2025-07-06 09:28:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:28:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 09:28:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:33:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:33:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:33:39] [Rank 0] Total Loss: 5.3125
+[2025-07-06 09:33:39] [Rank 0] Total FTA: 0.7642
+[2025-07-06 09:33:39] [Rank 0] Group 0 Loss: 5.7468
+[2025-07-06 09:33:39] [Rank 0] Group 1 Loss: 5.1302
+[2025-07-06 09:33:39] [Rank 0] Group 2 Loss: 5.1353
+[2025-07-06 09:33:39] [Rank 0] Group 3 Loss: 5.2607
+[2025-07-06 09:33:39] [Rank 0] Group 4 Loss: 5.3408
+[2025-07-06 09:33:39] [Rank 0] Group 5 Loss: 5.2694
+[2025-07-06 09:33:39] [Rank 0] Group 6 Loss: 5.1690
+[2025-07-06 09:33:39] [Rank 0] Group 7 Loss: 5.2774
+[2025-07-06 09:33:39] [Rank 0] Group 8 Loss: 5.2638
+[2025-07-06 09:33:39] [Rank 0] Group 9 Loss: 5.2349
+[2025-07-06 09:33:39] [Rank 0] Group 10 Loss: 5.2870
+[2025-07-06 09:33:39] [Rank 0] Group 11 Loss: 5.2637
+[2025-07-06 09:33:39] [Rank 0] Group 0 FTA: 0.4655
+[2025-07-06 09:33:39] [Rank 0] Group 1 FTA: 0.8568
+[2025-07-06 09:33:39] [Rank 0] Group 2 FTA: 0.7448
+[2025-07-06 09:33:39] [Rank 0] Group 3 FTA: 0.7396
+[2025-07-06 09:33:39] [Rank 0] Group 4 FTA: 0.8021
+[2025-07-06 09:33:39] [Rank 0] Group 5 FTA: 0.8698
+[2025-07-06 09:33:39] [Rank 0] Group 6 FTA: 0.8125
+[2025-07-06 09:33:39] [Rank 0] Group 7 FTA: 0.8151
+[2025-07-06 09:33:39] [Rank 0] Group 8 FTA: 0.7917
+[2025-07-06 09:33:39] [Rank 0] Group 9 FTA: 0.8320
+[2025-07-06 09:33:39] [Rank 0] Group 10 FTA: 0.8242
+[2025-07-06 09:33:39] [Rank 0] Group 11 FTA: 0.8223
+[2025-07-06 09:33:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:33:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 09:33:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:33:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:33:41] [Rank 0] step:4001/10000 train_time:324379ms step_avg:81.07ms
+[2025-07-06 09:33:42] [Rank 0] step:4021/10000 train_time:325882ms step_avg:81.05ms
+[2025-07-06 09:33:44] [Rank 0] step:4041/10000 train_time:327372ms step_avg:81.01ms
+[2025-07-06 09:33:46] [Rank 0] step:4061/10000 train_time:329530ms step_avg:81.15ms
+[2025-07-06 09:33:48] [Rank 0] step:4081/10000 train_time:331020ms step_avg:81.11ms
+[2025-07-06 09:33:49] [Rank 0] step:4101/10000 train_time:332512ms step_avg:81.08ms
+[2025-07-06 09:33:51] [Rank 0] step:4121/10000 train_time:334004ms step_avg:81.05ms
+[2025-07-06 09:33:53] [Rank 0] step:4141/10000 train_time:335548ms step_avg:81.03ms
+[2025-07-06 09:33:54] [Rank 0] step:4161/10000 train_time:337641ms step_avg:81.14ms
+[2025-07-06 09:33:56] [Rank 0] step:4181/10000 train_time:339134ms step_avg:81.11ms
+[2025-07-06 09:33:57] [Rank 0] step:4201/10000 train_time:340634ms step_avg:81.08ms
+[2025-07-06 09:33:59] [Rank 0] step:4221/10000 train_time:342128ms step_avg:81.05ms
+[2025-07-06 09:34:01] [Rank 0] step:4241/10000 train_time:344285ms step_avg:81.18ms
+[2025-07-06 09:34:02] [Rank 0] step:4261/10000 train_time:345778ms step_avg:81.15ms
+[2025-07-06 09:34:04] [Rank 0] step:4281/10000 train_time:347272ms step_avg:81.12ms
+[2025-07-06 09:34:05] [Rank 0] step:4301/10000 train_time:348767ms step_avg:81.09ms
+[2025-07-06 09:34:07] [Rank 0] step:4321/10000 train_time:350261ms step_avg:81.06ms
+[2025-07-06 09:34:09] [Rank 0] step:4341/10000 train_time:352396ms step_avg:81.18ms
+[2025-07-06 09:34:10] [Rank 0] step:4361/10000 train_time:353890ms step_avg:81.15ms
+[2025-07-06 09:34:12] [Rank 0] step:4381/10000 train_time:355384ms step_avg:81.12ms
+[2025-07-06 09:34:13] [Rank 0] step:4401/10000 train_time:356880ms step_avg:81.09ms
+[2025-07-06 09:34:16] [Rank 0] step:4421/10000 train_time:359038ms step_avg:81.21ms
+[2025-07-06 09:34:17] [Rank 0] step:4441/10000 train_time:360533ms step_avg:81.18ms
+[2025-07-06 09:34:19] [Rank 0] step:4461/10000 train_time:362028ms step_avg:81.15ms
+[2025-07-06 09:34:20] [Rank 0] step:4481/10000 train_time:363667ms step_avg:81.16ms
+[2025-07-06 09:34:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:34:23] [Rank 0] PRINT: step:4500/10000 train_loss:0.9342 val_loss:0.9133 train_time:365284ms step_avg:81.17ms
+[2025-07-06 09:34:23] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:34:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 09:34:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:39:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:39:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:39:47] [Rank 0] Total Loss: 5.4311
+[2025-07-06 09:39:47] [Rank 0] Total FTA: 0.8951
+[2025-07-06 09:39:47] [Rank 0] Group 0 Loss: 6.1188
+[2025-07-06 09:39:47] [Rank 0] Group 1 Loss: 5.1111
+[2025-07-06 09:39:47] [Rank 0] Group 2 Loss: 5.2530
+[2025-07-06 09:39:47] [Rank 0] Group 3 Loss: 5.2919
+[2025-07-06 09:39:47] [Rank 0] Group 4 Loss: 5.3557
+[2025-07-06 09:39:47] [Rank 0] Group 5 Loss: 5.3401
+[2025-07-06 09:39:47] [Rank 0] Group 6 Loss: 5.2504
+[2025-07-06 09:39:47] [Rank 0] Group 7 Loss: 5.3924
+[2025-07-06 09:39:47] [Rank 0] Group 8 Loss: 5.3719
+[2025-07-06 09:39:47] [Rank 0] Group 9 Loss: 5.3429
+[2025-07-06 09:39:47] [Rank 0] Group 10 Loss: 5.3656
+[2025-07-06 09:39:47] [Rank 0] Group 11 Loss: 5.3751
+[2025-07-06 09:39:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 09:39:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 09:39:47] [Rank 0] Group 2 FTA: 0.9036
+[2025-07-06 09:39:47] [Rank 0] Group 3 FTA: 0.8385
+[2025-07-06 09:39:47] [Rank 0] Group 4 FTA: 0.8490
+[2025-07-06 09:39:47] [Rank 0] Group 5 FTA: 0.8880
+[2025-07-06 09:39:47] [Rank 0] Group 6 FTA: 0.8542
+[2025-07-06 09:39:47] [Rank 0] Group 7 FTA: 0.8750
+[2025-07-06 09:39:47] [Rank 0] Group 8 FTA: 0.8698
+[2025-07-06 09:39:47] [Rank 0] Group 9 FTA: 0.8984
+[2025-07-06 09:39:47] [Rank 0] Group 10 FTA: 0.8926
+[2025-07-06 09:39:47] [Rank 0] Group 11 FTA: 0.8477
+[2025-07-06 09:39:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:39:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 09:39:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:39:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:39:50] [Rank 0] step:4501/10000 train_time:365314ms step_avg:81.16ms
+[2025-07-06 09:39:51] [Rank 0] step:4521/10000 train_time:367496ms step_avg:81.29ms
+[2025-07-06 09:39:53] [Rank 0] step:4541/10000 train_time:368986ms step_avg:81.26ms
+[2025-07-06 09:39:54] [Rank 0] step:4561/10000 train_time:370478ms step_avg:81.23ms
+[2025-07-06 09:39:56] [Rank 0] step:4581/10000 train_time:371970ms step_avg:81.20ms
+[2025-07-06 09:39:58] [Rank 0] step:4601/10000 train_time:374110ms step_avg:81.31ms
+[2025-07-06 09:39:59] [Rank 0] step:4621/10000 train_time:375603ms step_avg:81.28ms
+[2025-07-06 09:40:01] [Rank 0] step:4641/10000 train_time:377098ms step_avg:81.25ms
+[2025-07-06 09:40:02] [Rank 0] step:4661/10000 train_time:378592ms step_avg:81.23ms
+[2025-07-06 09:40:04] [Rank 0] step:4681/10000 train_time:380339ms step_avg:81.25ms
+[2025-07-06 09:40:06] [Rank 0] step:4701/10000 train_time:382219ms step_avg:81.31ms
+[2025-07-06 09:40:07] [Rank 0] step:4721/10000 train_time:383712ms step_avg:81.28ms
+[2025-07-06 09:40:09] [Rank 0] step:4741/10000 train_time:385209ms step_avg:81.25ms
+[2025-07-06 09:40:10] [Rank 0] step:4761/10000 train_time:386701ms step_avg:81.22ms
+[2025-07-06 09:40:12] [Rank 0] step:4781/10000 train_time:388864ms step_avg:81.34ms
+[2025-07-06 09:40:14] [Rank 0] step:4801/10000 train_time:390358ms step_avg:81.31ms
+[2025-07-06 09:40:15] [Rank 0] step:4821/10000 train_time:391854ms step_avg:81.28ms
+[2025-07-06 09:40:17] [Rank 0] step:4841/10000 train_time:393348ms step_avg:81.25ms
+[2025-07-06 09:40:19] [Rank 0] step:4861/10000 train_time:395099ms step_avg:81.28ms
+[2025-07-06 09:40:21] [Rank 0] step:4881/10000 train_time:397006ms step_avg:81.34ms
+[2025-07-06 09:40:22] [Rank 0] step:4901/10000 train_time:398500ms step_avg:81.31ms
+[2025-07-06 09:40:24] [Rank 0] step:4921/10000 train_time:399995ms step_avg:81.28ms
+[2025-07-06 09:40:25] [Rank 0] step:4941/10000 train_time:401490ms step_avg:81.26ms
+[2025-07-06 09:40:27] [Rank 0] step:4961/10000 train_time:403648ms step_avg:81.36ms
+[2025-07-06 09:40:29] [Rank 0] step:4981/10000 train_time:405143ms step_avg:81.34ms
+[2025-07-06 09:40:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:40:31] [Rank 0] PRINT: step:5000/10000 train_loss:0.9048 val_loss:0.8917 train_time:406639ms step_avg:81.33ms
+[2025-07-06 09:40:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:40:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
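Note: the detailed-evaluation "Total Loss" sits on a very different scale from the streaming losses (~5.54 below versus val_loss 0.8917 at step 5000), and it rises over training while val_loss falls, so the two are clearly not the same quantity. One plausible reading, stated purely as an assumption, is that the detailed loss is cross-entropy restricted to answer tokens only:

    import torch
    import torch.nn.functional as F

    def answer_only_loss(logits, targets, answer_mask):
        # Cross-entropy averaged over answer positions only; a hypothetical
        # explanation for the scale gap, not confirmed by the log.
        per_tok = F.cross_entropy(
            logits.view(-1, logits.size(-1)),
            targets.view(-1),
            reduction="none",
        ).view_as(targets)
        return (per_tok * answer_mask).sum() / answer_mask.sum()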
+[2025-07-06 09:40:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:45:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:45:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:45:55] [Rank 0] Total Loss: 5.5378
+[2025-07-06 09:45:55] [Rank 0] Total FTA: 0.8819
+[2025-07-06 09:45:55] [Rank 0] Group 0 Loss: 6.0698
+[2025-07-06 09:45:55] [Rank 0] Group 1 Loss: 5.3176
+[2025-07-06 09:45:55] [Rank 0] Group 2 Loss: 5.4401
+[2025-07-06 09:45:55] [Rank 0] Group 3 Loss: 5.4387
+[2025-07-06 09:45:55] [Rank 0] Group 4 Loss: 5.4842
+[2025-07-06 09:45:55] [Rank 0] Group 5 Loss: 5.4655
+[2025-07-06 09:45:55] [Rank 0] Group 6 Loss: 5.3843
+[2025-07-06 09:45:55] [Rank 0] Group 7 Loss: 5.4568
+[2025-07-06 09:45:55] [Rank 0] Group 8 Loss: 5.4637
+[2025-07-06 09:45:55] [Rank 0] Group 9 Loss: 5.4322
+[2025-07-06 09:45:55] [Rank 0] Group 10 Loss: 5.5013
+[2025-07-06 09:45:55] [Rank 0] Group 11 Loss: 5.5025
+[2025-07-06 09:45:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 09:45:55] [Rank 0] Group 1 FTA: 0.6328
+[2025-07-06 09:45:55] [Rank 0] Group 2 FTA: 0.8099
+[2025-07-06 09:45:55] [Rank 0] Group 3 FTA: 0.9193
+[2025-07-06 09:45:55] [Rank 0] Group 4 FTA: 0.8411
+[2025-07-06 09:45:55] [Rank 0] Group 5 FTA: 0.9167
+[2025-07-06 09:45:55] [Rank 0] Group 6 FTA: 0.8932
+[2025-07-06 09:45:55] [Rank 0] Group 7 FTA: 0.8672
+[2025-07-06 09:45:55] [Rank 0] Group 8 FTA: 0.8724
+[2025-07-06 09:45:55] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-06 09:45:55] [Rank 0] Group 10 FTA: 0.8945
+[2025-07-06 09:45:55] [Rank 0] Group 11 FTA: 0.8984
+[2025-07-06 09:45:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:45:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 09:45:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:45:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:45:56] [Rank 0] step:5001/10000 train_time:406662ms step_avg:81.32ms
+[2025-07-06 09:45:58] [Rank 0] step:5021/10000 train_time:408170ms step_avg:81.29ms
+[2025-07-06 09:46:00] [Rank 0] step:5041/10000 train_time:409658ms step_avg:81.27ms
+[2025-07-06 09:46:02] [Rank 0] step:5061/10000 train_time:411818ms step_avg:81.37ms
+[2025-07-06 09:46:03] [Rank 0] step:5081/10000 train_time:413308ms step_avg:81.34ms
+[2025-07-06 09:46:05] [Rank 0] step:5101/10000 train_time:414800ms step_avg:81.32ms
+[2025-07-06 09:46:06] [Rank 0] step:5121/10000 train_time:416293ms step_avg:81.29ms
+[2025-07-06 09:46:08] [Rank 0] step:5141/10000 train_time:418430ms step_avg:81.39ms
+[2025-07-06 09:46:10] [Rank 0] step:5161/10000 train_time:419922ms step_avg:81.36ms
+[2025-07-06 09:46:11] [Rank 0] step:5181/10000 train_time:421415ms step_avg:81.34ms
+[2025-07-06 09:46:13] [Rank 0] step:5201/10000 train_time:422909ms step_avg:81.31ms
+[2025-07-06 09:46:15] [Rank 0] step:5221/10000 train_time:424456ms step_avg:81.30ms
+[2025-07-06 09:46:16] [Rank 0] step:5241/10000 train_time:426540ms step_avg:81.39ms
+[2025-07-06 09:46:18] [Rank 0] step:5261/10000 train_time:428035ms step_avg:81.36ms
+[2025-07-06 09:46:19] [Rank 0] step:5281/10000 train_time:429529ms step_avg:81.33ms
+[2025-07-06 09:46:21] [Rank 0] step:5301/10000 train_time:431022ms step_avg:81.31ms
+[2025-07-06 09:46:23] [Rank 0] step:5321/10000 train_time:433185ms step_avg:81.41ms
+[2025-07-06 09:46:24] [Rank 0] step:5341/10000 train_time:434677ms step_avg:81.38ms
+[2025-07-06 09:46:26] [Rank 0] step:5361/10000 train_time:436172ms step_avg:81.36ms
+[2025-07-06 09:46:27] [Rank 0] step:5381/10000 train_time:437667ms step_avg:81.34ms
+[2025-07-06 09:46:30] [Rank 0] step:5401/10000 train_time:439419ms step_avg:81.36ms
+[2025-07-06 09:46:31] [Rank 0] step:5421/10000 train_time:441327ms step_avg:81.41ms
+[2025-07-06 09:46:33] [Rank 0] step:5441/10000 train_time:442819ms step_avg:81.39ms
+[2025-07-06 09:46:34] [Rank 0] step:5461/10000 train_time:444316ms step_avg:81.36ms
+[2025-07-06 09:46:36] [Rank 0] step:5481/10000 train_time:445811ms step_avg:81.34ms
+[2025-07-06 09:46:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:46:39] [Rank 0] PRINT: step:5500/10000 train_loss:0.8884 val_loss:0.8815 train_time:447969ms step_avg:81.45ms
+[2025-07-06 09:46:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:46:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 09:46:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:52:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:52:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:52:03] [Rank 0] Total Loss: 5.4944
+[2025-07-06 09:52:03] [Rank 0] Total FTA: 0.8732
+[2025-07-06 09:52:03] [Rank 0] Group 0 Loss: 5.9052
+[2025-07-06 09:52:03] [Rank 0] Group 1 Loss: 5.3024
+[2025-07-06 09:52:03] [Rank 0] Group 2 Loss: 5.1429
+[2025-07-06 09:52:03] [Rank 0] Group 3 Loss: 5.4024
+[2025-07-06 09:52:04] [Rank 0] Group 4 Loss: 5.4665
+[2025-07-06 09:52:04] [Rank 0] Group 5 Loss: 5.5342
+[2025-07-06 09:52:04] [Rank 0] Group 6 Loss: 5.3613
+[2025-07-06 09:52:04] [Rank 0] Group 7 Loss: 5.5318
+[2025-07-06 09:52:04] [Rank 0] Group 8 Loss: 5.4787
+[2025-07-06 09:52:04] [Rank 0] Group 9 Loss: 5.4219
+[2025-07-06 09:52:04] [Rank 0] Group 10 Loss: 5.4552
+[2025-07-06 09:52:04] [Rank 0] Group 11 Loss: 5.4996
+[2025-07-06 09:52:04] [Rank 0] Group 0 FTA: 0.6515
+[2025-07-06 09:52:04] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 09:52:04] [Rank 0] Group 2 FTA: 0.8255
+[2025-07-06 09:52:04] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 09:52:04] [Rank 0] Group 4 FTA: 0.8802
+[2025-07-06 09:52:04] [Rank 0] Group 5 FTA: 0.9427
+[2025-07-06 09:52:04] [Rank 0] Group 6 FTA: 0.8828
+[2025-07-06 09:52:04] [Rank 0] Group 7 FTA: 0.9010
+[2025-07-06 09:52:04] [Rank 0] Group 8 FTA: 0.8906
+[2025-07-06 09:52:04] [Rank 0] Group 9 FTA: 0.8867
+[2025-07-06 09:52:04] [Rank 0] Group 10 FTA: 0.8945
+[2025-07-06 09:52:04] [Rank 0] Group 11 FTA: 0.8994
+[2025-07-06 09:52:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:52:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 09:52:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:52:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:52:05] [Rank 0] step:5501/10000 train_time:447992ms step_avg:81.44ms
+[2025-07-06 09:52:07] [Rank 0] step:5521/10000 train_time:449494ms step_avg:81.42ms
+[2025-07-06 09:52:08] [Rank 0] step:5541/10000 train_time:450982ms step_avg:81.39ms
+[2025-07-06 09:52:10] [Rank 0] step:5561/10000 train_time:452473ms step_avg:81.37ms
+[2025-07-06 09:52:12] [Rank 0] step:5581/10000 train_time:454021ms step_avg:81.35ms
+[2025-07-06 09:52:13] [Rank 0] step:5601/10000 train_time:456116ms step_avg:81.43ms
+[2025-07-06 09:52:15] [Rank 0] step:5621/10000 train_time:457747ms step_avg:81.44ms
+[2025-07-06 09:52:16] [Rank 0] step:5641/10000 train_time:459289ms step_avg:81.42ms
+[2025-07-06 09:52:18] [Rank 0] step:5661/10000 train_time:460779ms step_avg:81.40ms
+[2025-07-06 09:52:20] [Rank 0] step:5681/10000 train_time:462921ms step_avg:81.49ms
+[2025-07-06 09:52:21] [Rank 0] step:5701/10000 train_time:464415ms step_avg:81.46ms
+[2025-07-06 09:52:23] [Rank 0] step:5721/10000 train_time:465907ms step_avg:81.44ms
+[2025-07-06 09:52:24] [Rank 0] step:5741/10000 train_time:467400ms step_avg:81.41ms
+[2025-07-06 09:52:27] [Rank 0] step:5761/10000 train_time:468895ms step_avg:81.39ms
+[2025-07-06 09:52:28] [Rank 0] step:5781/10000 train_time:471030ms step_avg:81.48ms
+[2025-07-06 09:52:30] [Rank 0] step:5801/10000 train_time:472526ms step_avg:81.46ms
+[2025-07-06 09:52:31] [Rank 0] step:5821/10000 train_time:474020ms step_avg:81.43ms
+[2025-07-06 09:52:33] [Rank 0] step:5841/10000 train_time:475514ms step_avg:81.41ms
+[2025-07-06 09:52:35] [Rank 0] step:5861/10000 train_time:477676ms step_avg:81.50ms
+[2025-07-06 09:52:36] [Rank 0] step:5881/10000 train_time:479170ms step_avg:81.48ms
+[2025-07-06 09:52:38] [Rank 0] step:5901/10000 train_time:480664ms step_avg:81.45ms
+[2025-07-06 09:52:39] [Rank 0] step:5921/10000 train_time:482160ms step_avg:81.43ms
+[2025-07-06 09:52:41] [Rank 0] step:5941/10000 train_time:483910ms step_avg:81.45ms
+[2025-07-06 09:52:43] [Rank 0] step:5961/10000 train_time:485817ms step_avg:81.50ms
+[2025-07-06 09:52:44] [Rank 0] step:5981/10000 train_time:487313ms step_avg:81.48ms
+[2025-07-06 09:52:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:52:47] [Rank 0] PRINT: step:6000/10000 train_loss:0.8801 val_loss:0.8772 train_time:488808ms step_avg:81.47ms
+[2025-07-06 09:52:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:52:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 09:52:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 09:58:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 09:58:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 09:58:14] [Rank 0] Total Loss: 5.5421
+[2025-07-06 09:58:14] [Rank 0] Total FTA: 0.9373
+[2025-07-06 09:58:14] [Rank 0] Group 0 Loss: 6.0534
+[2025-07-06 09:58:14] [Rank 0] Group 1 Loss: 5.3419
+[2025-07-06 09:58:14] [Rank 0] Group 2 Loss: 5.3610
+[2025-07-06 09:58:14] [Rank 0] Group 3 Loss: 5.4178
+[2025-07-06 09:58:14] [Rank 0] Group 4 Loss: 5.4777
+[2025-07-06 09:58:14] [Rank 0] Group 5 Loss: 5.5050
+[2025-07-06 09:58:14] [Rank 0] Group 6 Loss: 5.3377
+[2025-07-06 09:58:14] [Rank 0] Group 7 Loss: 5.5094
+[2025-07-06 09:58:14] [Rank 0] Group 8 Loss: 5.5523
+[2025-07-06 09:58:14] [Rank 0] Group 9 Loss: 5.4040
+[2025-07-06 09:58:14] [Rank 0] Group 10 Loss: 5.5216
+[2025-07-06 09:58:14] [Rank 0] Group 11 Loss: 5.5154
+[2025-07-06 09:58:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 09:58:14] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 09:58:14] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 09:58:14] [Rank 0] Group 3 FTA: 0.9583
+[2025-07-06 09:58:14] [Rank 0] Group 4 FTA: 0.8802
+[2025-07-06 09:58:14] [Rank 0] Group 5 FTA: 0.9323
+[2025-07-06 09:58:14] [Rank 0] Group 6 FTA: 0.9141
+[2025-07-06 09:58:14] [Rank 0] Group 7 FTA: 0.9193
+[2025-07-06 09:58:14] [Rank 0] Group 8 FTA: 0.9167
+[2025-07-06 09:58:14] [Rank 0] Group 9 FTA: 0.8984
+[2025-07-06 09:58:14] [Rank 0] Group 10 FTA: 0.9023
+[2025-07-06 09:58:14] [Rank 0] Group 11 FTA: 0.9092
+[2025-07-06 09:58:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 09:58:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 09:58:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 09:58:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 09:58:16] [Rank 0] step:6001/10000 train_time:488830ms step_avg:81.46ms
+[2025-07-06 09:58:17] [Rank 0] step:6021/10000 train_time:490331ms step_avg:81.44ms
+[2025-07-06 09:58:19] [Rank 0] step:6041/10000 train_time:492478ms step_avg:81.52ms
+[2025-07-06 09:58:21] [Rank 0] step:6061/10000 train_time:493968ms step_avg:81.50ms
+[2025-07-06 09:58:22] [Rank 0] step:6081/10000 train_time:495456ms step_avg:81.48ms
+[2025-07-06 09:58:24] [Rank 0] step:6101/10000 train_time:496947ms step_avg:81.45ms
+[2025-07-06 09:58:26] [Rank 0] step:6121/10000 train_time:498693ms step_avg:81.47ms
+[2025-07-06 09:58:27] [Rank 0] step:6141/10000 train_time:500589ms step_avg:81.52ms
+[2025-07-06 09:58:29] [Rank 0] step:6161/10000 train_time:502080ms step_avg:81.49ms
+[2025-07-06 09:58:30] [Rank 0] step:6181/10000 train_time:503572ms step_avg:81.47ms
+[2025-07-06 09:58:32] [Rank 0] step:6201/10000 train_time:505065ms step_avg:81.45ms
+[2025-07-06 09:58:34] [Rank 0] step:6221/10000 train_time:507341ms step_avg:81.55ms
+[2025-07-06 09:58:36] [Rank 0] step:6241/10000 train_time:508949ms step_avg:81.55ms
+[2025-07-06 09:58:37] [Rank 0] step:6261/10000 train_time:510443ms step_avg:81.53ms
+[2025-07-06 09:58:39] [Rank 0] step:6281/10000 train_time:511936ms step_avg:81.51ms
+[2025-07-06 09:58:41] [Rank 0] step:6301/10000 train_time:514093ms step_avg:81.59ms
+[2025-07-06 09:58:42] [Rank 0] step:6321/10000 train_time:515566ms step_avg:81.56ms
+[2025-07-06 09:58:44] [Rank 0] step:6341/10000 train_time:517061ms step_avg:81.54ms
+[2025-07-06 09:58:45] [Rank 0] step:6361/10000 train_time:518554ms step_avg:81.52ms
+[2025-07-06 09:58:47] [Rank 0] step:6381/10000 train_time:520047ms step_avg:81.50ms
+[2025-07-06 09:58:49] [Rank 0] step:6401/10000 train_time:522210ms step_avg:81.58ms
+[2025-07-06 09:58:50] [Rank 0] step:6421/10000 train_time:523704ms step_avg:81.56ms
+[2025-07-06 09:58:52] [Rank 0] step:6441/10000 train_time:525199ms step_avg:81.54ms
+[2025-07-06 09:58:53] [Rank 0] step:6461/10000 train_time:526695ms step_avg:81.52ms
+[2025-07-06 09:58:56] [Rank 0] step:6481/10000 train_time:528241ms step_avg:81.51ms
+[2025-07-06 09:58:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 09:58:58] [Rank 0] PRINT: step:6500/10000 train_loss:0.8756 val_loss:0.8738 train_time:530353ms step_avg:81.59ms
+[2025-07-06 09:58:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 09:58:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 09:58:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 10:04:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 10:04:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 10:04:23] [Rank 0] Total Loss: 5.6365
+[2025-07-06 10:04:23] [Rank 0] Total FTA: 0.9224
+[2025-07-06 10:04:23] [Rank 0] Group 0 Loss: 5.9842
+[2025-07-06 10:04:23] [Rank 0] Group 1 Loss: 5.4055
+[2025-07-06 10:04:23] [Rank 0] Group 2 Loss: 5.3739
+[2025-07-06 10:04:23] [Rank 0] Group 3 Loss: 5.6747
+[2025-07-06 10:04:23] [Rank 0] Group 4 Loss: 5.6646
+[2025-07-06 10:04:23] [Rank 0] Group 5 Loss: 5.6120
+[2025-07-06 10:04:23] [Rank 0] Group 6 Loss: 5.5658
+[2025-07-06 10:04:23] [Rank 0] Group 7 Loss: 5.6279
+[2025-07-06 10:04:23] [Rank 0] Group 8 Loss: 5.5927
+[2025-07-06 10:04:23] [Rank 0] Group 9 Loss: 5.5856
+[2025-07-06 10:04:23] [Rank 0] Group 10 Loss: 5.5766
+[2025-07-06 10:04:23] [Rank 0] Group 11 Loss: 5.6338
+[2025-07-06 10:04:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 10:04:23] [Rank 0] Group 1 FTA: 0.8203
+[2025-07-06 10:04:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 10:04:23] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-06 10:04:23] [Rank 0] Group 4 FTA: 0.8646
+[2025-07-06 10:04:23] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-06 10:04:23] [Rank 0] Group 6 FTA: 0.9245
+[2025-07-06 10:04:23] [Rank 0] Group 7 FTA: 0.8958
+[2025-07-06 10:04:23] [Rank 0] Group 8 FTA: 0.9062
+[2025-07-06 10:04:23] [Rank 0] Group 9 FTA: 0.9219
+[2025-07-06 10:04:23] [Rank 0] Group 10 FTA: 0.8750
+[2025-07-06 10:04:23] [Rank 0] Group 11 FTA: 0.9150
+[2025-07-06 10:04:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 10:04:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 10:04:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 10:04:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 10:04:25] [Rank 0] step:6501/10000 train_time:530374ms step_avg:81.58ms
+[2025-07-06 10:04:26] [Rank 0] step:6521/10000 train_time:531885ms step_avg:81.56ms
+[2025-07-06 10:04:28] [Rank 0] step:6541/10000 train_time:533375ms step_avg:81.54ms
+[2025-07-06 10:04:29] [Rank 0] step:6561/10000 train_time:534864ms step_avg:81.52ms
+[2025-07-06 10:04:31] [Rank 0] step:6581/10000 train_time:537015ms step_avg:81.60ms
+[2025-07-06 10:04:33] [Rank 0] step:6601/10000 train_time:538504ms step_avg:81.58ms
+[2025-07-06 10:04:34] [Rank 0] step:6621/10000 train_time:539997ms step_avg:81.56ms
+[2025-07-06 10:04:36] [Rank 0] step:6641/10000 train_time:541488ms step_avg:81.54ms
+[2025-07-06 10:04:38] [Rank 0] step:6661/10000 train_time:542979ms step_avg:81.52ms
+[2025-07-06 10:04:39] [Rank 0] step:6681/10000 train_time:545124ms step_avg:81.59ms
+[2025-07-06 10:04:41] [Rank 0] step:6701/10000 train_time:546618ms step_avg:81.57ms
+[2025-07-06 10:04:42] [Rank 0] step:6721/10000 train_time:548110ms step_avg:81.55ms
+[2025-07-06 10:04:44] [Rank 0] step:6741/10000 train_time:549604ms step_avg:81.53ms
+[2025-07-06 10:04:46] [Rank 0] step:6761/10000 train_time:551738ms step_avg:81.61ms
+[2025-07-06 10:04:48] [Rank 0] step:6781/10000 train_time:553229ms step_avg:81.59ms
+[2025-07-06 10:04:49] [Rank 0] step:6801/10000 train_time:554724ms step_avg:81.57ms
+[2025-07-06 10:04:51] [Rank 0] step:6821/10000 train_time:556218ms step_avg:81.54ms
+[2025-07-06 10:04:53] [Rank 0] step:6841/10000 train_time:557766ms step_avg:81.53ms
+[2025-07-06 10:04:54] [Rank 0] step:6861/10000 train_time:560078ms step_avg:81.63ms
+[2025-07-06 10:04:56] [Rank 0] step:6881/10000 train_time:561572ms step_avg:81.61ms
+[2025-07-06 10:04:57] [Rank 0] step:6901/10000 train_time:563069ms step_avg:81.59ms
+[2025-07-06 10:04:59] [Rank 0] step:6921/10000 train_time:564566ms step_avg:81.57ms
+[2025-07-06 10:05:01] [Rank 0] step:6941/10000 train_time:566715ms step_avg:81.65ms
+[2025-07-06 10:05:03] [Rank 0] step:6961/10000 train_time:568211ms step_avg:81.63ms
+[2025-07-06 10:05:04] [Rank 0] step:6981/10000 train_time:569707ms step_avg:81.61ms
+[2025-07-06 10:05:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 10:05:06] [Rank 0] PRINT: step:7000/10000 train_loss:0.8727 val_loss:0.8720 train_time:571203ms step_avg:81.60ms
+[2025-07-06 10:05:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 10:05:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 10:05:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 10:10:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 10:10:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 10:10:31] [Rank 0] Total Loss: 5.6463
+[2025-07-06 10:10:31] [Rank 0] Total FTA: 0.8871
+[2025-07-06 10:10:31] [Rank 0] Group 0 Loss: 6.0289
+[2025-07-06 10:10:31] [Rank 0] Group 1 Loss: 5.3219
+[2025-07-06 10:10:31] [Rank 0] Group 2 Loss: 5.7279
+[2025-07-06 10:10:31] [Rank 0] Group 3 Loss: 5.5824
+[2025-07-06 10:10:31] [Rank 0] Group 4 Loss: 5.6250
+[2025-07-06 10:10:31] [Rank 0] Group 5 Loss: 5.5562
+[2025-07-06 10:10:31] [Rank 0] Group 6 Loss: 5.4479
+[2025-07-06 10:10:31] [Rank 0] Group 7 Loss: 5.6334
+[2025-07-06 10:10:31] [Rank 0] Group 8 Loss: 5.5962
+[2025-07-06 10:10:31] [Rank 0] Group 9 Loss: 5.6204
+[2025-07-06 10:10:31] [Rank 0] Group 10 Loss: 5.6224
+[2025-07-06 10:10:31] [Rank 0] Group 11 Loss: 5.6319
+[2025-07-06 10:10:31] [Rank 0] Group 0 FTA: 0.8427
+[2025-07-06 10:10:31] [Rank 0] Group 1 FTA: 0.8255
+[2025-07-06 10:10:31] [Rank 0] Group 2 FTA: 0.9271
+[2025-07-06 10:10:31] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-06 10:10:31] [Rank 0] Group 4 FTA: 0.7943
+[2025-07-06 10:10:31] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-06 10:10:31] [Rank 0] Group 6 FTA: 0.8750
+[2025-07-06 10:10:31] [Rank 0] Group 7 FTA: 0.9245
+[2025-07-06 10:10:31] [Rank 0] Group 8 FTA: 0.9062
+[2025-07-06 10:10:31] [Rank 0] Group 9 FTA: 0.8750
+[2025-07-06 10:10:31] [Rank 0] Group 10 FTA: 0.8789
+[2025-07-06 10:10:31] [Rank 0] Group 11 FTA: 0.9053
+[2025-07-06 10:10:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 10:10:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 10:10:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 10:10:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 10:10:33] [Rank 0] step:7001/10000 train_time:571227ms step_avg:81.59ms
+[2025-07-06 10:10:35] [Rank 0] step:7021/10000 train_time:572981ms step_avg:81.61ms
+[2025-07-06 10:10:36] [Rank 0] step:7041/10000 train_time:574859ms step_avg:81.64ms
+[2025-07-06 10:10:38] [Rank 0] step:7061/10000 train_time:576348ms step_avg:81.62ms
+[2025-07-06 10:10:39] [Rank 0] step:7081/10000 train_time:577838ms step_avg:81.60ms
+[2025-07-06 10:10:41] [Rank 0] step:7101/10000 train_time:579328ms step_avg:81.58ms
+[2025-07-06 10:10:43] [Rank 0] step:7121/10000 train_time:581474ms step_avg:81.66ms
+[2025-07-06 10:10:44] [Rank 0] step:7141/10000 train_time:582965ms step_avg:81.64ms
+[2025-07-06 10:10:46] [Rank 0] step:7161/10000 train_time:584457ms step_avg:81.62ms
+[2025-07-06 10:10:47] [Rank 0] step:7181/10000 train_time:585949ms step_avg:81.60ms
+[2025-07-06 10:10:50] [Rank 0] step:7201/10000 train_time:587697ms step_avg:81.61ms
+[2025-07-06 10:10:51] [Rank 0] step:7221/10000 train_time:589583ms step_avg:81.65ms
+[2025-07-06 10:10:53] [Rank 0] step:7241/10000 train_time:591075ms step_avg:81.63ms
+[2025-07-06 10:10:54] [Rank 0] step:7261/10000 train_time:592568ms step_avg:81.61ms
+[2025-07-06 10:10:56] [Rank 0] step:7281/10000 train_time:594062ms step_avg:81.59ms
+[2025-07-06 10:10:58] [Rank 0] step:7301/10000 train_time:596197ms step_avg:81.66ms
+[2025-07-06 10:10:59] [Rank 0] step:7321/10000 train_time:597692ms step_avg:81.64ms
+[2025-07-06 10:11:01] [Rank 0] step:7341/10000 train_time:599190ms step_avg:81.62ms
+[2025-07-06 10:11:02] [Rank 0] step:7361/10000 train_time:600683ms step_avg:81.60ms
+[2025-07-06 10:11:04] [Rank 0] step:7381/10000 train_time:602437ms step_avg:81.62ms
+[2025-07-06 10:11:06] [Rank 0] step:7401/10000 train_time:604340ms step_avg:81.66ms
+[2025-07-06 10:11:07] [Rank 0] step:7421/10000 train_time:605835ms step_avg:81.64ms
+[2025-07-06 10:11:09] [Rank 0] step:7441/10000 train_time:607332ms step_avg:81.62ms
+[2025-07-06 10:11:10] [Rank 0] step:7461/10000 train_time:608828ms step_avg:81.60ms
+[2025-07-06 10:11:12] [Rank 0] step:7481/10000 train_time:610983ms step_avg:81.67ms
+[2025-07-06 10:11:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 10:11:15] [Rank 0] PRINT: step:7500/10000 train_loss:0.8705 val_loss:0.8707 train_time:612731ms step_avg:81.70ms
+[2025-07-06 10:11:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 10:11:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 10:11:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 10:16:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 10:16:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 10:16:44] [Rank 0] Total Loss: 5.6668
+[2025-07-06 10:16:44] [Rank 0] Total FTA: 0.9249
+[2025-07-06 10:16:44] [Rank 0] Group 0 Loss: 6.1035
+[2025-07-06 10:16:44] [Rank 0] Group 1 Loss: 5.4847
+[2025-07-06 10:16:44] [Rank 0] Group 2 Loss: 5.5087
+[2025-07-06 10:16:44] [Rank 0] Group 3 Loss: 5.6219
+[2025-07-06 10:16:44] [Rank 0] Group 4 Loss: 5.7060
+[2025-07-06 10:16:44] [Rank 0] Group 5 Loss: 5.5792
+[2025-07-06 10:16:44] [Rank 0] Group 6 Loss: 5.5063
+[2025-07-06 10:16:44] [Rank 0] Group 7 Loss: 5.6077
+[2025-07-06 10:16:44] [Rank 0] Group 8 Loss: 5.6005
+[2025-07-06 10:16:44] [Rank 0] Group 9 Loss: 5.6353
+[2025-07-06 10:16:44] [Rank 0] Group 10 Loss: 5.6465
+[2025-07-06 10:16:44] [Rank 0] Group 11 Loss: 5.6264
+[2025-07-06 10:16:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 10:16:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 10:16:44] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 10:16:44] [Rank 0] Group 3 FTA: 0.8802
+[2025-07-06 10:16:44] [Rank 0] Group 4 FTA: 0.8490
+[2025-07-06 10:16:44] [Rank 0] Group 5 FTA: 0.9219
+[2025-07-06 10:16:44] [Rank 0] Group 6 FTA: 0.9062
+[2025-07-06 10:16:44] [Rank 0] Group 7 FTA: 0.8984
+[2025-07-06 10:16:44] [Rank 0] Group 8 FTA: 0.9010
+[2025-07-06 10:16:44] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-06 10:16:44] [Rank 0] Group 10 FTA: 0.9141
+[2025-07-06 10:16:44] [Rank 0] Group 11 FTA: 0.8926
+[2025-07-06 10:16:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 10:16:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 10:16:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 10:16:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 10:16:46] [Rank 0] step:7501/10000 train_time:612755ms step_avg:81.69ms
+[2025-07-06 10:16:47] [Rank 0] step:7521/10000 train_time:614246ms step_avg:81.67ms
+[2025-07-06 10:16:49] [Rank 0] step:7541/10000 train_time:615733ms step_avg:81.65ms
+[2025-07-06 10:16:51] [Rank 0] step:7561/10000 train_time:617279ms step_avg:81.64ms
+[2025-07-06 10:16:53] [Rank 0] step:7581/10000 train_time:619464ms step_avg:81.71ms
+[2025-07-06 10:16:54] [Rank 0] step:7601/10000 train_time:620952ms step_avg:81.69ms
+[2025-07-06 10:16:55] [Rank 0] step:7621/10000 train_time:622444ms step_avg:81.67ms
+[2025-07-06 10:16:57] [Rank 0] step:7641/10000 train_time:623936ms step_avg:81.66ms
+[2025-07-06 10:16:59] [Rank 0] step:7661/10000 train_time:625665ms step_avg:81.67ms
+[2025-07-06 10:17:00] [Rank 0] step:7681/10000 train_time:627157ms step_avg:81.65ms
+[2025-07-06 10:17:02] [Rank 0] step:7701/10000 train_time:628651ms step_avg:81.63ms
+[2025-07-06 10:17:03] [Rank 0] step:7721/10000 train_time:630145ms step_avg:81.61ms
+[2025-07-06 10:17:05] [Rank 0] step:7741/10000 train_time:631894ms step_avg:81.63ms
+[2025-07-06 10:17:07] [Rank 0] step:7761/10000 train_time:633796ms step_avg:81.66ms
+[2025-07-06 10:17:08] [Rank 0] step:7781/10000 train_time:635289ms step_avg:81.65ms
+[2025-07-06 10:17:10] [Rank 0] step:7801/10000 train_time:636783ms step_avg:81.63ms
+[2025-07-06 10:17:11] [Rank 0] step:7821/10000 train_time:638278ms step_avg:81.61ms
+[2025-07-06 10:17:13] [Rank 0] step:7841/10000 train_time:640011ms step_avg:81.62ms
+[2025-07-06 10:17:15] [Rank 0] step:7861/10000 train_time:641506ms step_avg:81.61ms
+[2025-07-06 10:17:16] [Rank 0] step:7881/10000 train_time:643002ms step_avg:81.59ms
+[2025-07-06 10:17:18] [Rank 0] step:7901/10000 train_time:644499ms step_avg:81.57ms
+[2025-07-06 10:17:20] [Rank 0] step:7921/10000 train_time:646050ms step_avg:81.56ms
+[2025-07-06 10:17:21] [Rank 0] step:7941/10000 train_time:648132ms step_avg:81.62ms
+[2025-07-06 10:17:23] [Rank 0] step:7961/10000 train_time:649629ms step_avg:81.60ms
+[2025-07-06 10:17:24] [Rank 0] step:7981/10000 train_time:651128ms step_avg:81.58ms
+[2025-07-06 10:17:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 10:17:27] [Rank 0] PRINT: step:8000/10000 train_loss:0.8688 val_loss:0.8698 train_time:652627ms step_avg:81.58ms
+[2025-07-06 10:17:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 10:17:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 10:17:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 10:22:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 10:22:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 10:22:54] [Rank 0] Total Loss: 5.7181
+[2025-07-06 10:22:54] [Rank 0] Total FTA: 0.9288
+[2025-07-06 10:22:54] [Rank 0] Group 0 Loss: 6.0379
+[2025-07-06 10:22:54] [Rank 0] Group 1 Loss: 5.6697
+[2025-07-06 10:22:54] [Rank 0] Group 2 Loss: 5.5175
+[2025-07-06 10:22:54] [Rank 0] Group 3 Loss: 5.6366
+[2025-07-06 10:22:54] [Rank 0] Group 4 Loss: 5.7038
+[2025-07-06 10:22:54] [Rank 0] Group 5 Loss: 5.7213
+[2025-07-06 10:22:54] [Rank 0] Group 6 Loss: 5.5748
+[2025-07-06 10:22:54] [Rank 0] Group 7 Loss: 5.6768
+[2025-07-06 10:22:54] [Rank 0] Group 8 Loss: 5.7172
+[2025-07-06 10:22:54] [Rank 0] Group 9 Loss: 5.6838
+[2025-07-06 10:22:54] [Rank 0] Group 10 Loss: 5.6812
+[2025-07-06 10:22:54] [Rank 0] Group 11 Loss: 5.7026
+[2025-07-06 10:22:54] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 10:22:54] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 10:22:54] [Rank 0] Group 2 FTA: 0.8880
+[2025-07-06 10:22:54] [Rank 0] Group 3 FTA: 0.9583
+[2025-07-06 10:22:54] [Rank 0] Group 4 FTA: 0.8672
+[2025-07-06 10:22:54] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-06 10:22:54] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-06 10:22:54] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-06 10:22:54] [Rank 0] Group 8 FTA: 0.8880
+[2025-07-06 10:22:54] [Rank 0] Group 9 FTA: 0.9023
+[2025-07-06 10:22:54] [Rank 0] Group 10 FTA: 0.9082
+[2025-07-06 10:22:54] [Rank 0] Group 11 FTA: 0.9023
+[2025-07-06 10:22:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 10:22:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 10:22:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 10:22:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 10:22:56] [Rank 0] step:8001/10000 train_time:652652ms step_avg:81.57ms
+[2025-07-06 10:22:58] [Rank 0] step:8021/10000 train_time:654815ms step_avg:81.64ms
+[2025-07-06 10:23:00] [Rank 0] step:8041/10000 train_time:656302ms step_avg:81.62ms
+[2025-07-06 10:23:01] [Rank 0] step:8061/10000 train_time:657791ms step_avg:81.60ms
+[2025-07-06 10:23:03] [Rank 0] step:8081/10000 train_time:659283ms step_avg:81.58ms
+[2025-07-06 10:23:04] [Rank 0] step:8101/10000 train_time:660773ms step_avg:81.57ms
+[2025-07-06 10:23:06] [Rank 0] step:8121/10000 train_time:662498ms step_avg:81.58ms
+[2025-07-06 10:23:07] [Rank 0] step:8141/10000 train_time:663990ms step_avg:81.56ms
+[2025-07-06 10:23:09] [Rank 0] step:8161/10000 train_time:665482ms step_avg:81.54ms
+[2025-07-06 10:23:10] [Rank 0] step:8181/10000 train_time:666974ms step_avg:81.53ms
+[2025-07-06 10:23:12] [Rank 0] step:8201/10000 train_time:669119ms step_avg:81.59ms
+[2025-07-06 10:23:14] [Rank 0] step:8221/10000 train_time:670611ms step_avg:81.57ms
+[2025-07-06 10:23:15] [Rank 0] step:8241/10000 train_time:672105ms step_avg:81.56ms
+[2025-07-06 10:23:17] [Rank 0] step:8261/10000 train_time:673598ms step_avg:81.54ms
+[2025-07-06 10:23:19] [Rank 0] step:8281/10000 train_time:675348ms step_avg:81.55ms
+[2025-07-06 10:23:20] [Rank 0] step:8301/10000 train_time:677230ms step_avg:81.58ms
+[2025-07-06 10:23:22] [Rank 0] step:8321/10000 train_time:678721ms step_avg:81.57ms
+[2025-07-06 10:23:23] [Rank 0] step:8341/10000 train_time:680217ms step_avg:81.55ms
+[2025-07-06 10:23:25] [Rank 0] step:8361/10000 train_time:681712ms step_avg:81.53ms
+[2025-07-06 10:23:27] [Rank 0] step:8381/10000 train_time:683873ms step_avg:81.60ms
+[2025-07-06 10:23:29] [Rank 0] step:8401/10000 train_time:685369ms step_avg:81.58ms
+[2025-07-06 10:23:30] [Rank 0] step:8421/10000 train_time:686865ms step_avg:81.57ms
+[2025-07-06 10:23:32] [Rank 0] step:8441/10000 train_time:688360ms step_avg:81.55ms
+[2025-07-06 10:23:33] [Rank 0] step:8461/10000 train_time:689907ms step_avg:81.54ms
+[2025-07-06 10:23:35] [Rank 0] step:8481/10000 train_time:691589ms step_avg:81.55ms
+[2025-07-06 10:23:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 10:23:37] [Rank 0] PRINT: step:8500/10000 train_loss:0.8674 val_loss:0.8685 train_time:693085ms step_avg:81.54ms
+[2025-07-06 10:23:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 10:23:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
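The Group N Loss / Group N FTA blocks in these evaluation dumps (FTA = first-token accuracy) are exactly what the per_class_loss_curves.png and per_class_acc_curves.png plots are built from, so they can also be recovered straight from the log text. A small, hypothetical parser (the file name is a placeholder and the regexes are inferred from the line format visible above):

import re

group_loss, group_fta = {}, {}
with open("training_log_<uuid>.txt") as f:   # placeholder path for one run's log
    for line in f:
        if m := re.search(r"Group (\d+) Loss: ([\d.]+)", line):
            group_loss.setdefault(int(m[1]), []).append(float(m[2]))
        if m := re.search(r"Group (\d+) FTA: ([\d.]+)", line):
            group_fta.setdefault(int(m[1]), []).append(float(m[2]))
# group_loss[g] / group_fta[g] now hold one value per detailed evaluation, in step order.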
+[2025-07-06 10:23:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 10:29:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 10:29:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 10:29:05] [Rank 0] Total Loss: 5.7746
+[2025-07-06 10:29:05] [Rank 0] Total FTA: 0.9283
+[2025-07-06 10:29:05] [Rank 0] Group 0 Loss: 6.3722
+[2025-07-06 10:29:05] [Rank 0] Group 1 Loss: 5.6386
+[2025-07-06 10:29:05] [Rank 0] Group 2 Loss: 5.6093
+[2025-07-06 10:29:05] [Rank 0] Group 3 Loss: 5.7010
+[2025-07-06 10:29:05] [Rank 0] Group 4 Loss: 5.7877
+[2025-07-06 10:29:05] [Rank 0] Group 5 Loss: 5.6990
+[2025-07-06 10:29:05] [Rank 0] Group 6 Loss: 5.5495
+[2025-07-06 10:29:05] [Rank 0] Group 7 Loss: 5.7216
+[2025-07-06 10:29:06] [Rank 0] Group 8 Loss: 5.7337
+[2025-07-06 10:29:06] [Rank 0] Group 9 Loss: 5.6835
+[2025-07-06 10:29:06] [Rank 0] Group 10 Loss: 5.6403
+[2025-07-06 10:29:06] [Rank 0] Group 11 Loss: 5.6995
+[2025-07-06 10:29:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 10:29:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 10:29:06] [Rank 0] Group 2 FTA: 0.7865
+[2025-07-06 10:29:06] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-06 10:29:06] [Rank 0] Group 4 FTA: 0.9453
+[2025-07-06 10:29:06] [Rank 0] Group 5 FTA: 0.9115
+[2025-07-06 10:29:06] [Rank 0] Group 6 FTA: 0.8854
+[2025-07-06 10:29:06] [Rank 0] Group 7 FTA: 0.9089
+[2025-07-06 10:29:06] [Rank 0] Group 8 FTA: 0.9401
+[2025-07-06 10:29:06] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-06 10:29:06] [Rank 0] Group 10 FTA: 0.9199
+[2025-07-06 10:29:06] [Rank 0] Group 11 FTA: 0.9102
+[2025-07-06 10:29:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 10:29:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 10:29:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 10:29:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 10:29:07] [Rank 0] step:8501/10000 train_time:693109ms step_avg:81.53ms
+[2025-07-06 10:29:09] [Rank 0] step:8521/10000 train_time:694866ms step_avg:81.55ms
+[2025-07-06 10:29:10] [Rank 0] step:8541/10000 train_time:696350ms step_avg:81.53ms
+[2025-07-06 10:29:13] [Rank 0] step:8561/10000 train_time:698506ms step_avg:81.59ms
+[2025-07-06 10:29:14] [Rank 0] step:8581/10000 train_time:699995ms step_avg:81.58ms
+[2025-07-06 10:29:16] [Rank 0] step:8601/10000 train_time:701487ms step_avg:81.56ms
+[2025-07-06 10:29:17] [Rank 0] step:8621/10000 train_time:702977ms step_avg:81.54ms
+[2025-07-06 10:29:19] [Rank 0] step:8641/10000 train_time:705143ms step_avg:81.60ms
+[2025-07-06 10:29:21] [Rank 0] step:8661/10000 train_time:706614ms step_avg:81.59ms
+[2025-07-06 10:29:22] [Rank 0] step:8681/10000 train_time:708205ms step_avg:81.58ms
+[2025-07-06 10:29:24] [Rank 0] step:8701/10000 train_time:709697ms step_avg:81.57ms
+[2025-07-06 10:29:25] [Rank 0] step:8721/10000 train_time:711191ms step_avg:81.55ms
+[2025-07-06 10:29:27] [Rank 0] step:8741/10000 train_time:713325ms step_avg:81.61ms
+[2025-07-06 10:29:29] [Rank 0] step:8761/10000 train_time:714919ms step_avg:81.60ms
+[2025-07-06 10:29:31] [Rank 0] step:8781/10000 train_time:716413ms step_avg:81.59ms
+[2025-07-06 10:29:32] [Rank 0] step:8801/10000 train_time:717908ms step_avg:81.57ms
+[2025-07-06 10:29:34] [Rank 0] step:8821/10000 train_time:719452ms step_avg:81.56ms
+[2025-07-06 10:29:35] [Rank 0] step:8841/10000 train_time:721131ms step_avg:81.57ms
+[2025-07-06 10:29:37] [Rank 0] step:8861/10000 train_time:722627ms step_avg:81.55ms
+[2025-07-06 10:29:38] [Rank 0] step:8881/10000 train_time:724122ms step_avg:81.54ms
+[2025-07-06 10:29:40] [Rank 0] step:8901/10000 train_time:725618ms step_avg:81.52ms
+[2025-07-06 10:29:42] [Rank 0] step:8921/10000 train_time:727758ms step_avg:81.58ms
+[2025-07-06 10:29:44] [Rank 0] step:8941/10000 train_time:729355ms step_avg:81.57ms
+[2025-07-06 10:29:45] [Rank 0] step:8961/10000 train_time:730852ms step_avg:81.56ms
+[2025-07-06 10:29:46] [Rank 0] step:8981/10000 train_time:732348ms step_avg:81.54ms
+[2025-07-06 10:29:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 10:29:49] [Rank 0] PRINT: step:9000/10000 train_loss:0.8662 val_loss:0.8679 train_time:733844ms step_avg:81.54ms
+[2025-07-06 10:29:49] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 10:29:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
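Each detailed evaluation pass is far more expensive than a training step, which the timestamps above make explicit: the step-8500 evaluation starts at 10:23:37 and reports completion at 10:29:05. A quick computation (all values from the log):

eval_s = (10 * 3600 + 29 * 60 + 5) - (10 * 3600 + 23 * 60 + 37)
print(eval_s)          # 328 seconds for one detailed evaluation of 5633 samples
print(eval_s / 5633)   # ~0.058 s per sample, vs ~0.082 s for a full training step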
+[2025-07-06 10:29:49] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 10:35:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 10:35:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 10:35:17] [Rank 0] Total Loss: 5.8034
+[2025-07-06 10:35:17] [Rank 0] Total FTA: 0.9411
+[2025-07-06 10:35:17] [Rank 0] Group 0 Loss: 6.2861
+[2025-07-06 10:35:17] [Rank 0] Group 1 Loss: 5.8608
+[2025-07-06 10:35:17] [Rank 0] Group 2 Loss: 5.6479
+[2025-07-06 10:35:17] [Rank 0] Group 3 Loss: 5.7359
+[2025-07-06 10:35:17] [Rank 0] Group 4 Loss: 5.8369
+[2025-07-06 10:35:17] [Rank 0] Group 5 Loss: 5.7488
+[2025-07-06 10:35:17] [Rank 0] Group 6 Loss: 5.5498
+[2025-07-06 10:35:18] [Rank 0] Group 7 Loss: 5.6448
+[2025-07-06 10:35:18] [Rank 0] Group 8 Loss: 5.7632
+[2025-07-06 10:35:18] [Rank 0] Group 9 Loss: 5.5999
+[2025-07-06 10:35:18] [Rank 0] Group 10 Loss: 5.7716
+[2025-07-06 10:35:18] [Rank 0] Group 11 Loss: 5.7472
+[2025-07-06 10:35:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 10:35:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 10:35:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 10:35:18] [Rank 0] Group 3 FTA: 0.9583
+[2025-07-06 10:35:18] [Rank 0] Group 4 FTA: 0.9557
+[2025-07-06 10:35:18] [Rank 0] Group 5 FTA: 0.9036
+[2025-07-06 10:35:18] [Rank 0] Group 6 FTA: 0.9010
+[2025-07-06 10:35:18] [Rank 0] Group 7 FTA: 0.9297
+[2025-07-06 10:35:18] [Rank 0] Group 8 FTA: 0.9193
+[2025-07-06 10:35:18] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-06 10:35:18] [Rank 0] Group 10 FTA: 0.9062
+[2025-07-06 10:35:18] [Rank 0] Group 11 FTA: 0.9072
+[2025-07-06 10:35:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 10:35:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 10:35:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 10:35:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 10:35:20] [Rank 0] step:9001/10000 train_time:734180ms step_avg:81.57ms
+[2025-07-06 10:35:21] [Rank 0] step:9021/10000 train_time:736064ms step_avg:81.59ms
+[2025-07-06 10:35:23] [Rank 0] step:9041/10000 train_time:737550ms step_avg:81.58ms
+[2025-07-06 10:35:24] [Rank 0] step:9061/10000 train_time:739039ms step_avg:81.56ms
+[2025-07-06 10:35:26] [Rank 0] step:9081/10000 train_time:740529ms step_avg:81.55ms
+[2025-07-06 10:35:28] [Rank 0] step:9101/10000 train_time:742856ms step_avg:81.62ms
+[2025-07-06 10:35:30] [Rank 0] step:9121/10000 train_time:744347ms step_avg:81.61ms
+[2025-07-06 10:35:31] [Rank 0] step:9141/10000 train_time:745837ms step_avg:81.59ms
+[2025-07-06 10:35:33] [Rank 0] step:9161/10000 train_time:747328ms step_avg:81.58ms
+[2025-07-06 10:35:35] [Rank 0] step:9181/10000 train_time:748872ms step_avg:81.57ms
+[2025-07-06 10:35:36] [Rank 0] step:9201/10000 train_time:750978ms step_avg:81.62ms
+[2025-07-06 10:35:38] [Rank 0] step:9221/10000 train_time:752470ms step_avg:81.60ms
+[2025-07-06 10:35:39] [Rank 0] step:9241/10000 train_time:753962ms step_avg:81.59ms
+[2025-07-06 10:35:41] [Rank 0] step:9261/10000 train_time:755456ms step_avg:81.57ms
+[2025-07-06 10:35:42] [Rank 0] step:9281/10000 train_time:757183ms step_avg:81.58ms
+[2025-07-06 10:35:44] [Rank 0] step:9301/10000 train_time:758677ms step_avg:81.57ms
+[2025-07-06 10:35:45] [Rank 0] step:9321/10000 train_time:760171ms step_avg:81.55ms
+[2025-07-06 10:35:47] [Rank 0] step:9341/10000 train_time:761664ms step_avg:81.54ms
+[2025-07-06 10:35:49] [Rank 0] step:9361/10000 train_time:763210ms step_avg:81.53ms
+[2025-07-06 10:35:51] [Rank 0] step:9381/10000 train_time:765317ms step_avg:81.58ms
+[2025-07-06 10:35:52] [Rank 0] step:9401/10000 train_time:766808ms step_avg:81.57ms
+[2025-07-06 10:35:54] [Rank 0] step:9421/10000 train_time:768303ms step_avg:81.55ms
+[2025-07-06 10:35:55] [Rank 0] step:9441/10000 train_time:769797ms step_avg:81.54ms
+[2025-07-06 10:35:57] [Rank 0] step:9461/10000 train_time:771528ms step_avg:81.55ms
+[2025-07-06 10:35:58] [Rank 0] step:9481/10000 train_time:773021ms step_avg:81.53ms
+[2025-07-06 10:36:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 10:36:01] [Rank 0] PRINT: step:9500/10000 train_loss:0.8651 val_loss:0.8674 train_time:774616ms step_avg:81.54ms
+[2025-07-06 10:36:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 10:36:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
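With val_loss_every = 500 over 10000 iterations, roughly 20 such detailed evaluations run during training, and at about 328 s each they dwarf the cumulative train_time reported in the step lines; most of the wall-clock gap between consecutive 500-step milestones is evaluation, not training. A rough tally (per-eval cost extrapolated from the step-8500 timestamps above):

n_evals = 10000 // 500         # 20 detailed evaluation rounds
eval_wall_s = n_evals * 328    # assuming each costs ~328 s, as measured above
train_wall_s = 774.6           # cumulative train_time at step 9500, in seconds
print(eval_wall_s / (eval_wall_s + train_wall_s))  # ~0.89 of wall-clock spent evaluating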
+[2025-07-06 10:36:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 10:41:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 10:41:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 10:41:29] [Rank 0] Total Loss: 5.8416
+[2025-07-06 10:41:29] [Rank 0] Total FTA: 0.9295
+[2025-07-06 10:41:29] [Rank 0] Group 0 Loss: 6.3525
+[2025-07-06 10:41:29] [Rank 0] Group 1 Loss: 5.6974
+[2025-07-06 10:41:29] [Rank 0] Group 2 Loss: 5.7872
+[2025-07-06 10:41:29] [Rank 0] Group 3 Loss: 5.6334
+[2025-07-06 10:41:29] [Rank 0] Group 4 Loss: 5.9329
+[2025-07-06 10:41:29] [Rank 0] Group 5 Loss: 5.7325
+[2025-07-06 10:41:29] [Rank 0] Group 6 Loss: 5.6731
+[2025-07-06 10:41:29] [Rank 0] Group 7 Loss: 5.7365
+[2025-07-06 10:41:29] [Rank 0] Group 8 Loss: 5.8368
+[2025-07-06 10:41:29] [Rank 0] Group 9 Loss: 5.7906
+[2025-07-06 10:41:29] [Rank 0] Group 10 Loss: 5.7548
+[2025-07-06 10:41:29] [Rank 0] Group 11 Loss: 5.7776
+[2025-07-06 10:41:29] [Rank 0] Group 0 FTA: 0.8544
+[2025-07-06 10:41:29] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 10:41:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 10:41:29] [Rank 0] Group 3 FTA: 0.9167
+[2025-07-06 10:41:29] [Rank 0] Group 4 FTA: 0.9896
+[2025-07-06 10:41:29] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-06 10:41:29] [Rank 0] Group 6 FTA: 0.9062
+[2025-07-06 10:41:29] [Rank 0] Group 7 FTA: 0.9297
+[2025-07-06 10:41:29] [Rank 0] Group 8 FTA: 0.9010
+[2025-07-06 10:41:29] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-06 10:41:29] [Rank 0] Group 10 FTA: 0.9336
+[2025-07-06 10:41:29] [Rank 0] Group 11 FTA: 0.9199
+[2025-07-06 10:41:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 10:41:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 10:41:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 10:41:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 10:41:31] [Rank 0] step:9501/10000 train_time:774639ms step_avg:81.53ms
+[2025-07-06 10:41:32] [Rank 0] step:9521/10000 train_time:776132ms step_avg:81.52ms
+[2025-07-06 10:41:34] [Rank 0] step:9541/10000 train_time:777878ms step_avg:81.53ms
+[2025-07-06 10:41:36] [Rank 0] step:9561/10000 train_time:779750ms step_avg:81.56ms
+[2025-07-06 10:41:37] [Rank 0] step:9581/10000 train_time:781239ms step_avg:81.54ms
+[2025-07-06 10:41:39] [Rank 0] step:9601/10000 train_time:782729ms step_avg:81.53ms
+[2025-07-06 10:41:40] [Rank 0] step:9621/10000 train_time:784220ms step_avg:81.51ms
+[2025-07-06 10:41:42] [Rank 0] step:9641/10000 train_time:786363ms step_avg:81.56ms
+[2025-07-06 10:41:44] [Rank 0] step:9661/10000 train_time:787855ms step_avg:81.55ms
+[2025-07-06 10:41:45] [Rank 0] step:9681/10000 train_time:789347ms step_avg:81.54ms
+[2025-07-06 10:41:47] [Rank 0] step:9701/10000 train_time:791084ms step_avg:81.55ms
+[2025-07-06 10:41:49] [Rank 0] step:9721/10000 train_time:792684ms step_avg:81.54ms
+[2025-07-06 10:41:50] [Rank 0] step:9741/10000 train_time:794167ms step_avg:81.53ms
+[2025-07-06 10:41:52] [Rank 0] step:9761/10000 train_time:795660ms step_avg:81.51ms
+[2025-07-06 10:41:53] [Rank 0] step:9781/10000 train_time:797153ms step_avg:81.50ms
+[2025-07-06 10:41:55] [Rank 0] step:9801/10000 train_time:798648ms step_avg:81.49ms
+[2025-07-06 10:41:57] [Rank 0] step:9821/10000 train_time:800796ms step_avg:81.54ms
+[2025-07-06 10:41:58] [Rank 0] step:9841/10000 train_time:802291ms step_avg:81.53ms
+[2025-07-06 10:42:00] [Rank 0] step:9861/10000 train_time:803787ms step_avg:81.51ms
+[2025-07-06 10:42:01] [Rank 0] step:9881/10000 train_time:805283ms step_avg:81.50ms
+[2025-07-06 10:42:03] [Rank 0] step:9901/10000 train_time:806779ms step_avg:81.48ms
+[2025-07-06 10:42:05] [Rank 0] step:9921/10000 train_time:808938ms step_avg:81.54ms
+[2025-07-06 10:42:06] [Rank 0] step:9941/10000 train_time:810431ms step_avg:81.52ms
+[2025-07-06 10:42:08] [Rank 0] step:9961/10000 train_time:811926ms step_avg:81.51ms
+[2025-07-06 10:42:09] [Rank 0] step:9981/10000 train_time:813422ms step_avg:81.50ms
+[2025-07-06 10:42:11] [Rank 0] step:10000/10000 train_time:815507ms step_avg:81.55ms
+[2025-07-06 10:42:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 10:42:12] [Rank 0] PRINT: step:10000/10000 train_loss:0.8641 val_loss:0.8671 train_time:815688ms step_avg:81.57ms
+[2025-07-06 10:42:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 10:42:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 10:42:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 10:47:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 10:47:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 10:47:37] [Rank 0] Total Loss: 5.8470
+[2025-07-06 10:47:37] [Rank 0] Total FTA: 0.9521
+[2025-07-06 10:47:37] [Rank 0] Group 0 Loss: 6.3000
+[2025-07-06 10:47:37] [Rank 0] Group 1 Loss: 5.7139
+[2025-07-06 10:47:37] [Rank 0] Group 2 Loss: 5.7887
+[2025-07-06 10:47:37] [Rank 0] Group 3 Loss: 5.6692
+[2025-07-06 10:47:37] [Rank 0] Group 4 Loss: 5.9362
+[2025-07-06 10:47:37] [Rank 0] Group 5 Loss: 5.7667
+[2025-07-06 10:47:37] [Rank 0] Group 6 Loss: 5.6297
+[2025-07-06 10:47:37] [Rank 0] Group 7 Loss: 5.8073
+[2025-07-06 10:47:37] [Rank 0] Group 8 Loss: 5.8551
+[2025-07-06 10:47:37] [Rank 0] Group 9 Loss: 5.7111
+[2025-07-06 10:47:37] [Rank 0] Group 10 Loss: 5.7538
+[2025-07-06 10:47:37] [Rank 0] Group 11 Loss: 5.8158
+[2025-07-06 10:47:37] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 10:47:37] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 10:47:37] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 10:47:37] [Rank 0] Group 3 FTA: 0.9167
+[2025-07-06 10:47:37] [Rank 0] Group 4 FTA: 0.9844
+[2025-07-06 10:47:37] [Rank 0] Group 5 FTA: 0.9870
+[2025-07-06 10:47:37] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-06 10:47:37] [Rank 0] Group 7 FTA: 0.9375
+[2025-07-06 10:47:37] [Rank 0] Group 8 FTA: 0.9036
+[2025-07-06 10:47:37] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-06 10:47:37] [Rank 0] Group 10 FTA: 0.9297
+[2025-07-06 10:47:37] [Rank 0] Group 11 FTA: 0.9219
+[2025-07-06 10:47:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-06 10:47:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-06 10:47:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-06 10:47:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-06 10:47:39] [Rank 0] step:10001/10000 train_time:815712ms step_avg:81.56ms
+[2025-07-06 10:47:39] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 10:47:39 2025 ---
+[2025-07-06 10:47:39] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/config.json b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..34b311b9ca88244c52a21db52d3af23e47070a9a
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 43,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "d4dd0851-edb7-4b5c-9e86-e0eb0ba95eec",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..5d551a0b56eaf85642f43c7807d9ffa61810940a
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b839a5346b6af6119d12c5b64ba4aa541b2807df7d31a1ac3b97740ec3b57ed2
+size 411911
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..36b0118c83fda4d93920ac91ff0106e1da570738
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c0dc811cc021906a55e0c6e59988dee7f42ea337a3808499330e7d8d945bf8c
+size 378176
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..17ecf639ebc86319f361312abe23b236d6406761
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1777886348ddd7523e04a4ba7effd74572bd35362e2bebe7555f0b0a06ecb033
+size 115966
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..22d87fcb7b3eed72d6995956cdd6dbd7bf31c3ef
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:479fdd41f6e671d5eec3edf16aed9ec83c6af8f23b7d075edcf4381555957893
+size 124164
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/training_log_d4dd0851-edb7-4b5c-9e86-e0eb0ba95eec.txt b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/training_log_d4dd0851-edb7-4b5c-9e86-e0eb0ba95eec.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aeb39eef8977020f43330d86acd7317635089155
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/training_log_d4dd0851-edb7-4b5c-9e86-e0eb0ba95eec.txt
@@ -0,0 +1,5144 @@
+[2025-07-07 22:58:20] [Rank 0] PRINT: --- Script Start: Mon Jul 7 22:58:20 2025 ---
+[2025-07-07 22:58:20] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001)
+[2025-07-07 22:58:20] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 22:58:20] [Rank 0] PRINT: Using fixed seed: 43
+[2025-07-07 22:58:20] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43
+[2025-07-07 22:58:20] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # shards are cycled indefinitely, so multi-epoch training wraps around automatically
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # NOTE: these attributes carry no type annotations, so @dataclass registers no
+    # fields; that is why the log above prints just "Hyperparameters()".
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
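The only schedule-related hyperparameters defined above are num_iterations and cooldown_frac; the schedule function itself appears further down in the script. As a reference point, modded-nanogpt-style runs typically hold the learning rate constant and then decay it linearly over the final cooldown_frac of training. A sketch under that assumption (not the verbatim schedule of this run):

def get_lr_multiplier(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = step / num_iterations              # training progress in [0, 1]
    if x < 1 - cooldown_frac:
        return 1.0                         # constant phase: the first 20% of steps
    return (1 - x) / cooldown_frac         # linear cooldown to 0 over the last 80%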
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            # Write each message exactly once; a duplicated write here would
+            # double every line in the log file.
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... (other initial logs)
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            result = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # The model may return a (loss, logits) tuple; keep the logits
+            # (last element), matching the convention used elsewhere in this script.
+            logits = result[-1] if isinstance(result, tuple) else result
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
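+            # NOTE (annotation): with target_seq=None the model is expected to
+            # return either raw logits or a (loss, logits) tuple; the check
+            # below only records which of the two conventions this build follows.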
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+            break
+
+        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+        loss_train.backward()
+        train_loss_sum += loss_train.detach() / args.train_seq_len
+        train_step_count += 1
+
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+        current_lr_val = get_lr(step)
+        for opt in optimizers:
+            for group in opt.param_groups:
+                group["lr"] = group["initial_lr"] * current_lr_val
+
+        # Muon momentum warmup: ramp from 0.85 to 0.95 over the first 300 steps
+        if optimizer2 is not None:
+            for group in optimizer2.param_groups:
+                frac = min(step / 300, 1)
+                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+        for opt in optimizers:
+            opt.step()
+
+        model_compiled.zero_grad(set_to_none=True)
+
+        if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+            approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+            total_tokens_in_batch = args.train_seq_len * world_size
+            train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+    print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+           f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+    if dist.is_initialized():
+        dist.destroy_process_group()
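+
+# NOTE: illustrative sanity check (an annotation, not executed in this run).
+# generate_powerlaw_selection_counts(m) builds the power-law class sizes used
+# by the detailed evaluation; for m = 3 it yields one group-0 class with 8
+# samples, one group-1 class with 4, two group-2 classes with 2, and four
+# group-3 classes with 1:
+#
+#     counts, groups = generate_powerlaw_selection_counts(3)
+#     assert counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#     assert groups == [0, 1, 2, 2, 3, 3, 3, 3]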
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
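+    # Notes on the loop above: padded positions get target -100, which
+    # F.cross_entropy skips via ignore_index=-100, so each per-sample loss
+    # covers only real tokens. FTA counts a sample as correct when
+    # argmax(logits[prompt_len - 1]) equals the first token of ' ' + answer,
+    # i.e. greedy decoding of exactly one token after the question mark.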
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
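+    Returns a dict keyed by group id as strings, e.g. (hypothetical values)
+    {'0': 4.35, '1': 4.14, ..., '11': 4.09}.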
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
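+            # What the two probes check (both feed the same kind of random
+            # input): the branches below accept either a bare logits tensor or
+            # a (loss, logits) tuple when target_seq=None; the training loop
+            # itself always passes targets and expects a scalar loss back.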
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
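+        # Recap of the qkvo-mode split configured above (QK/VO = query-key /
+        # value-output attention weights; W_1/W_2 = mlp.c_fc / mlp.c_proj):
+        #   mode 0: Muon QKVO+MLP | Adam rest     mode 1: Muon QK | Adam VO+MLP
+        #   mode 2: Muon VO | Adam QK+MLP         mode 3: Muon QKVO | Adam MLP
+        #   mode 4: Muon MLP | Adam QKVO          mode 5: Adam everything
+        #   mode 6: Muon W_2 | Adam attn+W_1      mode 7: Muon VO+MLP | Adam QK
+        #   mode 8: Muon VO+W_2 | Adam QK+W_1
+        # Embeddings, lm_head and scalar params always stay with Adam.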
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
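+    # Shape of get_lr under this run's settings (num_iterations=10000,
+    # cooldown_frac=0.8), worked out by hand: flat at 1.0 for the first 20%
+    # of steps, then a linear blend down to 0.1x of the base LR:
+    #   get_lr(1000)  -> 1.0    # x = 0.1 < 1 - 0.8
+    #   get_lr(6000)  -> 0.55   # w = (1 - 0.6) / 0.8 = 0.5 -> 0.5*1.0 + 0.5*0.1
+    #   get_lr(10000) -> 0.1    # end of cooldown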
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
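+            # Arithmetic behind the divisibility warning below, from this
+            # run's own numbers: val_batch_size = world_size * val_seq_len
+            # = 4 * 65536 = 262144 (world_size inferred from the logged batch
+            # size), and val_tokens / val_batch_size = 1966080 / 262144 = 7.5,
+            # so only 7 full validation steps run and the trailing half batch
+            # of tokens is skipped.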
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
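+            # Layout of the history dict feeding these plots:
+            #   history['per_class_loss'][group_id][str(step)] -> float
+            #   history['total_loss'][str(step)]               -> float
+            # plot_curves() tells the two apart by whether the first value is
+            # itself a dict (per-class curves) or a scalar (single curve).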
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 22:58:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 22:58:21] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 22:58:21] [Rank 0] PRINT: Constructing model... +[2025-07-07 22:58:21] [Rank 0] PRINT: Constructing model... +[2025-07-07 22:58:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 22:58:23] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 22:58:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 22:58:23] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 22:58:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 22:58:23] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 22:58:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 22:58:24] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 22:58:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 22:58:24] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 22:58:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 22:58:24] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 22:58:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 22:58:24] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 22:58:24] [Rank 0] PRINT: Model returns: +[2025-07-07 22:58:24] [Rank 0] PRINT: Model returns: +[2025-07-07 22:58:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 22:58:24] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 22:58:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-07 22:58:24] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-07 22:58:24] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-07 22:58:24] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-07 22:58:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-07 22:58:24] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-07 22:58:24] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-07 22:58:24] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-07 22:58:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 22:58:24] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 22:58:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 22:58:24] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 22:58:24] [Rank 0] PRINT: Starting warmup... +[2025-07-07 22:58:24] [Rank 0] PRINT: Starting warmup... +[2025-07-07 22:59:54] [Rank 0] PRINT: Warmup complete. +[2025-07-07 22:59:54] [Rank 0] PRINT: Warmup complete. +[2025-07-07 22:59:54] [Rank 0] PRINT: Starting training... +[2025-07-07 22:59:54] [Rank 0] PRINT: Starting training... +[2025-07-07 22:59:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 22:59:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 23:00:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 23:00:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 23:00:03] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.56ms +[2025-07-07 23:00:03] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.56ms +[2025-07-07 23:00:04] [Rank 0] step:41/10000 train_time:3211ms step_avg:78.32ms +[2025-07-07 23:00:04] [Rank 0] step:41/10000 train_time:3211ms step_avg:78.32ms +[2025-07-07 23:00:06] [Rank 0] step:61/10000 train_time:4670ms step_avg:76.55ms +[2025-07-07 23:00:06] [Rank 0] step:61/10000 train_time:4670ms step_avg:76.55ms +[2025-07-07 23:00:07] [Rank 0] step:81/10000 train_time:6134ms step_avg:75.73ms +[2025-07-07 23:00:07] [Rank 0] step:81/10000 train_time:6134ms step_avg:75.73ms +[2025-07-07 23:00:09] [Rank 0] step:101/10000 train_time:7840ms step_avg:77.63ms +[2025-07-07 23:00:09] [Rank 0] step:101/10000 train_time:7840ms step_avg:77.63ms +[2025-07-07 23:00:10] [Rank 0] step:121/10000 train_time:9305ms step_avg:76.90ms +[2025-07-07 23:00:10] [Rank 0] step:121/10000 train_time:9305ms step_avg:76.90ms +[2025-07-07 23:00:12] [Rank 0] step:141/10000 train_time:10769ms step_avg:76.37ms +[2025-07-07 23:00:12] [Rank 0] step:141/10000 train_time:10769ms step_avg:76.37ms +[2025-07-07 23:00:13] [Rank 0] step:161/10000 train_time:12237ms step_avg:76.01ms +[2025-07-07 23:00:13] [Rank 0] step:161/10000 train_time:12237ms step_avg:76.01ms +[2025-07-07 23:00:15] [Rank 0] step:181/10000 train_time:13711ms step_avg:75.75ms +[2025-07-07 23:00:15] [Rank 0] step:181/10000 train_time:13711ms step_avg:75.75ms +[2025-07-07 23:00:17] [Rank 0] step:201/10000 train_time:15822ms step_avg:78.71ms +[2025-07-07 23:00:17] [Rank 0] step:201/10000 train_time:15822ms step_avg:78.71ms +[2025-07-07 23:00:18] [Rank 0] step:221/10000 train_time:17292ms step_avg:78.24ms +[2025-07-07 23:00:18] [Rank 0] step:221/10000 train_time:17292ms step_avg:78.24ms +[2025-07-07 23:00:20] [Rank 0] step:241/10000 train_time:18764ms step_avg:77.86ms +[2025-07-07 23:00:20] [Rank 0] step:241/10000 train_time:18764ms step_avg:77.86ms +[2025-07-07 23:00:21] [Rank 0] step:261/10000 train_time:20237ms step_avg:77.54ms +[2025-07-07 23:00:21] [Rank 0] step:261/10000 train_time:20237ms step_avg:77.54ms +[2025-07-07 23:00:23] [Rank 0] step:281/10000 train_time:22380ms step_avg:79.64ms +[2025-07-07 23:00:23] [Rank 0] step:281/10000 train_time:22380ms step_avg:79.64ms +[2025-07-07 23:00:25] [Rank 0] step:301/10000 train_time:23851ms step_avg:79.24ms +[2025-07-07 23:00:25] [Rank 0] step:301/10000 train_time:23851ms step_avg:79.24ms +[2025-07-07 23:00:26] [Rank 0] step:321/10000 train_time:25323ms step_avg:78.89ms +[2025-07-07 23:00:26] [Rank 0] step:321/10000 train_time:25323ms step_avg:78.89ms +[2025-07-07 23:00:28] [Rank 0] step:341/10000 train_time:26796ms step_avg:78.58ms +[2025-07-07 23:00:28] [Rank 0] step:341/10000 train_time:26796ms step_avg:78.58ms +[2025-07-07 23:00:30] [Rank 0] step:361/10000 train_time:28322ms step_avg:78.45ms +[2025-07-07 23:00:30] [Rank 0] step:361/10000 train_time:28322ms step_avg:78.45ms +[2025-07-07 23:00:31] [Rank 0] step:381/10000 train_time:30382ms step_avg:79.74ms +[2025-07-07 23:00:31] [Rank 0] step:381/10000 train_time:30382ms step_avg:79.74ms +[2025-07-07 23:00:33] [Rank 0] step:401/10000 train_time:31854ms step_avg:79.44ms +[2025-07-07 23:00:33] [Rank 0] step:401/10000 train_time:31854ms step_avg:79.44ms +[2025-07-07 23:00:34] [Rank 0] step:421/10000 train_time:33561ms step_avg:79.72ms 
+[2025-07-07 23:00:34] [Rank 0] step:421/10000 train_time:33561ms step_avg:79.72ms +[2025-07-07 23:00:36] [Rank 0] step:441/10000 train_time:35075ms step_avg:79.54ms +[2025-07-07 23:00:36] [Rank 0] step:441/10000 train_time:35075ms step_avg:79.54ms +[2025-07-07 23:00:38] [Rank 0] step:461/10000 train_time:37196ms step_avg:80.69ms +[2025-07-07 23:00:38] [Rank 0] step:461/10000 train_time:37196ms step_avg:80.69ms +[2025-07-07 23:00:40] [Rank 0] step:481/10000 train_time:38668ms step_avg:80.39ms +[2025-07-07 23:00:40] [Rank 0] step:481/10000 train_time:38668ms step_avg:80.39ms +[2025-07-07 23:00:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:00:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:00:42] [Rank 0] PRINT: step:500/10000 train_loss:4.9434 val_loss:2.0519 train_time:40140ms step_avg:80.28ms +[2025-07-07 23:00:42] [Rank 0] PRINT: step:500/10000 train_loss:4.9434 val_loss:2.0519 train_time:40140ms step_avg:80.28ms +[2025-07-07 23:00:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:00:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:00:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 23:00:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 23:00:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:00:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:06:04] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:06:04] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:06:04] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:06:04] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:06:04] [Rank 0] Total Loss: 4.1162 +[2025-07-07 23:06:04] [Rank 0] Total Loss: 4.1162 +[2025-07-07 23:06:04] [Rank 0] Total FTA: 0.0847 +[2025-07-07 23:06:04] [Rank 0] Total FTA: 0.0847 +[2025-07-07 23:06:04] [Rank 0] Group 0 Loss: 4.3472 +[2025-07-07 23:06:04] [Rank 0] Group 0 Loss: 4.3472 +[2025-07-07 23:06:04] [Rank 0] Group 1 Loss: 4.1404 +[2025-07-07 23:06:04] [Rank 0] Group 1 Loss: 4.1404 +[2025-07-07 23:06:04] [Rank 0] Group 2 Loss: 4.0418 +[2025-07-07 23:06:04] [Rank 0] Group 2 Loss: 4.0418 +[2025-07-07 23:06:04] [Rank 0] Group 3 Loss: 4.1737 +[2025-07-07 23:06:04] [Rank 0] Group 3 Loss: 4.1737 +[2025-07-07 23:06:04] [Rank 0] Group 4 Loss: 4.0348 +[2025-07-07 23:06:04] [Rank 0] Group 4 Loss: 4.0348 +[2025-07-07 23:06:04] [Rank 0] Group 5 Loss: 4.0467 +[2025-07-07 23:06:04] [Rank 0] Group 5 Loss: 4.0467 +[2025-07-07 23:06:04] [Rank 0] Group 6 Loss: 4.0015 +[2025-07-07 23:06:04] [Rank 0] Group 6 Loss: 4.0015 +[2025-07-07 23:06:04] [Rank 0] Group 7 Loss: 4.1074 +[2025-07-07 23:06:04] [Rank 0] Group 7 Loss: 4.1074 +[2025-07-07 23:06:04] [Rank 0] Group 8 Loss: 4.0668 +[2025-07-07 23:06:04] [Rank 0] Group 8 Loss: 4.0668 +[2025-07-07 23:06:04] [Rank 0] Group 9 Loss: 4.0492 +[2025-07-07 23:06:04] [Rank 0] Group 9 Loss: 4.0492 +[2025-07-07 23:06:04] [Rank 0] Group 10 Loss: 4.0866 +[2025-07-07 23:06:04] [Rank 0] Group 10 Loss: 4.0866 +[2025-07-07 23:06:04] [Rank 0] Group 11 Loss: 4.0929 +[2025-07-07 23:06:04] [Rank 0] Group 11 Loss: 4.0929 +[2025-07-07 23:06:04] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 23:06:04] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 
23:06:04] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:06:04] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:06:04] [Rank 0] Group 2 FTA: 0.0729 +[2025-07-07 23:06:04] [Rank 0] Group 2 FTA: 0.0729 +[2025-07-07 23:06:04] [Rank 0] Group 3 FTA: 0.0391 +[2025-07-07 23:06:04] [Rank 0] Group 3 FTA: 0.0391 +[2025-07-07 23:06:04] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 23:06:04] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 23:06:04] [Rank 0] Group 5 FTA: 0.0625 +[2025-07-07 23:06:04] [Rank 0] Group 5 FTA: 0.0625 +[2025-07-07 23:06:04] [Rank 0] Group 6 FTA: 0.0651 +[2025-07-07 23:06:04] [Rank 0] Group 6 FTA: 0.0651 +[2025-07-07 23:06:04] [Rank 0] Group 7 FTA: 0.1198 +[2025-07-07 23:06:04] [Rank 0] Group 7 FTA: 0.1198 +[2025-07-07 23:06:04] [Rank 0] Group 8 FTA: 0.0807 +[2025-07-07 23:06:04] [Rank 0] Group 8 FTA: 0.0807 +[2025-07-07 23:06:04] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-07 23:06:04] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-07 23:06:04] [Rank 0] Group 10 FTA: 0.0898 +[2025-07-07 23:06:04] [Rank 0] Group 10 FTA: 0.0898 +[2025-07-07 23:06:04] [Rank 0] Group 11 FTA: 0.0967 +[2025-07-07 23:06:04] [Rank 0] Group 11 FTA: 0.0967 +[2025-07-07 23:06:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png +[2025-07-07 23:06:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png +[2025-07-07 23:06:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png +[2025-07-07 23:06:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png +[2025-07-07 23:06:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png +[2025-07-07 23:06:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png +[2025-07-07 23:06:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png +[2025-07-07 23:06:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png +[2025-07-07 23:06:06] [Rank 0] step:501/10000 train_time:40160ms step_avg:80.16ms +[2025-07-07 23:06:06] [Rank 0] step:501/10000 train_time:40160ms step_avg:80.16ms +[2025-07-07 23:06:07] [Rank 0] step:521/10000 train_time:41633ms step_avg:79.91ms +[2025-07-07 23:06:07] [Rank 0] step:521/10000 train_time:41633ms step_avg:79.91ms +[2025-07-07 23:06:09] [Rank 0] step:541/10000 train_time:43774ms step_avg:80.91ms +[2025-07-07 23:06:09] [Rank 0] step:541/10000 train_time:43774ms step_avg:80.91ms +[2025-07-07 23:06:11] [Rank 0] step:561/10000 train_time:45219ms step_avg:80.60ms +[2025-07-07 23:06:11] [Rank 0] step:561/10000 train_time:45219ms step_avg:80.60ms +[2025-07-07 23:06:12] [Rank 0] step:581/10000 train_time:46683ms step_avg:80.35ms +[2025-07-07 23:06:12] [Rank 0] step:581/10000 train_time:46683ms step_avg:80.35ms +[2025-07-07 23:06:14] [Rank 0] step:601/10000 train_time:48150ms step_avg:80.12ms +[2025-07-07 23:06:14] [Rank 0] step:601/10000 train_time:48150ms step_avg:80.12ms +[2025-07-07 23:06:15] [Rank 0] step:621/10000 train_time:49616ms step_avg:79.90ms +[2025-07-07 23:06:15] [Rank 0] step:621/10000 train_time:49616ms step_avg:79.90ms 
+[2025-07-07 23:06:17] [Rank 0] step:641/10000 train_time:51748ms step_avg:80.73ms +[2025-07-07 23:06:17] [Rank 0] step:641/10000 train_time:51748ms step_avg:80.73ms +[2025-07-07 23:06:19] [Rank 0] step:661/10000 train_time:53214ms step_avg:80.51ms +[2025-07-07 23:06:19] [Rank 0] step:661/10000 train_time:53214ms step_avg:80.51ms +[2025-07-07 23:06:20] [Rank 0] step:681/10000 train_time:54682ms step_avg:80.30ms +[2025-07-07 23:06:20] [Rank 0] step:681/10000 train_time:54682ms step_avg:80.30ms +[2025-07-07 23:06:22] [Rank 0] step:701/10000 train_time:56150ms step_avg:80.10ms +[2025-07-07 23:06:22] [Rank 0] step:701/10000 train_time:56150ms step_avg:80.10ms +[2025-07-07 23:06:23] [Rank 0] step:721/10000 train_time:57670ms step_avg:79.99ms +[2025-07-07 23:06:23] [Rank 0] step:721/10000 train_time:57670ms step_avg:79.99ms +[2025-07-07 23:06:25] [Rank 0] step:741/10000 train_time:59324ms step_avg:80.06ms +[2025-07-07 23:06:25] [Rank 0] step:741/10000 train_time:59324ms step_avg:80.06ms +[2025-07-07 23:06:26] [Rank 0] step:761/10000 train_time:60803ms step_avg:79.90ms +[2025-07-07 23:06:26] [Rank 0] step:761/10000 train_time:60803ms step_avg:79.90ms +[2025-07-07 23:06:28] [Rank 0] step:781/10000 train_time:62284ms step_avg:79.75ms +[2025-07-07 23:06:28] [Rank 0] step:781/10000 train_time:62284ms step_avg:79.75ms +[2025-07-07 23:06:29] [Rank 0] step:801/10000 train_time:63763ms step_avg:79.60ms +[2025-07-07 23:06:29] [Rank 0] step:801/10000 train_time:63763ms step_avg:79.60ms +[2025-07-07 23:06:31] [Rank 0] step:821/10000 train_time:65587ms step_avg:79.89ms +[2025-07-07 23:06:31] [Rank 0] step:821/10000 train_time:65587ms step_avg:79.89ms +[2025-07-07 23:06:33] [Rank 0] step:841/10000 train_time:67067ms step_avg:79.75ms +[2025-07-07 23:06:33] [Rank 0] step:841/10000 train_time:67067ms step_avg:79.75ms +[2025-07-07 23:06:34] [Rank 0] step:861/10000 train_time:68548ms step_avg:79.61ms +[2025-07-07 23:06:34] [Rank 0] step:861/10000 train_time:68548ms step_avg:79.61ms +[2025-07-07 23:06:36] [Rank 0] step:881/10000 train_time:70030ms step_avg:79.49ms +[2025-07-07 23:06:36] [Rank 0] step:881/10000 train_time:70030ms step_avg:79.49ms +[2025-07-07 23:06:38] [Rank 0] step:901/10000 train_time:71566ms step_avg:79.43ms +[2025-07-07 23:06:38] [Rank 0] step:901/10000 train_time:71566ms step_avg:79.43ms +[2025-07-07 23:06:39] [Rank 0] step:921/10000 train_time:73659ms step_avg:79.98ms +[2025-07-07 23:06:39] [Rank 0] step:921/10000 train_time:73659ms step_avg:79.98ms +[2025-07-07 23:06:41] [Rank 0] step:941/10000 train_time:75139ms step_avg:79.85ms +[2025-07-07 23:06:41] [Rank 0] step:941/10000 train_time:75139ms step_avg:79.85ms +[2025-07-07 23:06:42] [Rank 0] step:961/10000 train_time:76622ms step_avg:79.73ms +[2025-07-07 23:06:42] [Rank 0] step:961/10000 train_time:76622ms step_avg:79.73ms +[2025-07-07 23:06:44] [Rank 0] step:981/10000 train_time:78104ms step_avg:79.62ms +[2025-07-07 23:06:44] [Rank 0] step:981/10000 train_time:78104ms step_avg:79.62ms +[2025-07-07 23:06:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:06:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 23:06:46] [Rank 0] PRINT: step:1000/10000 train_loss:1.7007 val_loss:1.5391 train_time:79719ms step_avg:79.72ms
+[2025-07-07 23:06:46] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:06:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:06:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:12:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:12:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:12:10] [Rank 0] Total Loss: 4.2492
+[2025-07-07 23:12:10] [Rank 0] Total FTA: 0.1280
+[2025-07-07 23:12:10] [Rank 0] Group 0 Loss: 4.4681
+[2025-07-07 23:12:10] [Rank 0] Group 1 Loss: 4.3254
+[2025-07-07 23:12:10] [Rank 0] Group 2 Loss: 4.1117
+[2025-07-07 23:12:10] [Rank 0] Group 3 Loss: 4.1499
+[2025-07-07 23:12:10] [Rank 0] Group 4 Loss: 4.2391
+[2025-07-07 23:12:10] [Rank 0] Group 5 Loss: 4.1502
+[2025-07-07 23:12:10] [Rank 0] Group 6 Loss: 4.1677
+[2025-07-07 23:12:10] [Rank 0] Group 7 Loss: 4.2525
+[2025-07-07 23:12:10] [Rank 0] Group 8 Loss: 4.2385
+[2025-07-07 23:12:10] [Rank 0] Group 9 Loss: 4.2319
+[2025-07-07 23:12:10] [Rank 0] Group 10 Loss: 4.2193
+[2025-07-07 23:12:10] [Rank 0] Group 11 Loss: 4.2384
+[2025-07-07 23:12:10] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 23:12:10] [Rank 0] Group 1 FTA: 0.1875
+[2025-07-07 23:12:10] [Rank 0] Group 2 FTA: 0.2422
+[2025-07-07 23:12:10] [Rank 0] Group 3 FTA: 0.0807
+[2025-07-07 23:12:10] [Rank 0] Group 4 FTA: 0.0938
+[2025-07-07 23:12:10] [Rank 0] Group 5 FTA: 0.1016
+[2025-07-07 23:12:10] [Rank 0] Group 6 FTA: 0.1510
+[2025-07-07 23:12:10] [Rank 0] Group 7 FTA: 0.1016
+[2025-07-07 23:12:10] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-07 23:12:10] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-07 23:12:10] [Rank 0] Group 10 FTA: 0.1055
+[2025-07-07 23:12:10] [Rank 0] Group 11 FTA: 0.0957
+[2025-07-07 23:12:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-07 23:12:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-07 23:12:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-07 23:12:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-07 23:12:12] [Rank 0] step:1001/10000 train_time:79738ms step_avg:79.66ms
+[2025-07-07 23:12:13] [Rank 0] step:1021/10000 train_time:81225ms step_avg:79.55ms
+[2025-07-07 23:12:15] [Rank 0] step:1041/10000 train_time:82696ms step_avg:79.44ms
+[2025-07-07 23:12:16] [Rank 0] step:1061/10000 train_time:84167ms step_avg:79.33ms
+[2025-07-07 23:12:18] [Rank 0] step:1081/10000 train_time:85640ms step_avg:79.22ms
+[2025-07-07 23:12:20] [Rank 0] step:1101/10000 train_time:87762ms step_avg:79.71ms
+[2025-07-07 23:12:21] [Rank 0] step:1121/10000 train_time:89233ms step_avg:79.60ms
+[2025-07-07 23:12:23] [Rank 0] step:1141/10000 train_time:90705ms step_avg:79.50ms
+[2025-07-07 23:12:24] [Rank 0] step:1161/10000 train_time:92179ms step_avg:79.40ms
+[2025-07-07 23:12:26] [Rank 0] step:1181/10000 train_time:94318ms step_avg:79.86ms
+[2025-07-07 23:12:28] [Rank 0] step:1201/10000 train_time:95791ms step_avg:79.76ms
+[2025-07-07 23:12:29] [Rank 0] step:1221/10000 train_time:97267ms step_avg:79.66ms
+[2025-07-07 23:12:31] [Rank 0] step:1241/10000 train_time:98747ms step_avg:79.57ms
+[2025-07-07 23:12:32] [Rank 0] step:1261/10000 train_time:100221ms step_avg:79.48ms
+[2025-07-07 23:12:34] [Rank 0] step:1281/10000 train_time:101930ms step_avg:79.57ms
+[2025-07-07 23:12:35] [Rank 0] step:1301/10000 train_time:103407ms step_avg:79.48ms
+[2025-07-07 23:12:37] [Rank 0] step:1321/10000 train_time:104883ms step_avg:79.40ms
+[2025-07-07 23:12:38] [Rank 0] step:1341/10000 train_time:106358ms step_avg:79.31ms
+[2025-07-07 23:12:40] [Rank 0] step:1361/10000 train_time:108070ms step_avg:79.40ms
+[2025-07-07 23:12:41] [Rank 0] step:1381/10000 train_time:109548ms step_avg:79.32ms
+[2025-07-07 23:12:43] [Rank 0] step:1401/10000 train_time:111025ms step_avg:79.25ms
+[2025-07-07 23:12:44] [Rank 0] step:1421/10000 train_time:112504ms step_avg:79.17ms
+[2025-07-07 23:12:46] [Rank 0] step:1441/10000 train_time:114236ms step_avg:79.28ms
+[2025-07-07 23:12:48] [Rank 0] step:1461/10000 train_time:115695ms step_avg:79.19ms
+[2025-07-07 23:12:49] [Rank 0] step:1481/10000 train_time:117172ms step_avg:79.12ms
+[2025-07-07 23:12:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:12:51] [Rank 0] PRINT: step:1500/10000 train_loss:1.4271 val_loss:1.3336 train_time:118650ms step_avg:79.10ms
+[2025-07-07 23:12:51] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:12:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:12:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:18:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:18:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:18:15] [Rank 0] Total Loss: 4.3996
+[2025-07-07 23:18:15] [Rank 0] Total FTA: 0.1926
+[2025-07-07 23:18:15] [Rank 0] Group 0 Loss: 4.6211
+[2025-07-07 23:18:15] [Rank 0] Group 1 Loss: 4.1602
+[2025-07-07 23:18:15] [Rank 0] Group 2 Loss: 4.2641
+[2025-07-07 23:18:15] [Rank 0] Group 3 Loss: 4.3759
+[2025-07-07 23:18:15] [Rank 0] Group 4 Loss: 4.3763
+[2025-07-07 23:18:15] [Rank 0] Group 5 Loss: 4.3067
+[2025-07-07 23:18:15] [Rank 0] Group 6 Loss: 4.3136
+[2025-07-07 23:18:15] [Rank 0] Group 7 Loss: 4.4466
+[2025-07-07 23:18:15] [Rank 0] Group 8 Loss: 4.3901
+[2025-07-07 23:18:15] [Rank 0] Group 9 Loss: 4.4313
+[2025-07-07 23:18:15] [Rank 0] Group 10 Loss: 4.4083
+[2025-07-07 23:18:15] [Rank 0] Group 11 Loss: 4.4322
+[2025-07-07 23:18:15] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-07 23:18:15] [Rank 0] Group 1 FTA: 0.3568
+[2025-07-07 23:18:15] [Rank 0] Group 2 FTA: 0.1771
+[2025-07-07 23:18:15] [Rank 0] Group 3 FTA: 0.0781
+[2025-07-07 23:18:15] [Rank 0] Group 4 FTA: 0.1380
+[2025-07-07 23:18:15] [Rank 0] Group 5 FTA: 0.2031
+[2025-07-07 23:18:15] [Rank 0] Group 6 FTA: 0.1745
+[2025-07-07 23:18:15] [Rank 0] Group 7 FTA: 0.2240
+[2025-07-07 23:18:15] [Rank 0] Group 8 FTA: 0.2109
+[2025-07-07 23:18:15] [Rank 0] Group 9 FTA: 0.1875
+[2025-07-07 23:18:15] [Rank 0] Group 10 FTA: 0.2051
+[2025-07-07 23:18:15] [Rank 0] Group 11 FTA: 0.2041
+[2025-07-07 23:18:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-07 23:18:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-07 23:18:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-07 23:18:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-07 23:18:17] [Rank 0] step:1501/10000 train_time:118670ms step_avg:79.06ms
+[2025-07-07 23:18:18] [Rank 0] step:1521/10000 train_time:120142ms step_avg:78.99ms
+[2025-07-07 23:18:20] [Rank 0] step:1541/10000 train_time:121853ms step_avg:79.07ms
+[2025-07-07 23:18:21] [Rank 0] step:1561/10000 train_time:123325ms step_avg:79.00ms
+[2025-07-07 23:18:23] [Rank 0] step:1581/10000 train_time:124793ms step_avg:78.93ms
+[2025-07-07 23:18:24] [Rank 0] step:1601/10000 train_time:126264ms step_avg:78.87ms
+[2025-07-07 23:18:26] [Rank 0] step:1621/10000 train_time:127787ms step_avg:78.83ms
+[2025-07-07 23:18:28] [Rank 0] step:1641/10000 train_time:129874ms step_avg:79.14ms
+[2025-07-07 23:18:30] [Rank 0] step:1661/10000 train_time:131482ms step_avg:79.16ms
+[2025-07-07 23:18:31] [Rank 0] step:1681/10000 train_time:132995ms step_avg:79.12ms
+[2025-07-07 23:18:33] [Rank 0] step:1701/10000 train_time:134469ms step_avg:79.05ms
+[2025-07-07 23:18:35] [Rank 0] step:1721/10000 train_time:136589ms step_avg:79.37ms
+[2025-07-07 23:18:36] [Rank 0] step:1741/10000 train_time:138066ms step_avg:79.30ms
+[2025-07-07 23:18:38] [Rank 0] step:1761/10000 train_time:139537ms step_avg:79.24ms
+[2025-07-07 23:18:39] [Rank 0] step:1781/10000 train_time:141012ms step_avg:79.18ms
+[2025-07-07 23:18:41] [Rank 0] step:1801/10000 train_time:143171ms step_avg:79.50ms
+[2025-07-07 23:18:43] [Rank 0] step:1821/10000 train_time:144628ms step_avg:79.42ms
+[2025-07-07 23:18:44] [Rank 0] step:1841/10000 train_time:146102ms step_avg:79.36ms
+[2025-07-07 23:18:46] [Rank 0] step:1861/10000 train_time:147578ms step_avg:79.30ms
+[2025-07-07 23:18:47] [Rank 0] step:1881/10000 train_time:149056ms step_avg:79.24ms
+[2025-07-07 23:18:49] [Rank 0] step:1901/10000 train_time:151184ms step_avg:79.53ms
+[2025-07-07 23:18:51] [Rank 0] step:1921/10000 train_time:152659ms step_avg:79.47ms
+[2025-07-07 23:18:52] [Rank 0] step:1941/10000 train_time:154135ms step_avg:79.41ms
+[2025-07-07 23:18:54] [Rank 0] step:1961/10000 train_time:155613ms step_avg:79.35ms
+[2025-07-07 23:18:56] [Rank 0] step:1981/10000 train_time:157141ms step_avg:79.32ms
+[2025-07-07 23:18:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:18:58] [Rank 0] PRINT: step:2000/10000 train_loss:1.2396 val_loss:1.2140 train_time:159219ms step_avg:79.61ms
+[2025-07-07 23:18:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:18:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:18:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:24:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:24:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:24:23] [Rank 0] Total Loss: 4.5904
+[2025-07-07 23:24:23] [Rank 0] Total FTA: 0.3334
+[2025-07-07 23:24:23] [Rank 0] Group 0 Loss: 4.7262
+[2025-07-07 23:24:23] [Rank 0] Group 1 Loss: 4.3310
+[2025-07-07 23:24:23] [Rank 0] Group 2 Loss: 4.3098
+[2025-07-07 23:24:23] [Rank 0] Group 3 Loss: 4.7072
+[2025-07-07 23:24:23] [Rank 0] Group 4 Loss: 4.5123
+[2025-07-07 23:24:23] [Rank 0] Group 5 Loss: 4.5499
+[2025-07-07 23:24:23] [Rank 0] Group 6 Loss: 4.5974
+[2025-07-07 23:24:23] [Rank 0] Group 7 Loss: 4.5898
+[2025-07-07 23:24:23] [Rank 0] Group 8 Loss: 4.6141
+[2025-07-07 23:24:23] [Rank 0] Group 9 Loss: 4.6065
+[2025-07-07 23:24:23] [Rank 0] Group 10 Loss: 4.6647
+[2025-07-07 23:24:23] [Rank 0] Group 11 Loss: 4.6392
+[2025-07-07 23:24:23] [Rank 0] Group 0 FTA: 0.5072
+[2025-07-07 23:24:23] [Rank 0] Group 1 FTA: 0.3516
+[2025-07-07 23:24:23] [Rank 0] Group 2 FTA: 0.3255
+[2025-07-07 23:24:23] [Rank 0] Group 3 FTA: 0.2031
+[2025-07-07 23:24:23] [Rank 0] Group 4 FTA: 0.2734
+[2025-07-07 23:24:23] [Rank 0] Group 5 FTA: 0.3073
+[2025-07-07 23:24:23] [Rank 0] Group 6 FTA: 0.2839
+[2025-07-07 23:24:23] [Rank 0] Group 7 FTA: 0.2786
+[2025-07-07 23:24:23] [Rank 0] Group 8 FTA: 0.3021
+[2025-07-07 23:24:23] [Rank 0] Group 9 FTA: 0.3242
+[2025-07-07 23:24:23] [Rank 0] Group 10 FTA: 0.3477
+[2025-07-07 23:24:23] [Rank 0] Group 11 FTA: 0.3262
+[2025-07-07 23:24:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-07 23:24:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-07 23:24:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-07 23:24:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-07 23:24:25] [Rank 0] step:2001/10000 train_time:159238ms step_avg:79.58ms
+[2025-07-07 23:24:26] [Rank 0] step:2021/10000 train_time:160712ms step_avg:79.52ms
+[2025-07-07 23:24:28] [Rank 0] step:2041/10000 train_time:162183ms step_avg:79.46ms
+[2025-07-07 23:24:29] [Rank 0] step:2061/10000 train_time:163651ms step_avg:79.40ms
+[2025-07-07 23:24:31] [Rank 0] step:2081/10000 train_time:165778ms step_avg:79.66ms
+[2025-07-07 23:24:33] [Rank 0] step:2101/10000 train_time:167249ms step_avg:79.60ms
+[2025-07-07 23:24:34] [Rank 0] step:2121/10000 train_time:168722ms step_avg:79.55ms
+[2025-07-07 23:24:36] [Rank 0] step:2141/10000 train_time:170192ms step_avg:79.49ms
+[2025-07-07 23:24:38] [Rank 0] step:2161/10000 train_time:171664ms step_avg:79.44ms
+[2025-07-07 23:24:39] [Rank 0] step:2181/10000 train_time:173779ms step_avg:79.68ms
+[2025-07-07 23:24:41] [Rank 0] step:2201/10000 train_time:175253ms step_avg:79.62ms
+[2025-07-07 23:24:42] [Rank 0] step:2221/10000 train_time:176727ms step_avg:79.57ms
+[2025-07-07 23:24:44] [Rank 0] step:2241/10000 train_time:178223ms step_avg:79.53ms
+[2025-07-07 23:24:46] [Rank 0] step:2261/10000 train_time:180385ms step_avg:79.78ms
+[2025-07-07 23:24:47] [Rank 0] step:2281/10000 train_time:181884ms step_avg:79.74ms
+[2025-07-07 23:24:49] [Rank 0] step:2301/10000 train_time:183679ms step_avg:79.83ms
+[2025-07-07 23:24:51] [Rank 0] step:2321/10000 train_time:185180ms step_avg:79.78ms
+[2025-07-07 23:24:53] [Rank 0] step:2341/10000 train_time:186941ms step_avg:79.86ms
+[2025-07-07 23:24:54] [Rank 0] step:2361/10000 train_time:188852ms step_avg:79.99ms
+[2025-07-07 23:24:56] [Rank 0] step:2381/10000 train_time:190354ms step_avg:79.95ms
+[2025-07-07 23:24:57] [Rank 0] step:2401/10000 train_time:191856ms step_avg:79.91ms
+[2025-07-07 23:24:59] [Rank 0] step:2421/10000 train_time:193359ms step_avg:79.87ms
+[2025-07-07 23:25:01] [Rank 0] step:2441/10000 train_time:195577ms step_avg:80.12ms
+[2025-07-07 23:25:02] [Rank 0] step:2461/10000 train_time:197029ms step_avg:80.06ms
+[2025-07-07 23:25:04] [Rank 0] step:2481/10000 train_time:198530ms step_avg:80.02ms
+[2025-07-07 23:25:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:25:06] [Rank 0] PRINT: step:2500/10000 train_loss:1.1617 val_loss:1.1195 train_time:200032ms step_avg:80.01ms
+[2025-07-07 23:25:06] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:25:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:25:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:30:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:30:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:30:31] [Rank 0] Total Loss: 4.7848
+[2025-07-07 23:30:31] [Rank 0] Total FTA: 0.4660
+[2025-07-07 23:30:31] [Rank 0] Group 0 Loss: 4.8687
+[2025-07-07 23:30:31] [Rank 0] Group 1 Loss: 4.5264
+[2025-07-07 23:30:31] [Rank 0] Group 2 Loss: 4.5911
+[2025-07-07 23:30:31] [Rank 0] Group 3 Loss: 4.8766
+[2025-07-07 23:30:31] [Rank 0] Group 4 Loss: 4.7446
+[2025-07-07 23:30:31] [Rank 0] Group 5 Loss: 4.7191
+[2025-07-07 23:30:31] [Rank 0] Group 6 Loss: 4.7235
+[2025-07-07 23:30:31] [Rank 0] Group 7 Loss: 4.8510
+[2025-07-07 23:30:31] [Rank 0] Group 8 Loss: 4.8384
+[2025-07-07 23:30:31] [Rank 0] Group 9 Loss: 4.7549
+[2025-07-07 23:30:31] [Rank 0] Group 10 Loss: 4.8012
+[2025-07-07 23:30:31] [Rank 0] Group 11 Loss: 4.8738
+[2025-07-07 23:30:31] [Rank 0] Group 0 FTA: 0.5254
+[2025-07-07 23:30:31] [Rank 0] Group 1 FTA: 0.4661
+[2025-07-07 23:30:31] [Rank 0] Group 2 FTA: 0.6693
+[2025-07-07 23:30:31] [Rank 0] Group 3 FTA: 0.3984
+[2025-07-07 23:30:31] [Rank 0] Group 4 FTA: 0.3672
+[2025-07-07 23:30:31] [Rank 0] Group 5 FTA: 0.4635
+[2025-07-07 23:30:31] [Rank 0] Group 6 FTA: 0.4115
+[2025-07-07 23:30:31] [Rank 0] Group 7 FTA: 0.4766
+[2025-07-07 23:30:31] [Rank 0] Group 8 FTA: 0.4193
+[2025-07-07 23:30:31] [Rank 0] Group 9 FTA: 0.4648
+[2025-07-07 23:30:31] [Rank 0] Group 10 FTA: 0.4316
+[2025-07-07 23:30:31] [Rank 0] Group 11 FTA: 0.4600
+[2025-07-07 23:30:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-07 23:30:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-07 23:30:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-07 23:30:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-07 23:30:32] [Rank 0] step:2501/10000 train_time:200052ms step_avg:79.99ms
+[2025-07-07 23:30:35] [Rank 0] step:2521/10000 train_time:201561ms step_avg:79.95ms
+[2025-07-07 23:30:36] [Rank 0] step:2541/10000 train_time:203709ms step_avg:80.17ms
+[2025-07-07 23:30:38] [Rank 0] step:2561/10000 train_time:205205ms step_avg:80.13ms
+[2025-07-07 23:30:39] [Rank 0] step:2581/10000 train_time:206698ms step_avg:80.08ms
+[2025-07-07 23:30:41] [Rank 0] step:2601/10000 train_time:208197ms step_avg:80.05ms
+[2025-07-07 23:30:43] [Rank 0] step:2621/10000 train_time:210352ms step_avg:80.26ms
+[2025-07-07 23:30:44] [Rank 0] step:2641/10000 train_time:211849ms step_avg:80.22ms
+[2025-07-07 23:30:46] [Rank 0] step:2661/10000 train_time:213348ms step_avg:80.18ms
+[2025-07-07 23:30:47] [Rank 0] step:2681/10000 train_time:214847ms step_avg:80.14ms
+[2025-07-07 23:30:49] [Rank 0] step:2701/10000 train_time:216603ms step_avg:80.19ms
+[2025-07-07 23:30:50] [Rank 0] step:2721/10000 train_time:218084ms step_avg:80.15ms
+[2025-07-07 23:30:52] [Rank 0] step:2741/10000 train_time:219586ms step_avg:80.11ms
+[2025-07-07 23:30:53] [Rank 0] step:2761/10000 train_time:221087ms step_avg:80.07ms
+[2025-07-07 23:30:55] [Rank 0] step:2781/10000 train_time:222587ms step_avg:80.04ms
+[2025-07-07 23:30:57] [Rank 0] step:2801/10000 train_time:224752ms step_avg:80.24ms
+[2025-07-07 23:30:59] [Rank 0] step:2821/10000 train_time:226250ms step_avg:80.20ms
+[2025-07-07 23:31:00] [Rank 0] step:2841/10000 train_time:227751ms step_avg:80.17ms
+[2025-07-07 23:31:02] [Rank 0] step:2861/10000 train_time:229254ms step_avg:80.13ms
+[2025-07-07 23:31:03] [Rank 0] step:2881/10000 train_time:230806ms step_avg:80.11ms
+[2025-07-07 23:31:05] [Rank 0] step:2901/10000 train_time:232494ms step_avg:80.14ms
+[2025-07-07 23:31:06] [Rank 0] step:2921/10000 train_time:233997ms step_avg:80.11ms
+[2025-07-07 23:31:08] [Rank 0] step:2941/10000 train_time:235730ms step_avg:80.15ms
+[2025-07-07 23:31:10] [Rank 0] step:2961/10000 train_time:237241ms step_avg:80.12ms
+[2025-07-07 23:31:12] [Rank 0] step:2981/10000 train_time:239410ms step_avg:80.31ms
+[2025-07-07 23:31:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:31:14] [Rank 0] PRINT: step:3000/10000 train_loss:1.0887 val_loss:1.0390 train_time:240909ms step_avg:80.30ms
+[2025-07-07 23:31:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:31:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:31:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:36:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:36:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:36:38] [Rank 0] Total Loss: 4.9089
+[2025-07-07 23:36:38] [Rank 0] Total FTA: 0.6236
+[2025-07-07 23:36:38] [Rank 0] Group 0 Loss: 5.1706
+[2025-07-07 23:36:38] [Rank 0] Group 1 Loss: 4.8668
+[2025-07-07 23:36:38] [Rank 0] Group 2 Loss: 4.6250
+[2025-07-07 23:36:38] [Rank 0] Group 3 Loss: 4.8645
+[2025-07-07 23:36:38] [Rank 0] Group 4 Loss: 4.9352
+[2025-07-07 23:36:38] [Rank 0] Group 5 Loss: 4.8679
+[2025-07-07 23:36:38] [Rank 0] Group 6 Loss: 4.8263
+[2025-07-07 23:36:38] [Rank 0] Group 7 Loss: 4.8942
+[2025-07-07 23:36:38] [Rank 0] Group 8 Loss: 4.8637
+[2025-07-07 23:36:38] [Rank 0] Group 9 Loss: 4.9550
+[2025-07-07 23:36:38] [Rank 0] Group 10 Loss: 4.9317
+[2025-07-07 23:36:38] [Rank 0] Group 11 Loss: 4.8874
+[2025-07-07 23:36:38] [Rank 0] Group 0 FTA: 0.7009
+[2025-07-07 23:36:38] [Rank 0] Group 1 FTA: 0.6719
+[2025-07-07 23:36:38] [Rank 0] Group 2 FTA: 0.5755
+[2025-07-07 23:36:38] [Rank 0] Group 3 FTA: 0.4323
+[2025-07-07 23:36:38] [Rank 0] Group 4 FTA: 0.5833
+[2025-07-07 23:36:38] [Rank 0] Group 5 FTA: 0.6797
+[2025-07-07 23:36:38] [Rank 0] Group 6 FTA: 0.6042
+[2025-07-07 23:36:38] [Rank 0] Group 7 FTA: 0.6354
+[2025-07-07 23:36:38] [Rank 0] Group 8 FTA: 0.6615
+[2025-07-07 23:36:38] [Rank 0] Group 9 FTA: 0.6641
+[2025-07-07 23:36:38] [Rank 0] Group 10 FTA: 0.6309
+[2025-07-07 23:36:38] [Rank 0] Group 11 FTA: 0.6064
+[2025-07-07 23:36:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-07 23:36:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-07 23:36:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-07 23:36:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-07 23:36:40] [Rank 0] step:3001/10000 train_time:240929ms step_avg:80.28ms
+[2025-07-07 23:36:41] [Rank 0] step:3021/10000 train_time:242436ms step_avg:80.25ms
+[2025-07-07 23:36:43] [Rank 0] step:3041/10000 train_time:243932ms step_avg:80.21ms
+[2025-07-07 23:36:44] [Rank 0] step:3061/10000 train_time:245585ms step_avg:80.23ms
+[2025-07-07 23:36:46] [Rank 0] step:3081/10000 train_time:247169ms step_avg:80.22ms
+[2025-07-07 23:36:47] [Rank 0] step:3101/10000 train_time:248664ms step_avg:80.19ms
+[2025-07-07 23:36:49] [Rank 0] step:3121/10000 train_time:250161ms step_avg:80.15ms
+[2025-07-07 23:36:50] [Rank 0] step:3141/10000 train_time:251660ms step_avg:80.12ms
+[2025-07-07 23:36:53] [Rank 0] step:3161/10000 train_time:253822ms step_avg:80.30ms
+[2025-07-07 23:36:54] [Rank 0] step:3181/10000 train_time:255319ms step_avg:80.26ms
+[2025-07-07 23:36:56] [Rank 0] step:3201/10000 train_time:256820ms step_avg:80.23ms
+[2025-07-07 23:36:57] [Rank 0] step:3221/10000 train_time:258319ms step_avg:80.20ms
+[2025-07-07 23:36:59] [Rank 0] step:3241/10000 train_time:259870ms step_avg:80.18ms
+[2025-07-07 23:37:00] [Rank 0] step:3261/10000 train_time:261554ms step_avg:80.21ms
+[2025-07-07 23:37:02] [Rank 0] step:3281/10000 train_time:263055ms step_avg:80.18ms
+[2025-07-07 23:37:03] [Rank 0] step:3301/10000 train_time:264559ms step_avg:80.15ms
+[2025-07-07 23:37:05] [Rank 0] step:3321/10000 train_time:266060ms step_avg:80.11ms
+[2025-07-07 23:37:07] [Rank 0] step:3341/10000 train_time:268223ms step_avg:80.28ms
+[2025-07-07 23:37:08] [Rank 0] step:3361/10000 train_time:269723ms step_avg:80.25ms
+[2025-07-07 23:37:10] [Rank 0] step:3381/10000 train_time:271225ms step_avg:80.22ms
+[2025-07-07 23:37:11] [Rank 0] step:3401/10000 train_time:272727ms step_avg:80.19ms
+[2025-07-07 23:37:14] [Rank 0] step:3421/10000 train_time:274918ms step_avg:80.36ms
+[2025-07-07 23:37:15] [Rank 0] step:3441/10000 train_time:276401ms step_avg:80.33ms
+[2025-07-07 23:37:17] [Rank 0] step:3461/10000 train_time:277905ms step_avg:80.30ms
+[2025-07-07 23:37:18] [Rank 0] step:3481/10000 train_time:279407ms step_avg:80.27ms
+[2025-07-07 23:37:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:37:21] [Rank 0] PRINT: step:3500/10000 train_loss:1.0139 val_loss:0.9769 train_time:280908ms step_avg:80.26ms
+[2025-07-07 23:37:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:37:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:37:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:42:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:42:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:42:45] [Rank 0] Total Loss: 5.0696
+[2025-07-07 23:42:45] [Rank 0] Total FTA: 0.7167
+[2025-07-07 23:42:45] [Rank 0] Group 0 Loss: 5.4314
+[2025-07-07 23:42:45] [Rank 0] Group 1 Loss: 5.0198
+[2025-07-07 23:42:45] [Rank 0] Group 2 Loss: 4.6366
+[2025-07-07 23:42:45] [Rank 0] Group 3 Loss: 5.0471
+[2025-07-07 23:42:45] [Rank 0] Group 4 Loss: 5.1346
+[2025-07-07 23:42:45] [Rank 0] Group 5 Loss: 4.9659
+[2025-07-07 23:42:45] [Rank 0] Group 6 Loss: 4.9248
+[2025-07-07 23:42:45] [Rank 0] Group 7 Loss: 5.0659
+[2025-07-07 23:42:45] [Rank 0] Group 8 Loss: 5.0490
+[2025-07-07 23:42:45] [Rank 0] Group 9 Loss: 5.0402
+[2025-07-07 23:42:45] [Rank 0] Group 10 Loss: 5.0515
+[2025-07-07 23:42:45] [Rank 0] Group 11 Loss: 5.0818
+[2025-07-07 23:42:45] [Rank 0] Group 0 FTA: 0.6736
+[2025-07-07 23:42:45] [Rank 0] Group 1 FTA: 0.4870
+[2025-07-07 23:42:45] [Rank 0] Group 2 FTA: 0.6354
+[2025-07-07 23:42:45] [Rank 0] Group 3 FTA: 0.7370
+[2025-07-07 23:42:45] [Rank 0] Group 4 FTA: 0.6823
+[2025-07-07 23:42:45] [Rank 0] Group 5 FTA: 0.7526
+[2025-07-07 23:42:45] [Rank 0] Group 6 FTA: 0.7708
+[2025-07-07 23:42:45] [Rank 0] Group 7 FTA: 0.7865
+[2025-07-07 23:42:45] [Rank 0] Group 8 FTA: 0.7370
+[2025-07-07 23:42:45] [Rank 0] Group 9 FTA: 0.7695
+[2025-07-07 23:42:45] [Rank 0] Group 10 FTA: 0.7520
+[2025-07-07 23:42:45] [Rank 0] Group 11 FTA: 0.7725
+[2025-07-07 23:42:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-07 23:42:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-07 23:42:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-07 23:42:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-07 23:42:47] [Rank 0] step:3501/10000 train_time:280928ms step_avg:80.24ms
+[2025-07-07 23:42:49] [Rank 0] step:3521/10000 train_time:283090ms step_avg:80.40ms
+[2025-07-07 23:42:51] [Rank 0] step:3541/10000 train_time:284585ms step_avg:80.37ms
+[2025-07-07 23:42:52] [Rank 0] step:3561/10000 train_time:286081ms step_avg:80.34ms
+[2025-07-07 23:42:54] [Rank 0] step:3581/10000 train_time:287579ms step_avg:80.31ms
+[2025-07-07 23:42:56] [Rank 0] step:3601/10000 train_time:289333ms step_avg:80.35ms
+[2025-07-07 23:42:57] [Rank 0] step:3621/10000 train_time:291240ms step_avg:80.43ms
+[2025-07-07 23:42:59] [Rank 0] step:3641/10000 train_time:292736ms step_avg:80.40ms
+[2025-07-07 23:43:00] [Rank 0] step:3661/10000 train_time:294235ms step_avg:80.37ms
+[2025-07-07 23:43:02] [Rank 0] step:3681/10000 train_time:295733ms step_avg:80.34ms
+[2025-07-07 23:43:04] [Rank 0] step:3701/10000 train_time:297878ms step_avg:80.49ms
+[2025-07-07 23:43:05] [Rank 0] step:3721/10000 train_time:299377ms step_avg:80.46ms
+[2025-07-07 23:43:07] [Rank 0] step:3741/10000 train_time:300878ms step_avg:80.43ms
+[2025-07-07 23:43:08] [Rank 0] step:3761/10000 train_time:302379ms step_avg:80.40ms
+[2025-07-07 23:43:11] [Rank 0] step:3781/10000 train_time:303933ms step_avg:80.38ms
+[2025-07-07 23:43:12] [Rank 0] step:3801/10000 train_time:306023ms step_avg:80.51ms
+[2025-07-07 23:43:14] [Rank 0] step:3821/10000 train_time:307524ms step_avg:80.48ms
+[2025-07-07 23:43:15] [Rank 0] step:3841/10000 train_time:309026ms step_avg:80.45ms
+[2025-07-07 23:43:17] [Rank 0] step:3861/10000 train_time:310530ms step_avg:80.43ms
+[2025-07-07 23:43:18] [Rank 0] step:3881/10000 train_time:312266ms step_avg:80.46ms
+[2025-07-07 23:43:20] [Rank 0] step:3901/10000 train_time:313765ms step_avg:80.43ms
+[2025-07-07 23:43:21] [Rank 0] step:3921/10000 train_time:315268ms step_avg:80.40ms
+[2025-07-07 23:43:23] [Rank 0] step:3941/10000 train_time:316767ms step_avg:80.38ms
+[2025-07-07 23:43:25] [Rank 0] step:3961/10000 train_time:318525ms step_avg:80.42ms
+[2025-07-07 23:43:26] [Rank 0] step:3981/10000 train_time:320008ms step_avg:80.38ms
+[2025-07-07 23:43:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:43:28] [Rank 0] PRINT: step:4000/10000 train_loss:0.9626 val_loss:0.9390 train_time:321509ms step_avg:80.38ms
+[2025-07-07 23:43:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:43:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:43:29] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:48:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:48:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:48:53] [Rank 0] Total Loss: 5.1859
+[2025-07-07 23:48:53] [Rank 0] Total FTA: 0.8045
+[2025-07-07 23:48:53] [Rank 0] Group 0 Loss: 5.6311
+[2025-07-07 23:48:53] [Rank 0] Group 1 Loss: 5.0209
+[2025-07-07 23:48:53] [Rank 0] Group 2 Loss: 4.7769
+[2025-07-07 23:48:53] [Rank 0] Group 3 Loss: 5.1047
+[2025-07-07 23:48:53] [Rank 0] Group 4 Loss: 5.1146
+[2025-07-07 23:48:53] [Rank 0] Group 5 Loss: 5.1363
+[2025-07-07 23:48:53] [Rank 0] Group 6 Loss: 5.0869
+[2025-07-07 23:48:53] [Rank 0] Group 7 Loss: 5.2154
+[2025-07-07 23:48:53] [Rank 0] Group 8 Loss: 5.1669
+[2025-07-07 23:48:53] [Rank 0] Group 9 Loss: 5.1394
+[2025-07-07 23:48:53] [Rank 0] Group 10 Loss: 5.1689
+[2025-07-07 23:48:53] [Rank 0] Group 11 Loss: 5.1960
+[2025-07-07 23:48:53] [Rank 0] Group 0 FTA: 0.6814
+[2025-07-07 23:48:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 23:48:53] [Rank 0] Group 2 FTA: 0.7135
+[2025-07-07 23:48:53] [Rank 0] Group 3 FTA: 0.8151
+[2025-07-07 23:48:53] [Rank 0] Group 4 FTA: 0.7448
+[2025-07-07 23:48:53] [Rank 0] Group 5 FTA: 0.8255
+[2025-07-07 23:48:53] [Rank 0] Group 6 FTA: 0.8229
+[2025-07-07 23:48:53] [Rank 0] Group 7 FTA: 0.8333
+[2025-07-07 23:48:53] [Rank 0] Group 8 FTA: 0.7891
+[2025-07-07 23:48:53] [Rank 0] Group 9 FTA: 0.8086
+[2025-07-07 23:48:53] [Rank 0] Group 10 FTA: 0.8496
+[2025-07-07 23:48:53] [Rank 0] Group 11 FTA: 0.8330
+[2025-07-07 23:48:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-07 23:48:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-07 23:48:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-07 23:48:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-07 23:48:55] [Rank 0] step:4001/10000 train_time:321529ms step_avg:80.36ms
+[2025-07-07 23:48:56] [Rank 0] step:4021/10000 train_time:323041ms step_avg:80.34ms
+[2025-07-07 23:48:58] [Rank 0] step:4041/10000 train_time:324537ms step_avg:80.31ms
+[2025-07-07 23:49:00] [Rank 0] step:4061/10000 train_time:326694ms step_avg:80.45ms
+[2025-07-07 23:49:01] [Rank 0] step:4081/10000 train_time:328188ms step_avg:80.42ms
+[2025-07-07 23:49:03] [Rank 0] step:4101/10000 train_time:329781ms step_avg:80.41ms
+[2025-07-07 23:49:04] [Rank 0] step:4121/10000 train_time:331470ms step_avg:80.43ms
+[2025-07-07 23:49:07] [Rank 0] step:4141/10000 train_time:333648ms step_avg:80.57ms
+[2025-07-07 23:49:08] [Rank 0] step:4161/10000 train_time:335133ms step_avg:80.54ms
+[2025-07-07 23:49:10] [Rank 0] step:4181/10000 train_time:336632ms step_avg:80.51ms
+[2025-07-07 23:49:11] [Rank 0] step:4201/10000 train_time:338130ms step_avg:80.49ms
+[2025-07-07 23:49:13] [Rank 0] step:4221/10000 train_time:339630ms step_avg:80.46ms
+[2025-07-07 23:49:15] [Rank 0] step:4241/10000 train_time:341799ms step_avg:80.59ms
+[2025-07-07 23:49:16] [Rank 0] step:4261/10000 train_time:343300ms step_avg:80.57ms
+[2025-07-07 23:49:18] [Rank 0] step:4281/10000 train_time:344800ms step_avg:80.54ms
+[2025-07-07 23:49:19] [Rank 0] step:4301/10000 train_time:346300ms step_avg:80.52ms
+[2025-07-07 23:49:21] [Rank 0] step:4321/10000 train_time:347800ms step_avg:80.49ms
+[2025-07-07 23:49:23] [Rank 0] step:4341/10000 train_time:349946ms step_avg:80.61ms
+[2025-07-07 23:49:24] [Rank 0] step:4361/10000 train_time:351446ms step_avg:80.59ms
+[2025-07-07 23:49:26] [Rank 0] step:4381/10000 train_time:352947ms step_avg:80.56ms
+[2025-07-07 23:49:27] [Rank 0] step:4401/10000 train_time:354451ms step_avg:80.54ms
+[2025-07-07 23:49:29] [Rank 0] step:4421/10000 train_time:356193ms step_avg:80.57ms
+[2025-07-07 23:49:31] [Rank 0] step:4441/10000 train_time:357694ms step_avg:80.54ms
+[2025-07-07 23:49:32] [Rank 0] step:4461/10000 train_time:359195ms step_avg:80.52ms
+[2025-07-07 23:49:34] [Rank 0] step:4481/10000 train_time:360699ms step_avg:80.50ms
+[2025-07-07 23:49:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:49:36] [Rank 0] PRINT: step:4500/10000 train_loss:0.9282 val_loss:0.9119 train_time:362202ms step_avg:80.49ms
+[2025-07-07 23:49:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:49:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
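The detailed-evaluation pass above subsamples the QA set with stratified sampling so that each of the 12 groups reported in these blocks stays represented. The training script's own sampler is not shown in this log; the following is a minimal sketch of the idea, with illustrative names (`stratified_sample`, `groups`) and a rounding policy that are assumptions, not the script's actual code. Rounding the per-group quota up is one way the sampled size can land above the ~5000 target, as with the 5633 reported here.

```python
import random
from collections import defaultdict

def stratified_sample(samples, groups, target_total=5000, seed=0):
    """Draw a roughly equal number of samples from every group.

    `samples` and `groups` are parallel sequences; `groups[i]` is the
    class label of `samples[i]`. Names and rounding policy are
    illustrative, not the training script's actual implementation.
    """
    by_group = defaultdict(list)
    for sample, group in zip(samples, groups):
        by_group[group].append(sample)
    quota = -(-target_total // len(by_group))  # ceil(target / n_groups)
    rng = random.Random(seed)
    picked = []
    for members in by_group.values():
        rng.shuffle(members)          # random draw within each group
        picked.extend(members[:quota])
    return picked
```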
+[2025-07-07 23:49:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:55:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:55:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:55:01] [Rank 0] Total Loss: 5.3111
+[2025-07-07 23:55:01] [Rank 0] Total FTA: 0.8835
+[2025-07-07 23:55:01] [Rank 0] Group 0 Loss: 5.5569
+[2025-07-07 23:55:01] [Rank 0] Group 1 Loss: 5.3543
+[2025-07-07 23:55:01] [Rank 0] Group 2 Loss: 5.0565
+[2025-07-07 23:55:01] [Rank 0] Group 3 Loss: 5.3820
+[2025-07-07 23:55:01] [Rank 0] Group 4 Loss: 5.3011
+[2025-07-07 23:55:01] [Rank 0] Group 5 Loss: 5.2329
+[2025-07-07 23:55:01] [Rank 0] Group 6 Loss: 5.1022
+[2025-07-07 23:55:01] [Rank 0] Group 7 Loss: 5.3054
+[2025-07-07 23:55:01] [Rank 0] Group 8 Loss: 5.2898
+[2025-07-07 23:55:01] [Rank 0] Group 9 Loss: 5.1672
+[2025-07-07 23:55:01] [Rank 0] Group 10 Loss: 5.2896
+[2025-07-07 23:55:01] [Rank 0] Group 11 Loss: 5.3473
+[2025-07-07 23:55:01] [Rank 0] Group 0 FTA: 0.8283
+[2025-07-07 23:55:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 23:55:01] [Rank 0] Group 2 FTA: 0.9297
+[2025-07-07 23:55:01] [Rank 0] Group 3 FTA: 0.8672
+[2025-07-07 23:55:01] [Rank 0] Group 4 FTA: 0.8958
+[2025-07-07 23:55:01] [Rank 0] Group 5 FTA: 0.9245
+[2025-07-07 23:55:01] [Rank 0] Group 6 FTA: 0.8646
+[2025-07-07 23:55:01] [Rank 0] Group 7 FTA: 0.8828
+[2025-07-07 23:55:01] [Rank 0] Group 8 FTA: 0.8594
+[2025-07-07 23:55:01] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-07 23:55:01] [Rank 0] Group 10 FTA: 0.8652
+[2025-07-07 23:55:01] [Rank 0] Group 11 FTA: 0.8623
+[2025-07-07 23:55:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-07 23:55:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-07 23:55:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-07 23:55:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-07 23:55:04] [Rank 0] step:4501/10000 train_time:362331ms step_avg:80.50ms
+[2025-07-07 23:55:05] [Rank 0] step:4521/10000 train_time:364426ms step_avg:80.61ms
+[2025-07-07 23:55:07] [Rank 0] step:4541/10000 train_time:365921ms step_avg:80.58ms
+[2025-07-07 23:55:08] [Rank 0] step:4561/10000 train_time:367414ms step_avg:80.56ms
+[2025-07-07 23:55:10] [Rank 0] step:4581/10000 train_time:368911ms step_avg:80.53ms
+[2025-07-07 23:55:12] [Rank 0] step:4601/10000 train_time:371068ms step_avg:80.65ms
+[2025-07-07 23:55:13] [Rank 0] step:4621/10000 train_time:372563ms step_avg:80.62ms
+[2025-07-07 23:55:15] [Rank 0] step:4641/10000 train_time:374061ms step_avg:80.60ms
+[2025-07-07 23:55:16] [Rank 0] step:4661/10000 train_time:375560ms step_avg:80.57ms
+[2025-07-07 23:55:18] [Rank 0] step:4681/10000 train_time:377735ms step_avg:80.70ms
+[2025-07-07 23:55:20] [Rank 0] step:4701/10000 train_time:379213ms step_avg:80.67ms
+[2025-07-07 23:55:21] [Rank 0] step:4721/10000 train_time:380714ms step_avg:80.64ms
+[2025-07-07 23:55:23] [Rank 0] step:4741/10000 train_time:382442ms step_avg:80.67ms
+[2025-07-07 23:55:25] [Rank 0] step:4761/10000 train_time:383941ms step_avg:80.64ms
+[2025-07-07 23:55:27] [Rank 0] step:4781/10000 train_time:386109ms step_avg:80.76ms
+[2025-07-07 23:55:28] [Rank 0] step:4801/10000 train_time:387608ms step_avg:80.73ms
+[2025-07-07 23:55:30] [Rank 0] step:4821/10000 train_time:389108ms step_avg:80.71ms
+[2025-07-07 23:55:31] [Rank 0] step:4841/10000 train_time:390610ms step_avg:80.69ms
+[2025-07-07 23:55:33] [Rank 0] step:4861/10000 train_time:392774ms step_avg:80.80ms
+[2025-07-07 23:55:35] [Rank 0] step:4881/10000 train_time:394257ms step_avg:80.77ms
+[2025-07-07 23:55:36] [Rank 0] step:4901/10000 train_time:395759ms step_avg:80.75ms
+[2025-07-07 23:55:38] [Rank 0] step:4921/10000 train_time:397260ms step_avg:80.73ms
+[2025-07-07 23:55:39] [Rank 0] step:4941/10000 train_time:398761ms step_avg:80.70ms
+[2025-07-07 23:55:42] [Rank 0] step:4961/10000 train_time:400926ms step_avg:80.82ms
+[2025-07-07 23:55:43] [Rank 0] step:4981/10000 train_time:402425ms step_avg:80.79ms
+[2025-07-07 23:55:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:55:46] [Rank 0] PRINT: step:5000/10000 train_loss:0.9051 val_loss:0.8945 train_time:403933ms step_avg:80.79ms
+[2025-07-07 23:55:46] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:55:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
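The warning emitted before each validation pass is plain integer arithmetic: 1966080 / 262144 = 7.5, so a loop that runs `val_tokens // val_batch_size` full batches evaluates 7 of them and the trailing 131072 tokens go unscored. A sketch of that check, reproducing the message above (the actual guard in the training script may differ):

```python
val_tokens = 1966080
val_batch_size = 262144

# 7 full batches fit; remainder == 131072 tokens would be skipped.
num_batches, remainder = divmod(val_tokens, val_batch_size)
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
```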
+[2025-07-07 23:55:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:01:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:01:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:01:11] [Rank 0] Total Loss: 5.2985
+[2025-07-08 00:01:11] [Rank 0] Total FTA: 0.9343
+[2025-07-08 00:01:11] [Rank 0] Group 0 Loss: 5.6383
+[2025-07-08 00:01:11] [Rank 0] Group 1 Loss: 5.2729
+[2025-07-08 00:01:11] [Rank 0] Group 2 Loss: 4.9971
+[2025-07-08 00:01:11] [Rank 0] Group 3 Loss: 5.2232
+[2025-07-08 00:01:11] [Rank 0] Group 4 Loss: 5.2570
+[2025-07-08 00:01:11] [Rank 0] Group 5 Loss: 5.2566
+[2025-07-08 00:01:11] [Rank 0] Group 6 Loss: 5.1658
+[2025-07-08 00:01:11] [Rank 0] Group 7 Loss: 5.2984
+[2025-07-08 00:01:11] [Rank 0] Group 8 Loss: 5.2946
+[2025-07-08 00:01:11] [Rank 0] Group 9 Loss: 5.2147
+[2025-07-08 00:01:11] [Rank 0] Group 10 Loss: 5.3473
+[2025-07-08 00:01:11] [Rank 0] Group 11 Loss: 5.2732
+[2025-07-08 00:01:11] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 00:01:11] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:01:11] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-08 00:01:11] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 00:01:11] [Rank 0] Group 4 FTA: 0.8958
+[2025-07-08 00:01:11] [Rank 0] Group 5 FTA: 0.9219
+[2025-07-08 00:01:11] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-08 00:01:11] [Rank 0] Group 7 FTA: 0.9010
+[2025-07-08 00:01:11] [Rank 0] Group 8 FTA: 0.8880
+[2025-07-08 00:01:11] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-08 00:01:11] [Rank 0] Group 10 FTA: 0.9082
+[2025-07-08 00:01:11] [Rank 0] Group 11 FTA: 0.9160
+[2025-07-08 00:01:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:01:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:01:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:01:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:01:13] [Rank 0] step:5001/10000 train_time:403953ms step_avg:80.77ms
+[2025-07-08 00:01:14] [Rank 0] step:5021/10000 train_time:405457ms step_avg:80.75ms
+[2025-07-08 00:01:16] [Rank 0] step:5041/10000 train_time:407007ms step_avg:80.74ms
+[2025-07-08 00:01:18] [Rank 0] step:5061/10000 train_time:409114ms step_avg:80.84ms
+[2025-07-08 00:01:19] [Rank 0] step:5081/10000 train_time:410610ms step_avg:80.81ms
+[2025-07-08 00:01:21] [Rank 0] step:5101/10000 train_time:412108ms step_avg:80.79ms
+[2025-07-08 00:01:22] [Rank 0] step:5121/10000 train_time:413604ms step_avg:80.77ms
+[2025-07-08 00:01:24] [Rank 0] step:5141/10000 train_time:415758ms step_avg:80.87ms
+[2025-07-08 00:01:26] [Rank 0] step:5161/10000 train_time:417255ms step_avg:80.85ms
+[2025-07-08 00:01:27] [Rank 0] step:5181/10000 train_time:418753ms step_avg:80.82ms
+[2025-07-08 00:01:29] [Rank 0] step:5201/10000 train_time:420251ms step_avg:80.80ms
+[2025-07-08 00:01:31] [Rank 0] step:5221/10000 train_time:421802ms step_avg:80.79ms
+[2025-07-08 00:01:33] [Rank 0] step:5241/10000 train_time:423902ms step_avg:80.88ms
+[2025-07-08 00:01:34] [Rank 0] step:5261/10000 train_time:425402ms step_avg:80.86ms
+[2025-07-08 00:01:36] [Rank 0] step:5281/10000 train_time:426901ms step_avg:80.84ms
+[2025-07-08 00:01:37] [Rank 0] step:5301/10000 train_time:428402ms step_avg:80.82ms
+[2025-07-08 00:01:39] [Rank 0] step:5321/10000 train_time:430138ms step_avg:80.84ms
+[2025-07-08 00:01:40] [Rank 0] step:5341/10000 train_time:431639ms step_avg:80.82ms
+[2025-07-08 00:01:42] [Rank 0] step:5361/10000 train_time:433384ms step_avg:80.84ms
+[2025-07-08 00:01:43] [Rank 0] step:5381/10000 train_time:434884ms step_avg:80.82ms
+[2025-07-08 00:01:45] [Rank 0] step:5401/10000 train_time:436435ms step_avg:80.81ms
+[2025-07-08 00:01:47] [Rank 0] step:5421/10000 train_time:438120ms step_avg:80.82ms
+[2025-07-08 00:01:48] [Rank 0] step:5441/10000 train_time:439621ms step_avg:80.80ms
+[2025-07-08 00:01:50] [Rank 0] step:5461/10000 train_time:441124ms step_avg:80.78ms
+[2025-07-08 00:01:51] [Rank 0] step:5481/10000 train_time:442624ms step_avg:80.76ms
+[2025-07-08 00:01:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:01:54] [Rank 0] PRINT: step:5500/10000 train_loss:0.8898 val_loss:0.8826 train_time:444780ms step_avg:80.87ms
+[2025-07-08 00:01:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:01:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
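The `step_avg` field in the per-step lines is cumulative wall-clock training time divided by the step index, which is why it drifts upward whenever a slow step lands. For example, at step 5481 above, 442624 ms / 5481 ≈ 80.76 ms. A one-line sanity check:

```python
# Reproduce step_avg for: step:5481/10000 train_time:442624ms step_avg:80.76ms
train_time_ms, step = 442624, 5481
print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:80.76ms
```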
+[2025-07-08 00:01:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:07:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:07:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:07:19] [Rank 0] Total Loss: 5.5168
+[2025-07-08 00:07:19] [Rank 0] Total FTA: 0.9112
+[2025-07-08 00:07:19] [Rank 0] Group 0 Loss: 5.9470
+[2025-07-08 00:07:19] [Rank 0] Group 1 Loss: 5.4468
+[2025-07-08 00:07:19] [Rank 0] Group 2 Loss: 5.0818
+[2025-07-08 00:07:19] [Rank 0] Group 3 Loss: 5.3932
+[2025-07-08 00:07:19] [Rank 0] Group 4 Loss: 5.5539
+[2025-07-08 00:07:19] [Rank 0] Group 5 Loss: 5.3865
+[2025-07-08 00:07:19] [Rank 0] Group 6 Loss: 5.3812
+[2025-07-08 00:07:19] [Rank 0] Group 7 Loss: 5.5084
+[2025-07-08 00:07:19] [Rank 0] Group 8 Loss: 5.5174
+[2025-07-08 00:07:19] [Rank 0] Group 9 Loss: 5.4880
+[2025-07-08 00:07:19] [Rank 0] Group 10 Loss: 5.5194
+[2025-07-08 00:07:19] [Rank 0] Group 11 Loss: 5.5240
+[2025-07-08 00:07:19] [Rank 0] Group 0 FTA: 0.8427
+[2025-07-08 00:07:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:07:19] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 00:07:19] [Rank 0] Group 3 FTA: 0.9661
+[2025-07-08 00:07:19] [Rank 0] Group 4 FTA: 0.8333
+[2025-07-08 00:07:19] [Rank 0] Group 5 FTA: 0.9010
+[2025-07-08 00:07:19] [Rank 0] Group 6 FTA: 0.9167
+[2025-07-08 00:07:19] [Rank 0] Group 7 FTA: 0.9062
+[2025-07-08 00:07:19] [Rank 0] Group 8 FTA: 0.8724
+[2025-07-08 00:07:19] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-08 00:07:19] [Rank 0] Group 10 FTA: 0.9121
+[2025-07-08 00:07:19] [Rank 0] Group 11 FTA: 0.9229
+[2025-07-08 00:07:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:07:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:07:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:07:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:07:20] [Rank 0] step:5501/10000 train_time:444801ms step_avg:80.86ms
+[2025-07-08 00:07:22] [Rank 0] step:5521/10000 train_time:446298ms step_avg:80.84ms
+[2025-07-08 00:07:23] [Rank 0] step:5541/10000 train_time:447792ms step_avg:80.81ms
+[2025-07-08 00:07:25] [Rank 0] step:5561/10000 train_time:449287ms step_avg:80.79ms
+[2025-07-08 00:07:27] [Rank 0] step:5581/10000 train_time:450839ms step_avg:80.78ms
+[2025-07-08 00:07:28] [Rank 0] step:5601/10000 train_time:452931ms step_avg:80.87ms
+[2025-07-08 00:07:30] [Rank 0] step:5621/10000 train_time:454428ms step_avg:80.84ms
+[2025-07-08 00:07:31] [Rank 0] step:5641/10000 train_time:455926ms step_avg:80.82ms
+[2025-07-08 00:07:33] [Rank 0] step:5661/10000 train_time:457425ms step_avg:80.80ms
+[2025-07-08 00:07:35] [Rank 0] step:5681/10000 train_time:459574ms step_avg:80.90ms
+[2025-07-08 00:07:37] [Rank 0] step:5701/10000 train_time:461074ms step_avg:80.88ms
+[2025-07-08 00:07:38] [Rank 0] step:5721/10000 train_time:462573ms step_avg:80.86ms
+[2025-07-08 00:07:40] [Rank 0] step:5741/10000 train_time:464072ms step_avg:80.83ms
+[2025-07-08 00:07:42] [Rank 0] step:5761/10000 train_time:466239ms step_avg:80.93ms
+[2025-07-08 00:07:43] [Rank 0] step:5781/10000 train_time:467721ms step_avg:80.91ms
+[2025-07-08 00:07:45] [Rank 0] step:5801/10000 train_time:469222ms step_avg:80.89ms
+[2025-07-08 00:07:46] [Rank 0] step:5821/10000 train_time:470723ms step_avg:80.87ms
+[2025-07-08 00:07:48] [Rank 0] step:5841/10000 train_time:472225ms step_avg:80.85ms
+[2025-07-08 00:07:50] [Rank 0] step:5861/10000 train_time:474392ms step_avg:80.94ms
+[2025-07-08 00:07:51] [Rank 0] step:5881/10000 train_time:475891ms step_avg:80.92ms
+[2025-07-08 00:07:53] [Rank 0] step:5901/10000 train_time:477390ms step_avg:80.90ms
+[2025-07-08 00:07:54] [Rank 0] step:5921/10000 train_time:478891ms step_avg:80.88ms
+[2025-07-08 00:07:57] [Rank 0] step:5941/10000 train_time:480444ms step_avg:80.87ms
+[2025-07-08 00:07:58] [Rank 0] step:5961/10000 train_time:482535ms step_avg:80.95ms
+[2025-07-08 00:08:00] [Rank 0] step:5981/10000 train_time:484036ms step_avg:80.93ms
+[2025-07-08 00:08:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:08:02] [Rank 0] PRINT: step:6000/10000 train_loss:0.8805 val_loss:0.8772 train_time:485629ms step_avg:80.94ms
+[2025-07-08 00:08:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:08:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
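The per-group tables in these evaluation blocks are grouped means over the sampled set: one mean loss and one mean accuracy (reported as "FTA"; the log never expands the acronym) per group, plus the totals. A hedged sketch of that aggregation, with illustrative array names (`loss`, `correct`, `group`) that are assumptions rather than the script's variables:

```python
import numpy as np

def per_group_report(loss, correct, group, num_groups=12):
    """Print totals then per-group means, mirroring the log's layout.

    `loss`, `correct` (0/1 per sample), and `group` are parallel
    per-sample arrays; the names are illustrative only.
    """
    loss, correct, group = map(np.asarray, (loss, correct, group))
    print(f"Total Loss: {loss.mean():.4f}")
    print(f"Total FTA: {correct.mean():.4f}")
    for g in range(num_groups):
        print(f"Group {g} Loss: {loss[group == g].mean():.4f}")
    for g in range(num_groups):
        print(f"Group {g} FTA: {correct[group == g].mean():.4f}")
```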
+[2025-07-08 00:08:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:13:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:13:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:13:27] [Rank 0] Total Loss: 5.4848
+[2025-07-08 00:13:27] [Rank 0] Total FTA: 0.9444
+[2025-07-08 00:13:27] [Rank 0] Group 0 Loss: 5.7194
+[2025-07-08 00:13:27] [Rank 0] Group 1 Loss: 5.5534
+[2025-07-08 00:13:27] [Rank 0] Group 2 Loss: 5.0692
+[2025-07-08 00:13:27] [Rank 0] Group 3 Loss: 5.4336
+[2025-07-08 00:13:27] [Rank 0] Group 4 Loss: 5.5490
+[2025-07-08 00:13:27] [Rank 0] Group 5 Loss: 5.4175
+[2025-07-08 00:13:27] [Rank 0] Group 6 Loss: 5.2754
+[2025-07-08 00:13:27] [Rank 0] Group 7 Loss: 5.5109
+[2025-07-08 00:13:27] [Rank 0] Group 8 Loss: 5.5364
+[2025-07-08 00:13:27] [Rank 0] Group 9 Loss: 5.5015
+[2025-07-08 00:13:27] [Rank 0] Group 10 Loss: 5.5083
+[2025-07-08 00:13:27] [Rank 0] Group 11 Loss: 5.4926
+[2025-07-08 00:13:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 00:13:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:13:27] [Rank 0] Group 2 FTA: 0.9193
+[2025-07-08 00:13:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 00:13:28] [Rank 0] Group 4 FTA: 0.9245
+[2025-07-08 00:13:28] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-08 00:13:28] [Rank 0] Group 6 FTA: 0.9271
+[2025-07-08 00:13:28] [Rank 0] Group 7 FTA: 0.9115
+[2025-07-08 00:13:28] [Rank 0] Group 8 FTA: 0.9036
+[2025-07-08 00:13:28] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-08 00:13:28] [Rank 0] Group 10 FTA: 0.9316
+[2025-07-08 00:13:28] [Rank 0] Group 11 FTA: 0.9277
+[2025-07-08 00:13:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:13:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:13:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:13:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:13:29] [Rank 0] step:6001/10000 train_time:485648ms step_avg:80.93ms
+[2025-07-08 00:13:31] [Rank 0] step:6021/10000 train_time:487143ms step_avg:80.91ms
+[2025-07-08 00:13:33] [Rank 0] step:6041/10000 train_time:489295ms step_avg:81.00ms
+[2025-07-08 00:13:34] [Rank 0] step:6061/10000 train_time:490794ms step_avg:80.98ms
+[2025-07-08 00:13:36] [Rank 0] step:6081/10000 train_time:492290ms step_avg:80.96ms
+[2025-07-08 00:13:37] [Rank 0] step:6101/10000 train_time:493786ms step_avg:80.94ms
+[2025-07-08 00:13:39] [Rank 0] step:6121/10000 train_time:495332ms step_avg:80.92ms
+[2025-07-08 00:13:40] [Rank 0] step:6141/10000 train_time:497120ms step_avg:80.95ms
+[2025-07-08 00:13:42] [Rank 0] step:6161/10000 train_time:498620ms step_avg:80.93ms
+[2025-07-08 00:13:43] [Rank 0] step:6181/10000 train_time:500119ms step_avg:80.91ms
+[2025-07-08 00:13:45] [Rank 0] step:6201/10000 train_time:501618ms step_avg:80.89ms
+[2025-07-08 00:13:47] [Rank 0] step:6221/10000 train_time:503759ms step_avg:80.98ms
+[2025-07-08 00:13:49] [Rank 0] step:6241/10000 train_time:505259ms step_avg:80.96ms
+[2025-07-08 00:13:50] [Rank 0] step:6261/10000 train_time:506759ms step_avg:80.94ms
+[2025-07-08 00:13:52] [Rank 0] step:6281/10000 train_time:508259ms step_avg:80.92ms
+[2025-07-08 00:13:54] [Rank 0] step:6301/10000 train_time:510426ms step_avg:81.01ms
+[2025-07-08 00:13:55] [Rank 0] step:6321/10000 train_time:511909ms step_avg:80.99ms
+[2025-07-08 00:13:57] [Rank 0] step:6341/10000 train_time:513409ms step_avg:80.97ms
+[2025-07-08 00:13:58] [Rank 0] step:6361/10000 train_time:514912ms step_avg:80.95ms
+[2025-07-08 00:14:00] [Rank 0] step:6381/10000 train_time:516412ms step_avg:80.93ms
+[2025-07-08 00:14:02] [Rank 0] step:6401/10000 train_time:518578ms step_avg:81.02ms
+[2025-07-08 00:14:03] [Rank 0] step:6421/10000 train_time:520080ms step_avg:81.00ms
+[2025-07-08 00:14:05] [Rank 0] step:6441/10000 train_time:521582ms step_avg:80.98ms
+[2025-07-08 00:14:06] [Rank 0] step:6461/10000 train_time:523085ms step_avg:80.96ms
+[2025-07-08 00:14:09] [Rank 0] step:6481/10000 train_time:525273ms step_avg:81.05ms
+[2025-07-08 00:14:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:14:11] [Rank 0] PRINT: step:6500/10000 train_loss:0.8754 val_loss:0.8748 train_time:526755ms step_avg:81.04ms
+[2025-07-08 00:14:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:14:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
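Each evaluation block ends by redrawing and overwriting four PNG curves. The plotting code itself is not in the log; a minimal matplotlib sketch of one such running curve, with values taken from the step-4500 and step-5000 blocks above (the `save_total_curve` name is illustrative):

```python
import matplotlib.pyplot as plt

def save_total_curve(steps, values, path, ylabel="FTA"):
    """Redraw one running curve and overwrite the PNG, as done every 500 steps."""
    fig, ax = plt.subplots()
    ax.plot(steps, values, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    fig.savefig(path)
    plt.close(fig)  # free the figure; this gets called repeatedly

# Total FTA values from the step-4500 and step-5000 blocks above:
save_total_curve([4500, 5000], [0.8835, 0.9343], "total_acc_curve.png")
```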
+[2025-07-08 00:14:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:19:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:19:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:19:35] [Rank 0] Total Loss: 5.5337
+[2025-07-08 00:19:35] [Rank 0] Total FTA: 0.9313
+[2025-07-08 00:19:35] [Rank 0] Group 0 Loss: 5.9931
+[2025-07-08 00:19:35] [Rank 0] Group 1 Loss: 5.4288
+[2025-07-08 00:19:36] [Rank 0] Group 2 Loss: 5.0737
+[2025-07-08 00:19:36] [Rank 0] Group 3 Loss: 5.5287
+[2025-07-08 00:19:36] [Rank 0] Group 4 Loss: 5.5307
+[2025-07-08 00:19:36] [Rank 0] Group 5 Loss: 5.4639
+[2025-07-08 00:19:36] [Rank 0] Group 6 Loss: 5.4085
+[2025-07-08 00:19:36] [Rank 0] Group 7 Loss: 5.5443
+[2025-07-08 00:19:36] [Rank 0] Group 8 Loss: 5.4937
+[2025-07-08 00:19:36] [Rank 0] Group 9 Loss: 5.5332
+[2025-07-08 00:19:36] [Rank 0] Group 10 Loss: 5.4804
+[2025-07-08 00:19:36] [Rank 0] Group 11 Loss: 5.5143
+[2025-07-08 00:19:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 00:19:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:19:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 00:19:36] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 00:19:36] [Rank 0] Group 4 FTA: 0.8438
+[2025-07-08 00:19:36] [Rank 0] Group 5 FTA: 0.8854
+[2025-07-08 00:19:36] [Rank 0] Group 6 FTA: 0.9297
+[2025-07-08 00:19:36] [Rank 0] Group 7 FTA: 0.8958
+[2025-07-08 00:19:36] [Rank 0] Group 8 FTA: 0.8776
+[2025-07-08 00:19:36] [Rank 0] Group 9 FTA: 0.8828
+[2025-07-08 00:19:36] [Rank 0] Group 10 FTA: 0.9160
+[2025-07-08 00:19:36] [Rank 0] Group 11 FTA: 0.9062
+[2025-07-08 00:19:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:19:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:19:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:19:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:19:37] [Rank 0] step:6501/10000 train_time:526774ms step_avg:81.03ms
+[2025-07-08 00:19:39] [Rank 0] step:6521/10000 train_time:528273ms step_avg:81.01ms
+[2025-07-08 00:19:40] [Rank 0] step:6541/10000 train_time:529766ms step_avg:80.99ms
+[2025-07-08 00:19:42] [Rank 0] step:6561/10000 train_time:531263ms step_avg:80.97ms
+[2025-07-08 00:19:44] [Rank 0] step:6581/10000 train_time:533428ms step_avg:81.06ms
+[2025-07-08 00:19:45] [Rank 0] step:6601/10000 train_time:534926ms step_avg:81.04ms
+[2025-07-08 00:19:47] [Rank 0] step:6621/10000 train_time:536423ms step_avg:81.02ms
+[2025-07-08 00:19:48] [Rank 0] step:6641/10000 train_time:537920ms step_avg:81.00ms
+[2025-07-08 00:19:50] [Rank 0] step:6661/10000 train_time:540096ms step_avg:81.08ms
+[2025-07-08 00:19:52] [Rank 0] step:6681/10000 train_time:541573ms step_avg:81.06ms
+[2025-07-08 00:19:53] [Rank 0] step:6701/10000 train_time:543070ms step_avg:81.04ms
+[2025-07-08 00:19:55] [Rank 0] step:6721/10000 train_time:544570ms step_avg:81.03ms
+[2025-07-08 00:19:56] [Rank 0] step:6741/10000 train_time:546071ms step_avg:81.01ms
+[2025-07-08 00:19:59] [Rank 0] step:6761/10000 train_time:548214ms step_avg:81.08ms
+[2025-07-08 00:20:00] [Rank 0] step:6781/10000 train_time:549713ms step_avg:81.07ms
+[2025-07-08 00:20:02] [Rank 0] step:6801/10000 train_time:551214ms step_avg:81.05ms
+[2025-07-08 00:20:03] [Rank 0] step:6821/10000 train_time:552716ms step_avg:81.03ms
+[2025-07-08 00:20:05] [Rank 0] step:6841/10000 train_time:554879ms step_avg:81.11ms
+[2025-07-08 00:20:07] [Rank 0] step:6861/10000 train_time:556362ms step_avg:81.09ms
+[2025-07-08 00:20:08] [Rank 0] step:6881/10000 train_time:557863ms step_avg:81.07ms
+[2025-07-08 00:20:10] [Rank 0] step:6901/10000 train_time:559365ms step_avg:81.06ms
+[2025-07-08 00:20:11] [Rank 0] step:6921/10000 train_time:560869ms step_avg:81.04ms
+[2025-07-08 00:20:13] [Rank 0] step:6941/10000 train_time:563033ms step_avg:81.12ms
+[2025-07-08 00:20:15] [Rank 0] step:6961/10000 train_time:564534ms step_avg:81.10ms
+[2025-07-08 00:20:16] [Rank 0] step:6981/10000 train_time:566036ms step_avg:81.08ms
+[2025-07-08 00:20:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:20:19] [Rank 0] PRINT: step:7000/10000 train_loss:0.8726 val_loss:0.8723 train_time:567538ms step_avg:81.08ms
+[2025-07-08 00:20:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:20:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
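The periodic `PRINT: step:... train_loss:... val_loss:...` summaries are the easiest hook for post-hoc analysis of a log in this format. A small standalone parser keyed to the exact line format above (the `parse_val_points` helper is illustrative, not part of the training script):

```python
import re

# Matches the periodic summaries, e.g.
# "PRINT: step:7000/10000 train_loss:0.8726 val_loss:0.8723 ..."
PAT = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_val_points(log_path):
    """Return sorted, de-duplicated (step, train_loss, val_loss) triples."""
    points = set()
    with open(log_path) as fh:
        for line in fh:
            m = PAT.search(line)
            if m:
                step, tr, va = m.groups()
                points.add((int(step), float(tr), float(va)))
    return sorted(points)
```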
+[2025-07-08 00:20:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:25:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:25:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:25:43] [Rank 0] Total Loss: 5.5868
+[2025-07-08 00:25:43] [Rank 0] Total FTA: 0.9041
+[2025-07-08 00:25:43] [Rank 0] Group 0 Loss: 5.7252
+[2025-07-08 00:25:43] [Rank 0] Group 1 Loss: 5.8764
+[2025-07-08 00:25:43] [Rank 0] Group 2 Loss: 5.3210
+[2025-07-08 00:25:43] [Rank 0] Group 3 Loss: 5.4934
+[2025-07-08 00:25:43] [Rank 0] Group 4 Loss: 5.6266
+[2025-07-08 00:25:43] [Rank 0] Group 5 Loss: 5.5064
+[2025-07-08 00:25:43] [Rank 0] Group 6 Loss: 5.4515
+[2025-07-08 00:25:43] [Rank 0] Group 7 Loss: 5.5987
+[2025-07-08 00:25:43] [Rank 0] Group 8 Loss: 5.5876
+[2025-07-08 00:25:43] [Rank 0] Group 9 Loss: 5.5167
+[2025-07-08 00:25:43] [Rank 0] Group 10 Loss: 5.5785
+[2025-07-08 00:25:43] [Rank 0] Group 11 Loss: 5.5921
+[2025-07-08 00:25:43] [Rank 0] Group 0 FTA: 0.6814
+[2025-07-08 00:25:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:25:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 00:25:43] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-08 00:25:43] [Rank 0] Group 4 FTA: 0.9453
+[2025-07-08 00:25:43] [Rank 0] Group 5 FTA: 0.8984
+[2025-07-08 00:25:43] [Rank 0] Group 6 FTA: 0.9193
+[2025-07-08 00:25:43] [Rank 0] Group 7 FTA: 0.9141
+[2025-07-08 00:25:43] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-08 00:25:43] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-08 00:25:43] [Rank 0] Group 10 FTA: 0.9297
+[2025-07-08 00:25:43] [Rank 0] Group 11 FTA: 0.9258
+[2025-07-08 00:25:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:25:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:25:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:25:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:25:45] [Rank 0] step:7001/10000 train_time:567558ms step_avg:81.07ms
+[2025-07-08 00:25:46] [Rank 0] step:7021/10000 train_time:569061ms step_avg:81.05ms
+[2025-07-08 00:25:48] [Rank 0] step:7041/10000 train_time:570791ms step_avg:81.07ms
+[2025-07-08 00:25:49] [Rank 0] step:7061/10000 train_time:572283ms step_avg:81.05ms
+[2025-07-08 00:25:51] [Rank 0] step:7081/10000 train_time:573779ms step_avg:81.03ms
+[2025-07-08 00:25:52] [Rank 0] step:7101/10000 train_time:575275ms step_avg:81.01ms
+[2025-07-08 00:25:54] [Rank 0] step:7121/10000 train_time:577012ms step_avg:81.03ms
+[2025-07-08 00:25:56] [Rank 0] step:7141/10000 train_time:578604ms step_avg:81.03ms
+[2025-07-08 00:25:57] [Rank 0] step:7161/10000 train_time:580198ms step_avg:81.02ms
+[2025-07-08 00:25:59] [Rank 0] step:7181/10000 train_time:581693ms step_avg:81.00ms
+[2025-07-08 00:26:01] [Rank 0] step:7201/10000 train_time:583193ms step_avg:80.99ms
+[2025-07-08 00:26:02] [Rank 0] step:7221/10000 train_time:585354ms step_avg:81.06ms
+[2025-07-08 00:26:04] [Rank 0] step:7241/10000 train_time:586853ms step_avg:81.05ms
+[2025-07-08 00:26:06] [Rank 0] step:7261/10000 train_time:588355ms step_avg:81.03ms
+[2025-07-08 00:26:07] [Rank 0] step:7281/10000 train_time:589854ms step_avg:81.01ms
+[2025-07-08 00:26:09] [Rank 0] step:7301/10000 train_time:591994ms step_avg:81.08ms
+[2025-07-08 00:26:11] [Rank 0] step:7321/10000 train_time:593495ms step_avg:81.07ms
+[2025-07-08 00:26:12] [Rank 0] step:7341/10000 train_time:594994ms step_avg:81.05ms
+[2025-07-08 00:26:14] [Rank 0] step:7361/10000 train_time:596494ms step_avg:81.03ms
+[2025-07-08 00:26:16] [Rank 0] step:7381/10000 train_time:598659ms step_avg:81.11ms
+[2025-07-08 00:26:17] [Rank 0] step:7401/10000 train_time:600140ms step_avg:81.09ms
+[2025-07-08 00:26:19] [Rank 0] step:7421/10000 train_time:601640ms step_avg:81.07ms
+[2025-07-08 00:26:20] [Rank 0] step:7441/10000 train_time:603141ms step_avg:81.06ms
+[2025-07-08 00:26:22] [Rank 0] step:7461/10000 train_time:604644ms step_avg:81.04ms
+[2025-07-08 00:26:24] [Rank 0] step:7481/10000 train_time:606811ms step_avg:81.11ms
+[2025-07-08 00:26:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:26:26] [Rank 0] PRINT: step:7500/10000 train_loss:0.8706 val_loss:0.8705 train_time:608313ms step_avg:81.11ms
+[2025-07-08 00:26:26] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:26:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
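# [Sketch] The "Using stratified sampling to extract ~5000 samples" lines above
# refer to building the detailed-evaluation set with a per-stratum quota. The
# sampler itself is not part of this excerpt; the following is a hypothetical
# illustration (names like `samples_by_group` and the quota rule are
# assumptions, not the script's code). With many strata, rounding each quota
# up is one way the final size (5633 here) can land above the nominal target.
import math
import random

def stratified_sample(samples_by_group: dict, target_total: int = 5000, seed: int = 0) -> list:
    rng = random.Random(seed)
    quota = math.ceil(target_total / len(samples_by_group))  # equal share per stratum
    picked = []
    for _, group_samples in sorted(samples_by_group.items()):
        picked.extend(rng.sample(group_samples, min(quota, len(group_samples))))
    return picked  # len(picked) can differ from target_total after rounding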
+[2025-07-08 00:26:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:31:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:31:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:31:50] [Rank 0] Total Loss: 5.6756
+[2025-07-08 00:31:50] [Rank 0] Total FTA: 0.8855
+[2025-07-08 00:31:50] [Rank 0] Group 0 Loss: 5.8649
+[2025-07-08 00:31:50] [Rank 0] Group 1 Loss: 6.0234
+[2025-07-08 00:31:50] [Rank 0] Group 2 Loss: 5.3050
+[2025-07-08 00:31:50] [Rank 0] Group 3 Loss: 5.6880
+[2025-07-08 00:31:50] [Rank 0] Group 4 Loss: 5.7695
+[2025-07-08 00:31:50] [Rank 0] Group 5 Loss: 5.5753
+[2025-07-08 00:31:50] [Rank 0] Group 6 Loss: 5.5061
+[2025-07-08 00:31:50] [Rank 0] Group 7 Loss: 5.7077
+[2025-07-08 00:31:50] [Rank 0] Group 8 Loss: 5.6666
+[2025-07-08 00:31:50] [Rank 0] Group 9 Loss: 5.5464
+[2025-07-08 00:31:50] [Rank 0] Group 10 Loss: 5.6770
+[2025-07-08 00:31:50] [Rank 0] Group 11 Loss: 5.6264
+[2025-07-08 00:31:50] [Rank 0] Group 0 FTA: 0.6489
+[2025-07-08 00:31:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:31:50] [Rank 0] Group 2 FTA: 0.8307
+[2025-07-08 00:31:50] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-08 00:31:50] [Rank 0] Group 4 FTA: 0.8698
+[2025-07-08 00:31:50] [Rank 0] Group 5 FTA: 0.9193
+[2025-07-08 00:31:50] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-08 00:31:50] [Rank 0] Group 7 FTA: 0.9297
+[2025-07-08 00:31:50] [Rank 0] Group 8 FTA: 0.9479
+[2025-07-08 00:31:50] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-08 00:31:50] [Rank 0] Group 10 FTA: 0.9102
+[2025-07-08 00:31:50] [Rank 0] Group 11 FTA: 0.9248
+[2025-07-08 00:31:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:31:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:31:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:31:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:31:51] [Rank 0] step:7501/10000 train_time:608333ms step_avg:81.10ms
+[2025-07-08 00:31:53] [Rank 0] step:7521/10000 train_time:609835ms step_avg:81.08ms
+[2025-07-08 00:31:54] [Rank 0] step:7541/10000 train_time:611329ms step_avg:81.07ms
+[2025-07-08 00:31:57] [Rank 0] step:7561/10000 train_time:612825ms step_avg:81.05ms
+[2025-07-08 00:31:58] [Rank 0] step:7581/10000 train_time:614985ms step_avg:81.12ms
+[2025-07-08 00:32:00] [Rank 0] step:7601/10000 train_time:616478ms step_avg:81.10ms
+[2025-07-08 00:32:01] [Rank 0] step:7621/10000 train_time:617975ms step_avg:81.09ms
+[2025-07-08 00:32:03] [Rank 0] step:7641/10000 train_time:619472ms step_avg:81.07ms
+[2025-07-08 00:32:04] [Rank 0] step:7661/10000 train_time:621206ms step_avg:81.09ms
+[2025-07-08 00:32:06] [Rank 0] step:7681/10000 train_time:622703ms step_avg:81.07ms
+[2025-07-08 00:32:07] [Rank 0] step:7701/10000 train_time:624200ms step_avg:81.05ms
+[2025-07-08 00:32:09] [Rank 0] step:7721/10000 train_time:625698ms step_avg:81.04ms
+[2025-07-08 00:32:11] [Rank 0] step:7741/10000 train_time:627247ms step_avg:81.03ms
+[2025-07-08 00:32:12] [Rank 0] step:7761/10000 train_time:629359ms step_avg:81.09ms
+[2025-07-08 00:32:14] [Rank 0] step:7781/10000 train_time:630855ms step_avg:81.08ms
+[2025-07-08 00:32:16] [Rank 0] step:7801/10000 train_time:632559ms step_avg:81.09ms
+[2025-07-08 00:32:17] [Rank 0] step:7821/10000 train_time:634101ms step_avg:81.08ms
+[2025-07-08 00:32:19] [Rank 0] step:7841/10000 train_time:635836ms step_avg:81.09ms
+[2025-07-08 00:32:20] [Rank 0] step:7861/10000 train_time:637335ms step_avg:81.08ms
+[2025-07-08 00:32:22] [Rank 0] step:7881/10000 train_time:638834ms step_avg:81.06ms
+[2025-07-08 00:32:23] [Rank 0] step:7901/10000 train_time:640334ms step_avg:81.04ms
+[2025-07-08 00:32:26] [Rank 0] step:7921/10000 train_time:642501ms step_avg:81.11ms
+[2025-07-08 00:32:27] [Rank 0] step:7941/10000 train_time:643983ms step_avg:81.10ms
+[2025-07-08 00:32:29] [Rank 0] step:7961/10000 train_time:645484ms step_avg:81.08ms
+[2025-07-08 00:32:30] [Rank 0] step:7981/10000 train_time:646983ms step_avg:81.07ms
+[2025-07-08 00:32:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:32:32] [Rank 0] PRINT: step:8000/10000 train_loss:0.8688 val_loss:0.8694 train_time:648483ms step_avg:81.06ms
+[2025-07-08 00:32:32] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:32:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
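# [Sketch] The recurring divisibility warning above is plain integer
# arithmetic: 1966080 / 262144 = 7.5, so an integer number of validation
# batches covers only 7 * 262144 = 1835008 tokens and the remaining 131072
# are skipped. The actual guard in the script is outside this excerpt; an
# equivalent check looks like this:
val_tokens = 1966080       # from config.json
val_batch_size = 262144    # e.g. val_seq_len (65536) times 4 sequences per batch (assumed)
val_steps, remainder = divmod(val_tokens, val_batch_size)
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
# Here val_steps == 7 and remainder == 131072, i.e. half a batch of
# validation tokens is dropped by an integer-division loop.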
+[2025-07-08 00:32:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:37:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:37:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:37:57] [Rank 0] Total Loss: 5.6281
+[2025-07-08 00:37:57] [Rank 0] Total FTA: 0.9384
+[2025-07-08 00:37:57] [Rank 0] Group 0 Loss: 5.7150
+[2025-07-08 00:37:57] [Rank 0] Group 1 Loss: 5.9344
+[2025-07-08 00:37:57] [Rank 0] Group 2 Loss: 5.3595
+[2025-07-08 00:37:57] [Rank 0] Group 3 Loss: 5.6500
+[2025-07-08 00:37:57] [Rank 0] Group 4 Loss: 5.6667
+[2025-07-08 00:37:57] [Rank 0] Group 5 Loss: 5.4756
+[2025-07-08 00:37:57] [Rank 0] Group 6 Loss: 5.5043
+[2025-07-08 00:37:57] [Rank 0] Group 7 Loss: 5.6743
+[2025-07-08 00:37:57] [Rank 0] Group 8 Loss: 5.6230
+[2025-07-08 00:37:57] [Rank 0] Group 9 Loss: 5.5764
+[2025-07-08 00:37:57] [Rank 0] Group 10 Loss: 5.6104
+[2025-07-08 00:37:57] [Rank 0] Group 11 Loss: 5.6358
+[2025-07-08 00:37:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 00:37:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:37:57] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-08 00:37:57] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-08 00:37:57] [Rank 0] Group 4 FTA: 0.9427
+[2025-07-08 00:37:57] [Rank 0] Group 5 FTA: 0.9245
+[2025-07-08 00:37:57] [Rank 0] Group 6 FTA: 0.9453
+[2025-07-08 00:37:57] [Rank 0] Group 7 FTA: 0.9062
+[2025-07-08 00:37:57] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-08 00:37:57] [Rank 0] Group 9 FTA: 0.9219
+[2025-07-08 00:37:57] [Rank 0] Group 10 FTA: 0.9238
+[2025-07-08 00:37:57] [Rank 0] Group 11 FTA: 0.9336
+[2025-07-08 00:37:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:37:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:37:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:37:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:37:58] [Rank 0] step:8001/10000 train_time:648502ms step_avg:81.05ms
+[2025-07-08 00:38:00] [Rank 0] step:8021/10000 train_time:650666ms step_avg:81.12ms
+[2025-07-08 00:38:02] [Rank 0] step:8041/10000 train_time:652157ms step_avg:81.10ms
+[2025-07-08 00:38:03] [Rank 0] step:8061/10000 train_time:653654ms step_avg:81.09ms
+[2025-07-08 00:38:05] [Rank 0] step:8081/10000 train_time:655150ms step_avg:81.07ms
+[2025-07-08 00:38:07] [Rank 0] step:8101/10000 train_time:656901ms step_avg:81.09ms
+[2025-07-08 00:38:08] [Rank 0] step:8121/10000 train_time:658808ms step_avg:81.12ms
+[2025-07-08 00:38:10] [Rank 0] step:8141/10000 train_time:660304ms step_avg:81.11ms
+[2025-07-08 00:38:11] [Rank 0] step:8161/10000 train_time:661800ms step_avg:81.09ms
+[2025-07-08 00:38:13] [Rank 0] step:8181/10000 train_time:663298ms step_avg:81.08ms
+[2025-07-08 00:38:15] [Rank 0] step:8201/10000 train_time:665450ms step_avg:81.14ms
+[2025-07-08 00:38:17] [Rank 0] step:8221/10000 train_time:666946ms step_avg:81.13ms
+[2025-07-08 00:38:18] [Rank 0] step:8241/10000 train_time:668446ms step_avg:81.11ms
+[2025-07-08 00:38:20] [Rank 0] step:8261/10000 train_time:669945ms step_avg:81.10ms
+[2025-07-08 00:38:22] [Rank 0] step:8281/10000 train_time:671497ms step_avg:81.09ms
+[2025-07-08 00:38:23] [Rank 0] step:8301/10000 train_time:673596ms step_avg:81.15ms
+[2025-07-08 00:38:25] [Rank 0] step:8321/10000 train_time:675096ms step_avg:81.13ms
+[2025-07-08 00:38:26] [Rank 0] step:8341/10000 train_time:676596ms step_avg:81.12ms
+[2025-07-08 00:38:28] [Rank 0] step:8361/10000 train_time:678096ms step_avg:81.10ms
+[2025-07-08 00:38:30] [Rank 0] step:8381/10000 train_time:680265ms step_avg:81.17ms
+[2025-07-08 00:38:31] [Rank 0] step:8401/10000 train_time:681767ms step_avg:81.15ms
+[2025-07-08 00:38:33] [Rank 0] step:8421/10000 train_time:683268ms step_avg:81.14ms
+[2025-07-08 00:38:35] [Rank 0] step:8441/10000 train_time:685010ms step_avg:81.15ms
+[2025-07-08 00:38:37] [Rank 0] step:8461/10000 train_time:686821ms step_avg:81.17ms
+[2025-07-08 00:38:38] [Rank 0] step:8481/10000 train_time:688733ms step_avg:81.21ms
+[2025-07-08 00:38:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:38:41] [Rank 0] PRINT: step:8500/10000 train_loss:0.8673 val_loss:0.8684 train_time:690231ms step_avg:81.20ms
+[2025-07-08 00:38:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:38:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
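# [Sketch] The step_avg field in the step lines above is simply cumulative
# wall-clock training time divided by the step index; checking one logged
# entry from this run:
train_time_ms = 673596                   # logged at step 8301
step_avg = train_time_ms / 8301
assert f"{step_avg:.2f}" == "81.15"      # matches "step_avg:81.15ms" in the log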
+[2025-07-08 00:38:41] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:44:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:44:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:44:08] [Rank 0] Total Loss: 5.6191
+[2025-07-08 00:44:08] [Rank 0] Total FTA: 0.9423
+[2025-07-08 00:44:08] [Rank 0] Group 0 Loss: 5.7275
+[2025-07-08 00:44:08] [Rank 0] Group 1 Loss: 6.0436
+[2025-07-08 00:44:08] [Rank 0] Group 2 Loss: 5.4819
+[2025-07-08 00:44:08] [Rank 0] Group 3 Loss: 5.5671
+[2025-07-08 00:44:08] [Rank 0] Group 4 Loss: 5.7100
+[2025-07-08 00:44:08] [Rank 0] Group 5 Loss: 5.4241
+[2025-07-08 00:44:08] [Rank 0] Group 6 Loss: 5.4545
+[2025-07-08 00:44:08] [Rank 0] Group 7 Loss: 5.6298
+[2025-07-08 00:44:08] [Rank 0] Group 8 Loss: 5.5201
+[2025-07-08 00:44:08] [Rank 0] Group 9 Loss: 5.5171
+[2025-07-08 00:44:08] [Rank 0] Group 10 Loss: 5.5769
+[2025-07-08 00:44:08] [Rank 0] Group 11 Loss: 5.6297
+[2025-07-08 00:44:08] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 00:44:08] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:44:08] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 00:44:08] [Rank 0] Group 3 FTA: 0.9167
+[2025-07-08 00:44:08] [Rank 0] Group 4 FTA: 0.9193
+[2025-07-08 00:44:08] [Rank 0] Group 5 FTA: 0.9062
+[2025-07-08 00:44:08] [Rank 0] Group 6 FTA: 0.9401
+[2025-07-08 00:44:08] [Rank 0] Group 7 FTA: 0.9089
+[2025-07-08 00:44:08] [Rank 0] Group 8 FTA: 0.9141
+[2025-07-08 00:44:08] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-08 00:44:08] [Rank 0] Group 10 FTA: 0.9199
+[2025-07-08 00:44:08] [Rank 0] Group 11 FTA: 0.9258
+[2025-07-08 00:44:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:44:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:44:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:44:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:44:10] [Rank 0] step:8501/10000 train_time:690251ms step_avg:81.20ms
+[2025-07-08 00:44:11] [Rank 0] step:8521/10000 train_time:691761ms step_avg:81.18ms
+[2025-07-08 00:44:13] [Rank 0] step:8541/10000 train_time:693256ms step_avg:81.17ms
+[2025-07-08 00:44:15] [Rank 0] step:8561/10000 train_time:695401ms step_avg:81.23ms
+[2025-07-08 00:44:16] [Rank 0] step:8581/10000 train_time:696898ms step_avg:81.21ms
+[2025-07-08 00:44:18] [Rank 0] step:8601/10000 train_time:698394ms step_avg:81.20ms
+[2025-07-08 00:44:19] [Rank 0] step:8621/10000 train_time:699889ms step_avg:81.18ms
+[2025-07-08 00:44:21] [Rank 0] step:8641/10000 train_time:701386ms step_avg:81.17ms
+[2025-07-08 00:44:23] [Rank 0] step:8661/10000 train_time:703548ms step_avg:81.23ms
+[2025-07-08 00:44:24] [Rank 0] step:8681/10000 train_time:705045ms step_avg:81.22ms
+[2025-07-08 00:44:26] [Rank 0] step:8701/10000 train_time:706543ms step_avg:81.20ms
+[2025-07-08 00:44:27] [Rank 0] step:8721/10000 train_time:708041ms step_avg:81.19ms
+[2025-07-08 00:44:29] [Rank 0] step:8741/10000 train_time:709778ms step_avg:81.20ms
+[2025-07-08 00:44:31] [Rank 0] step:8761/10000 train_time:711277ms step_avg:81.19ms
+[2025-07-08 00:44:32] [Rank 0] step:8781/10000 train_time:712776ms step_avg:81.17ms
+[2025-07-08 00:44:34] [Rank 0] step:8801/10000 train_time:714277ms step_avg:81.16ms
+[2025-07-08 00:44:35] [Rank 0] step:8821/10000 train_time:715828ms step_avg:81.15ms
+[2025-07-08 00:44:37] [Rank 0] step:8841/10000 train_time:717516ms step_avg:81.16ms
+[2025-07-08 00:44:38] [Rank 0] step:8861/10000 train_time:719017ms step_avg:81.14ms
+[2025-07-08 00:44:40] [Rank 0] step:8881/10000 train_time:720518ms step_avg:81.13ms
+[2025-07-08 00:44:41] [Rank 0] step:8901/10000 train_time:722020ms step_avg:81.12ms
+[2025-07-08 00:44:43] [Rank 0] step:8921/10000 train_time:724171ms step_avg:81.18ms
+[2025-07-08 00:44:45] [Rank 0] step:8941/10000 train_time:725673ms step_avg:81.16ms
+[2025-07-08 00:44:46] [Rank 0] step:8961/10000 train_time:727173ms step_avg:81.15ms
+[2025-07-08 00:44:48] [Rank 0] step:8981/10000 train_time:728674ms step_avg:81.14ms
+[2025-07-08 00:44:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:44:50] [Rank 0] PRINT: step:9000/10000 train_loss:0.8661 val_loss:0.8678 train_time:730174ms step_avg:81.13ms
+[2025-07-08 00:44:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:44:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
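# [Sketch] "FTA" is never expanded in this log; a plausible reading is
# first-token accuracy, i.e. whether the model's top-1 prediction at the
# answer position equals the first gold answer token. This is an assumption
# about the metric, not code from the script:
import torch

def first_token_accuracy(logits: torch.Tensor, first_answer_ids: torch.Tensor) -> float:
    # logits: (num_samples, vocab_size), taken at each sample's answer position
    # first_answer_ids: (num_samples,), first token id of each gold answer
    preds = logits.argmax(dim=-1)
    return (preds == first_answer_ids).float().mean().item()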
+[2025-07-08 00:44:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:50:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:50:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:50:15] [Rank 0] Total Loss: 5.7995
+[2025-07-08 00:50:15] [Rank 0] Total FTA: 0.9309
+[2025-07-08 00:50:15] [Rank 0] Group 0 Loss: 5.9211
+[2025-07-08 00:50:15] [Rank 0] Group 1 Loss: 6.1953
+[2025-07-08 00:50:15] [Rank 0] Group 2 Loss: 5.6730
+[2025-07-08 00:50:15] [Rank 0] Group 3 Loss: 5.6352
+[2025-07-08 00:50:15] [Rank 0] Group 4 Loss: 5.8603
+[2025-07-08 00:50:15] [Rank 0] Group 5 Loss: 5.6954
+[2025-07-08 00:50:15] [Rank 0] Group 6 Loss: 5.6504
+[2025-07-08 00:50:15] [Rank 0] Group 7 Loss: 5.7847
+[2025-07-08 00:50:15] [Rank 0] Group 8 Loss: 5.8020
+[2025-07-08 00:50:15] [Rank 0] Group 9 Loss: 5.7884
+[2025-07-08 00:50:15] [Rank 0] Group 10 Loss: 5.7701
+[2025-07-08 00:50:15] [Rank 0] Group 11 Loss: 5.7631
+[2025-07-08 00:50:15] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-08 00:50:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:50:15] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-08 00:50:15] [Rank 0] Group 3 FTA: 0.9505
+[2025-07-08 00:50:15] [Rank 0] Group 4 FTA: 0.9609
+[2025-07-08 00:50:15] [Rank 0] Group 5 FTA: 0.9505
+[2025-07-08 00:50:15] [Rank 0] Group 6 FTA: 0.9583
+[2025-07-08 00:50:15] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-08 00:50:15] [Rank 0] Group 8 FTA: 0.9089
+[2025-07-08 00:50:15] [Rank 0] Group 9 FTA: 0.9531
+[2025-07-08 00:50:15] [Rank 0] Group 10 FTA: 0.9434
+[2025-07-08 00:50:15] [Rank 0] Group 11 FTA: 0.9395
+[2025-07-08 00:50:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:50:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:50:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:50:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:50:17] [Rank 0] step:9001/10000 train_time:730202ms step_avg:81.12ms
+[2025-07-08 00:50:18] [Rank 0] step:9021/10000 train_time:732411ms step_avg:81.19ms
+[2025-07-08 00:50:20] [Rank 0] step:9041/10000 train_time:733905ms step_avg:81.18ms
+[2025-07-08 00:50:21] [Rank 0] step:9061/10000 train_time:735399ms step_avg:81.16ms
+[2025-07-08 00:50:23] [Rank 0] step:9081/10000 train_time:736894ms step_avg:81.15ms
+[2025-07-08 00:50:25] [Rank 0] step:9101/10000 train_time:739051ms step_avg:81.21ms
+[2025-07-08 00:50:27] [Rank 0] step:9121/10000 train_time:740547ms step_avg:81.19ms
+[2025-07-08 00:50:28] [Rank 0] step:9141/10000 train_time:742044ms step_avg:81.18ms
+[2025-07-08 00:50:29] [Rank 0] step:9161/10000 train_time:743542ms step_avg:81.16ms
+[2025-07-08 00:50:31] [Rank 0] step:9181/10000 train_time:745097ms step_avg:81.16ms
+[2025-07-08 00:50:33] [Rank 0] step:9201/10000 train_time:746778ms step_avg:81.16ms
+[2025-07-08 00:50:34] [Rank 0] step:9221/10000 train_time:748276ms step_avg:81.15ms
+[2025-07-08 00:50:36] [Rank 0] step:9241/10000 train_time:749776ms step_avg:81.14ms
+[2025-07-08 00:50:37] [Rank 0] step:9261/10000 train_time:751277ms step_avg:81.12ms
+[2025-07-08 00:50:39] [Rank 0] step:9281/10000 train_time:753422ms step_avg:81.18ms
+[2025-07-08 00:50:41] [Rank 0] step:9301/10000 train_time:754920ms step_avg:81.17ms
+[2025-07-08 00:50:42] [Rank 0] step:9321/10000 train_time:756420ms step_avg:81.15ms
+[2025-07-08 00:50:44] [Rank 0] step:9341/10000 train_time:757920ms step_avg:81.14ms
+[2025-07-08 00:50:46] [Rank 0] step:9361/10000 train_time:759423ms step_avg:81.13ms
+[2025-07-08 00:50:47] [Rank 0] step:9381/10000 train_time:761160ms step_avg:81.14ms
+[2025-07-08 00:50:49] [Rank 0] step:9401/10000 train_time:762659ms step_avg:81.13ms
+[2025-07-08 00:50:50] [Rank 0] step:9421/10000 train_time:764161ms step_avg:81.11ms
+[2025-07-08 00:50:52] [Rank 0] step:9441/10000 train_time:765661ms step_avg:81.10ms
+[2025-07-08 00:50:54] [Rank 0] step:9461/10000 train_time:767819ms step_avg:81.16ms
+[2025-07-08 00:50:55] [Rank 0] step:9481/10000 train_time:769320ms step_avg:81.14ms
+[2025-07-08 00:50:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:50:58] [Rank 0] PRINT: step:9500/10000 train_loss:0.8651 val_loss:0.8673 train_time:770820ms step_avg:81.14ms
+[2025-07-08 00:50:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:50:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
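# [Sketch] The "[✓] ... curve updated and saved" lines indicate the script
# re-plots the full metric history after every detailed evaluation. The real
# plotting code is outside this excerpt; a minimal version of the pattern
# (with a hypothetical `history` mapping step -> list of 12 per-group values):
import matplotlib.pyplot as plt

def save_per_class_curves(history: dict, out_path: str, num_groups: int = 12) -> None:
    steps = sorted(history)
    plt.figure()
    for g in range(num_groups):
        plt.plot(steps, [history[s][g] for s in steps], label=f"Group {g}")
    plt.xlabel("step")
    plt.ylabel("loss")
    plt.legend(fontsize=6)
    plt.savefig(out_path)  # overwrite the same PNG on each call, as the log suggests
    plt.close()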
+[2025-07-08 00:50:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:56:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:56:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:56:24] [Rank 0] Total Loss: 5.7677
+[2025-07-08 00:56:24] [Rank 0] Total FTA: 0.9322
+[2025-07-08 00:56:24] [Rank 0] Group 0 Loss: 5.8379
+[2025-07-08 00:56:24] [Rank 0] Group 1 Loss: 6.2335
+[2025-07-08 00:56:24] [Rank 0] Group 2 Loss: 5.3802
+[2025-07-08 00:56:24] [Rank 0] Group 3 Loss: 5.6949
+[2025-07-08 00:56:24] [Rank 0] Group 4 Loss: 5.8279
+[2025-07-08 00:56:24] [Rank 0] Group 5 Loss: 5.6108
+[2025-07-08 00:56:24] [Rank 0] Group 6 Loss: 5.6103
+[2025-07-08 00:56:24] [Rank 0] Group 7 Loss: 5.7913
+[2025-07-08 00:56:24] [Rank 0] Group 8 Loss: 5.7367
+[2025-07-08 00:56:24] [Rank 0] Group 9 Loss: 5.7880
+[2025-07-08 00:56:24] [Rank 0] Group 10 Loss: 5.7866
+[2025-07-08 00:56:24] [Rank 0] Group 11 Loss: 5.7963
+[2025-07-08 00:56:24] [Rank 0] Group 0 FTA: 0.8218
+[2025-07-08 00:56:24] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 00:56:24] [Rank 0] Group 2 FTA: 0.9297
+[2025-07-08 00:56:24] [Rank 0] Group 3 FTA: 0.9010
+[2025-07-08 00:56:24] [Rank 0] Group 4 FTA: 0.9401
+[2025-07-08 00:56:24] [Rank 0] Group 5 FTA: 0.9714
+[2025-07-08 00:56:24] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-08 00:56:24] [Rank 0] Group 7 FTA: 0.9531
+[2025-07-08 00:56:24] [Rank 0] Group 8 FTA: 0.9505
+[2025-07-08 00:56:24] [Rank 0] Group 9 FTA: 0.9648
+[2025-07-08 00:56:24] [Rank 0] Group 10 FTA: 0.9453
+[2025-07-08 00:56:24] [Rank 0] Group 11 FTA: 0.9375
+[2025-07-08 00:56:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 00:56:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 00:56:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 00:56:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 00:56:25] [Rank 0] step:9501/10000 train_time:770841ms step_avg:81.13ms
+[2025-07-08 00:56:27] [Rank 0] step:9521/10000 train_time:772349ms step_avg:81.12ms
+[2025-07-08 00:56:29] [Rank 0] step:9541/10000 train_time:773843ms step_avg:81.11ms
+[2025-07-08 00:56:30] [Rank 0] step:9561/10000 train_time:775805ms step_avg:81.14ms
+[2025-07-08 00:56:32] [Rank 0] step:9581/10000 train_time:777299ms step_avg:81.13ms
+[2025-07-08 00:56:33] [Rank 0] step:9601/10000 train_time:778795ms step_avg:81.12ms
+[2025-07-08 00:56:35] [Rank 0] step:9621/10000 train_time:780291ms step_avg:81.10ms
+[2025-07-08 00:56:37] [Rank 0] step:9641/10000 train_time:782126ms step_avg:81.12ms
+[2025-07-08 00:56:38] [Rank 0] step:9661/10000 train_time:783621ms step_avg:81.11ms
+[2025-07-08 00:56:40] [Rank 0] step:9681/10000 train_time:785118ms step_avg:81.10ms
+[2025-07-08 00:56:41] [Rank 0] step:9701/10000 train_time:786617ms step_avg:81.09ms
+[2025-07-08 00:56:43] [Rank 0] step:9721/10000 train_time:788116ms step_avg:81.07ms
+[2025-07-08 00:56:45] [Rank 0] step:9741/10000 train_time:790276ms step_avg:81.13ms
+[2025-07-08 00:56:46] [Rank 0] step:9761/10000 train_time:791776ms step_avg:81.12ms
+[2025-07-08 00:56:48] [Rank 0] step:9781/10000 train_time:793278ms step_avg:81.10ms
+[2025-07-08 00:56:49] [Rank 0] step:9801/10000 train_time:794776ms step_avg:81.09ms
+[2025-07-08 00:56:51] [Rank 0] step:9821/10000 train_time:796916ms step_avg:81.14ms
+[2025-07-08 00:56:53] [Rank 0] step:9841/10000 train_time:798415ms step_avg:81.13ms
+[2025-07-08 00:56:54] [Rank 0] step:9861/10000 train_time:799915ms step_avg:81.12ms
+[2025-07-08 00:56:56] [Rank 0] step:9881/10000 train_time:801416ms step_avg:81.11ms
+[2025-07-08 00:56:58] [Rank 0] step:9901/10000 train_time:802968ms step_avg:81.10ms
+[2025-07-08 00:56:59] [Rank 0] step:9921/10000 train_time:805062ms step_avg:81.15ms
+[2025-07-08 00:57:01] [Rank 0] step:9941/10000 train_time:806563ms step_avg:81.13ms
+[2025-07-08 00:57:02] [Rank 0] step:9961/10000 train_time:808064ms step_avg:81.12ms
+[2025-07-08 00:57:04] [Rank 0] step:9981/10000 train_time:809565ms step_avg:81.11ms
+[2025-07-08 00:57:06] [Rank 0] step:10000/10000 train_time:811659ms step_avg:81.17ms
+[2025-07-08 00:57:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:57:07] [Rank 0] PRINT: step:10000/10000 train_loss:0.8641 val_loss:0.8670 train_time:811739ms step_avg:81.17ms
+[2025-07-08 00:57:07] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:57:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
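# [Sketch] The closing lines of this log (below) report
# "Peak memory allocated: 8783 MiB reserved: 9916 MiB". Figures in that form
# are conventionally read from torch's CUDA allocator counters; whether this
# script uses exactly these calls is not visible in the excerpt:
import torch

peak_alloc_mib = torch.cuda.max_memory_allocated() // (1024 * 1024)
peak_reserved_mib = torch.cuda.max_memory_reserved() // (1024 * 1024)
print(f"Peak memory allocated: {peak_alloc_mib} MiB reserved: {peak_reserved_mib} MiB")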
+[2025-07-08 00:57:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:02:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:02:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:02:33] [Rank 0] Total Loss: 5.8339
+[2025-07-08 01:02:34] [Rank 0] Total FTA: 0.9327
+[2025-07-08 01:02:34] [Rank 0] Group 0 Loss: 5.8665
+[2025-07-08 01:02:34] [Rank 0] Group 1 Loss: 6.3520
+[2025-07-08 01:02:34] [Rank 0] Group 2 Loss: 5.4863
+[2025-07-08 01:02:34] [Rank 0] Group 3 Loss: 5.8412
+[2025-07-08 01:02:34] [Rank 0] Group 4 Loss: 5.9247
+[2025-07-08 01:02:34] [Rank 0] Group 5 Loss: 5.7331
+[2025-07-08 01:02:34] [Rank 0] Group 6 Loss: 5.5901
+[2025-07-08 01:02:34] [Rank 0] Group 7 Loss: 5.8774
+[2025-07-08 01:02:34] [Rank 0] Group 8 Loss: 5.8059
+[2025-07-08 01:02:34] [Rank 0] Group 9 Loss: 5.7489
+[2025-07-08 01:02:34] [Rank 0] Group 10 Loss: 5.8239
+[2025-07-08 01:02:34] [Rank 0] Group 11 Loss: 5.8581
+[2025-07-08 01:02:34] [Rank 0] Group 0 FTA: 0.8336
+[2025-07-08 01:02:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 01:02:34] [Rank 0] Group 2 FTA: 0.9036
+[2025-07-08 01:02:34] [Rank 0] Group 3 FTA: 0.9062
+[2025-07-08 01:02:34] [Rank 0] Group 4 FTA: 0.9714
+[2025-07-08 01:02:34] [Rank 0] Group 5 FTA: 0.9531
+[2025-07-08 01:02:34] [Rank 0] Group 6 FTA: 0.9688
+[2025-07-08 01:02:34] [Rank 0] Group 7 FTA: 0.9583
+[2025-07-08 01:02:34] [Rank 0] Group 8 FTA: 0.9271
+[2025-07-08 01:02:34] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-08 01:02:34] [Rank 0] Group 10 FTA: 0.9492
+[2025-07-08 01:02:34] [Rank 0] Group 11 FTA: 0.9502
+[2025-07-08 01:02:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_loss_curves.png
+[2025-07-08 01:02:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/per_class_acc_curves.png
+[2025-07-08 01:02:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_loss_curve.png
+[2025-07-08 01:02:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_43/total_acc_curve.png
+[2025-07-08 01:02:35] [Rank 0] step:10001/10000 train_time:811760ms step_avg:81.17ms
+[2025-07-08 01:02:35] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 01:02:35 2025 ---
+[2025-07-08 01:02:35] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9916 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/config.json b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1886a1e8ee13bfefb127c43420876388361b87c2
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/config.json
@@ -0,0 +1,23 @@
+{
+  "cli_args": {
+    "unet": false,
+    "seed": 45,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.001
+  },
+  "hyperparameters": {
+    "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+    "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+    "val_tokens": 1966080,
+    "train_seq_len": 12288,
+    "val_seq_len": 65536,
+    "num_iterations": 10000,
+    "cooldown_frac": 0.8,
+    "vocab_size": 50257,
+    "val_loss_every": 500,
+    "save_checkpoint": false
+  },
+  "run_uuid_for_log": "a4759a00-1c3f-4941-b1c8-5400f13804c8",
+  "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..733f663d2d06bd23f7e57df62006cf8cb71ec5ad
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:af1bbfafa57f3cd05e014d481ce0c0f18870d44342ba37e7ebabab1c215ff074
+size 425237
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..f8581975b7675fa8e15cf7fba1c67489a3800159
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:adae505c8ef3dd6380abfbd5cfd7060a1385968cef1b8414b15332b495e2eb88
+size 365952
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..d620eeae61fbb51762adc61f5ec66dbca3ba44e2 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:85efc920035038a02530d287e4a95f82f41cf304131ca5ce591a46632a874adb +size 107698 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..b3d53ebbe1394c3d4c13cfaeeb63070132fdf98d --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a38a80ef1d8e37ca327ff54453bdb638ad154ebdce99a3baef38e325f404571 +size 105061 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/training_log_a4759a00-1c3f-4941-b1c8-5400f13804c8.txt b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/training_log_a4759a00-1c3f-4941-b1c8-5400f13804c8.txt new file mode 100644 index 0000000000000000000000000000000000000000..6ba9455c8c8306cf00cd679b532ff015f05e5e6b --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/training_log_a4759a00-1c3f-4941-b1c8-5400f13804c8.txt @@ -0,0 +1,5132 @@ +[2025-07-06 21:49:45] [Rank 0] PRINT: --- Script Start: Sun Jul 6 21:49:45 2025 --- +[2025-07-06 21:49:45] [Rank 0] PRINT: --- Script Start: Sun Jul 6 21:49:45 2025 --- +[2025-07-06 21:49:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-06 21:49:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-06 21:49:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-06 21:49:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-06 21:49:45] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-06 21:49:45] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-06 21:49:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45 +[2025-07-06 21:49:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45 +[2025-07-06 21:49:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as 
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
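+# Illustrative launch sketch (the script filename and torchrun flags below are
+# assumptions; only the argparse flags are defined by this script):
+#   torchrun --standalone --nproc_per_node=1 train_gpt_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.001 --seed 45
+# which would produce the run directory logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45.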
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
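+        # (Illustrative example with a hypothetical biography QA item:)
+        #   cleaned_text = "What university did Alice attend? Stanford University"
+        #   -> prompt = "What university did Alice attend?"
+        #      answer = "Stanford University"
+        #      expected_token = first token of tokenizer.encode(" Stanford University"),
+        #      since answers are encoded with a leading space to match the training context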
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
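+# Worked example of the get_lr schedule above (illustrative note, assuming the
+# defaults num_iterations=10000 and cooldown_frac=0.8): the multiplier is 1.0 for
+# the first 2000 steps, then decays linearly to 0.1 over the cooldown:
+#   get_lr(0)     -> 1.0
+#   get_lr(2000)  -> 1.0
+#   get_lr(5000)  -> 0.6625  # w = (1 - 0.5) / 0.8 = 0.625; 0.625*1.0 + 0.375*0.1
+#   get_lr(10000) -> 0.1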
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
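+# Worked example of generate_powerlaw_selection_counts defined above (illustrative,
+# not part of the original run): with m=2, group 0 holds 1 class with 2^2 = 4 samples,
+# group 1 holds 1 class with 2 samples, and group 2 holds 2 classes with 1 sample each:
+#   generate_powerlaw_selection_counts(2)
+#   -> ({0: 4, 1: 2, 2: 1, 3: 1}, [0, 1, 2, 2])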
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
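+# [Editor's note] Reference sketch (assumed, not part of the original run): the .bin shards
+# consumed by _load_data_shard above carry a 1024-byte header of 256 int32s, of which only
+# the first three are read (magic 20240520, version 1, token count), followed by uint16
+# token ids. A compatible writer, with a hypothetical name, would look like:
+#
+#     import numpy as np
+#     def write_data_shard(path, token_ids):
+#         header = np.zeros(256, dtype=np.int32)
+#         header[0], header[1], header[2] = 20240520, 1, len(token_ids)
+#         with open(path, "wb") as f:
+#             f.write(header.tobytes())
+#             f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())
+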
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Append the message to the run's log file once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
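+        # [Editor's note] Worked example with a hypothetical record:
+        #   cleaned = "What is the birth date of Alice Smith? 15 June 1990"
+        #   prompt  = "What is the birth date of Alice Smith?"
+        #   answer  = "15 June 1990"
+        # The leading-space re-encoding below matters because GPT-2 BPE assigns
+        # " 15" and "15" different first token ids.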
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
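+# [Editor's note] Worked example of get_lr above with the logged settings
+# (num_iterations=10000, cooldown_frac=0.8): the multiplier is 1.0 until step 2000
+# (x < 0.2), then decays linearly toward 0.1, e.g.
+#   get_lr(5000)  -> w = (1 - 0.5) / 0.8 = 0.625 -> 0.625 * 1.0 + 0.375 * 0.1 = 0.6625
+#   get_lr(10000) -> w = 0.0 -> 0.1
+# Inside the loop below, each group's lr is set to group["initial_lr"] * get_lr(step).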
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 21:49:46] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 21:49:46] [Rank 0] PRINT: Constructing model...
+[2025-07-06 21:49:48] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-06 21:49:48] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-06 21:49:48] [Rank 0] PRINT: Testing model forward function:
+[2025-07-06 21:49:49] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-06 21:49:49] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-06 21:49:49] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-06 21:49:49] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-06 21:49:49] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-06 21:49:49] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-06 21:49:49] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-06 21:49:49] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-06 21:49:49] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-06 21:49:49] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-06 21:49:49] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-06 21:49:49] [Rank 0] PRINT: Model compilation complete.
+[2025-07-06 21:49:49] [Rank 0] PRINT: Starting warmup...
+[2025-07-06 21:51:09] [Rank 0] PRINT: Warmup complete.
+[2025-07-06 21:51:09] [Rank 0] PRINT: Starting training...
+[2025-07-06 21:51:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:51:17] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 21:51:17] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 21:51:19] [Rank 0] step:21/10000 train_time:1757ms step_avg:83.65ms +[2025-07-06 21:51:19] [Rank 0] step:21/10000 train_time:1757ms step_avg:83.65ms +[2025-07-06 21:51:21] [Rank 0] step:41/10000 train_time:3217ms step_avg:78.47ms +[2025-07-06 21:51:21] [Rank 0] step:41/10000 train_time:3217ms step_avg:78.47ms +[2025-07-06 21:51:22] [Rank 0] step:61/10000 train_time:4676ms step_avg:76.65ms +[2025-07-06 21:51:22] [Rank 0] step:61/10000 train_time:4676ms step_avg:76.65ms +[2025-07-06 21:51:23] [Rank 0] step:81/10000 train_time:6136ms step_avg:75.75ms +[2025-07-06 21:51:23] [Rank 0] step:81/10000 train_time:6136ms step_avg:75.75ms +[2025-07-06 21:51:26] [Rank 0] step:101/10000 train_time:8240ms step_avg:81.58ms +[2025-07-06 21:51:26] [Rank 0] step:101/10000 train_time:8240ms step_avg:81.58ms +[2025-07-06 21:51:27] [Rank 0] step:121/10000 train_time:9704ms step_avg:80.20ms +[2025-07-06 21:51:27] [Rank 0] step:121/10000 train_time:9704ms step_avg:80.20ms +[2025-07-06 21:51:28] [Rank 0] step:141/10000 train_time:11167ms step_avg:79.20ms +[2025-07-06 21:51:28] [Rank 0] step:141/10000 train_time:11167ms step_avg:79.20ms +[2025-07-06 21:51:30] [Rank 0] step:161/10000 train_time:12632ms step_avg:78.46ms +[2025-07-06 21:51:30] [Rank 0] step:161/10000 train_time:12632ms step_avg:78.46ms +[2025-07-06 21:51:32] [Rank 0] step:181/10000 train_time:14102ms step_avg:77.91ms +[2025-07-06 21:51:32] [Rank 0] step:181/10000 train_time:14102ms step_avg:77.91ms +[2025-07-06 21:51:34] [Rank 0] step:201/10000 train_time:16209ms step_avg:80.64ms +[2025-07-06 21:51:34] [Rank 0] step:201/10000 train_time:16209ms step_avg:80.64ms +[2025-07-06 21:51:35] [Rank 0] step:221/10000 train_time:17678ms step_avg:79.99ms +[2025-07-06 21:51:35] [Rank 0] step:221/10000 train_time:17678ms step_avg:79.99ms +[2025-07-06 21:51:36] [Rank 0] step:241/10000 train_time:19149ms step_avg:79.46ms +[2025-07-06 21:51:36] [Rank 0] step:241/10000 train_time:19149ms step_avg:79.46ms +[2025-07-06 21:51:38] [Rank 0] step:261/10000 train_time:20622ms step_avg:79.01ms +[2025-07-06 21:51:38] [Rank 0] step:261/10000 train_time:20622ms step_avg:79.01ms +[2025-07-06 21:51:40] [Rank 0] step:281/10000 train_time:22737ms step_avg:80.92ms +[2025-07-06 21:51:40] [Rank 0] step:281/10000 train_time:22737ms step_avg:80.92ms +[2025-07-06 21:51:42] [Rank 0] step:301/10000 train_time:24208ms step_avg:80.42ms +[2025-07-06 21:51:42] [Rank 0] step:301/10000 train_time:24208ms step_avg:80.42ms +[2025-07-06 21:51:43] [Rank 0] step:321/10000 train_time:25681ms step_avg:80.00ms +[2025-07-06 21:51:43] [Rank 0] step:321/10000 train_time:25681ms step_avg:80.00ms +[2025-07-06 21:51:44] [Rank 0] step:341/10000 train_time:27153ms step_avg:79.63ms +[2025-07-06 21:51:44] [Rank 0] step:341/10000 train_time:27153ms step_avg:79.63ms +[2025-07-06 21:51:46] [Rank 0] step:361/10000 train_time:28625ms step_avg:79.29ms +[2025-07-06 21:51:46] [Rank 0] step:361/10000 train_time:28625ms step_avg:79.29ms +[2025-07-06 21:51:48] [Rank 0] step:381/10000 train_time:30334ms step_avg:79.62ms +[2025-07-06 21:51:48] [Rank 0] step:381/10000 train_time:30334ms step_avg:79.62ms +[2025-07-06 21:51:49] [Rank 0] step:401/10000 train_time:31806ms step_avg:79.32ms +[2025-07-06 21:51:49] [Rank 0] step:401/10000 train_time:31806ms step_avg:79.32ms +[2025-07-06 21:51:51] [Rank 0] step:421/10000 train_time:33281ms step_avg:79.05ms 
+[2025-07-06 21:51:51] [Rank 0] step:421/10000 train_time:33281ms step_avg:79.05ms +[2025-07-06 21:51:52] [Rank 0] step:441/10000 train_time:34755ms step_avg:78.81ms +[2025-07-06 21:51:52] [Rank 0] step:441/10000 train_time:34755ms step_avg:78.81ms +[2025-07-06 21:51:54] [Rank 0] step:461/10000 train_time:36460ms step_avg:79.09ms +[2025-07-06 21:51:54] [Rank 0] step:461/10000 train_time:36460ms step_avg:79.09ms +[2025-07-06 21:51:55] [Rank 0] step:481/10000 train_time:37936ms step_avg:78.87ms +[2025-07-06 21:51:55] [Rank 0] step:481/10000 train_time:37936ms step_avg:78.87ms +[2025-07-06 21:51:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:51:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:51:58] [Rank 0] PRINT: step:500/10000 train_loss:4.9436 val_loss:2.0549 train_time:39409ms step_avg:78.82ms +[2025-07-06 21:51:58] [Rank 0] PRINT: step:500/10000 train_loss:4.9436 val_loss:2.0549 train_time:39409ms step_avg:78.82ms +[2025-07-06 21:51:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:51:58] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:51:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 21:51:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 21:51:58] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 21:51:58] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 21:57:21] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 21:57:21] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 21:57:21] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 21:57:21] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 21:57:21] [Rank 0] Total Loss: 4.0880 +[2025-07-06 21:57:21] [Rank 0] Total Loss: 4.0880 +[2025-07-06 21:57:21] [Rank 0] Total FTA: 0.0889 +[2025-07-06 21:57:21] [Rank 0] Total FTA: 0.0889 +[2025-07-06 21:57:21] [Rank 0] Group 0 Loss: 4.3294 +[2025-07-06 21:57:21] [Rank 0] Group 0 Loss: 4.3294 +[2025-07-06 21:57:21] [Rank 0] Group 1 Loss: 4.0669 +[2025-07-06 21:57:21] [Rank 0] Group 1 Loss: 4.0669 +[2025-07-06 21:57:21] [Rank 0] Group 2 Loss: 3.9976 +[2025-07-06 21:57:21] [Rank 0] Group 2 Loss: 3.9976 +[2025-07-06 21:57:21] [Rank 0] Group 3 Loss: 3.9923 +[2025-07-06 21:57:21] [Rank 0] Group 3 Loss: 3.9923 +[2025-07-06 21:57:21] [Rank 0] Group 4 Loss: 4.0623 +[2025-07-06 21:57:21] [Rank 0] Group 4 Loss: 4.0623 +[2025-07-06 21:57:21] [Rank 0] Group 5 Loss: 4.0364 +[2025-07-06 21:57:21] [Rank 0] Group 5 Loss: 4.0364 +[2025-07-06 21:57:21] [Rank 0] Group 6 Loss: 4.0103 +[2025-07-06 21:57:21] [Rank 0] Group 6 Loss: 4.0103 +[2025-07-06 21:57:21] [Rank 0] Group 7 Loss: 4.0700 +[2025-07-06 21:57:21] [Rank 0] Group 7 Loss: 4.0700 +[2025-07-06 21:57:21] [Rank 0] Group 8 Loss: 4.0439 +[2025-07-06 21:57:21] [Rank 0] Group 8 Loss: 4.0439 +[2025-07-06 21:57:21] [Rank 0] Group 9 Loss: 4.0620 +[2025-07-06 21:57:21] [Rank 0] Group 9 Loss: 4.0620 +[2025-07-06 21:57:21] [Rank 0] Group 10 Loss: 4.0847 +[2025-07-06 21:57:21] [Rank 0] Group 10 Loss: 4.0847 +[2025-07-06 21:57:21] [Rank 0] Group 11 Loss: 4.0738 +[2025-07-06 21:57:21] [Rank 0] Group 11 Loss: 4.0738 +[2025-07-06 21:57:21] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-06 21:57:21] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-06 
+[2025-07-06 21:57:21] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 21:57:21] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-06 21:57:21] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-06 21:57:21] [Rank 0] Group 4 FTA: 0.0130
+[2025-07-06 21:57:21] [Rank 0] Group 5 FTA: 0.0807
+[2025-07-06 21:57:21] [Rank 0] Group 6 FTA: 0.0990
+[2025-07-06 21:57:21] [Rank 0] Group 7 FTA: 0.1250
+[2025-07-06 21:57:21] [Rank 0] Group 8 FTA: 0.0807
+[2025-07-06 21:57:21] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-06 21:57:21] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-06 21:57:21] [Rank 0] Group 11 FTA: 0.0889
+[2025-07-06 21:57:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 21:57:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 21:57:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 21:57:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 21:57:23] [Rank 0] step:501/10000 train_time:39429ms step_avg:78.70ms
+[2025-07-06 21:57:24] [Rank 0] step:521/10000 train_time:40885ms step_avg:78.48ms
+[2025-07-06 21:57:26] [Rank 0] step:541/10000 train_time:43016ms step_avg:79.51ms
+[2025-07-06 21:57:28] [Rank 0] step:561/10000 train_time:44459ms step_avg:79.25ms
+[2025-07-06 21:57:29] [Rank 0] step:581/10000 train_time:45922ms step_avg:79.04ms
+[2025-07-06 21:57:31] [Rank 0] step:601/10000 train_time:47388ms step_avg:78.85ms
+[2025-07-06 21:57:32] [Rank 0] step:621/10000 train_time:48854ms step_avg:78.67ms
+[2025-07-06 21:57:34] [Rank 0] step:641/10000 train_time:50555ms step_avg:78.87ms
+[2025-07-06 21:57:35] [Rank 0] step:661/10000 train_time:52019ms step_avg:78.70ms
+[2025-07-06 21:57:37] [Rank 0] step:681/10000 train_time:53487ms step_avg:78.54ms
+[2025-07-06 21:57:38] [Rank 0] step:701/10000 train_time:54957ms step_avg:78.40ms
+[2025-07-06 21:57:40] [Rank 0] step:721/10000 train_time:56683ms step_avg:78.62ms
+[2025-07-06 21:57:41] [Rank 0] step:741/10000 train_time:58132ms step_avg:78.45ms
+[2025-07-06 21:57:43] [Rank 0] step:761/10000 train_time:59611ms step_avg:78.33ms
+[2025-07-06 21:57:44] [Rank 0] step:781/10000 train_time:61095ms step_avg:78.23ms
+[2025-07-06 21:57:46] [Rank 0] step:801/10000 train_time:62574ms step_avg:78.12ms
+[2025-07-06 21:57:48] [Rank 0] step:821/10000 train_time:64694ms step_avg:78.80ms
+[2025-07-06 21:57:49] [Rank 0] step:841/10000 train_time:66173ms step_avg:78.68ms
+[2025-07-06 21:57:51] [Rank 0] step:861/10000 train_time:67655ms step_avg:78.58ms
+[2025-07-06 21:57:52] [Rank 0] step:881/10000 train_time:69137ms step_avg:78.48ms
+[2025-07-06 21:57:54] [Rank 0] step:901/10000 train_time:71305ms step_avg:79.14ms
+[2025-07-06 21:57:56] [Rank 0] step:921/10000 train_time:72768ms step_avg:79.01ms
+[2025-07-06 21:57:57] [Rank 0] step:941/10000 train_time:74250ms step_avg:78.91ms
+[2025-07-06 21:57:59] [Rank 0] step:961/10000 train_time:75733ms step_avg:78.81ms
+[2025-07-06 21:58:00] [Rank 0] step:981/10000 train_time:77217ms step_avg:78.71ms
+[2025-07-06 21:58:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
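[Editor's note: the divisibility warning above recurs at every validation pass. A minimal sketch of the arithmetic behind it, with illustrative names (the script's actual identifiers are not shown in this log):

    # Sketch only: reproduces the warning's arithmetic, not the script's code.
    val_tokens = 1966080
    val_batch_size = 262144

    full_batches, remainder = divmod(val_tokens, val_batch_size)  # (7, 131072)
    if remainder != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
    # A loop over full batches evaluates 7 * 262144 = 1835008 tokens;
    # the remaining 131072 tokens (exactly half a batch) are skipped.
]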
+[2025-07-06 21:58:03] [Rank 0] PRINT: step:1000/10000 train_loss:1.7015 val_loss:1.5382 train_time:78936ms step_avg:78.94ms
+[2025-07-06 21:58:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:58:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:58:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:03:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:03:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:03:26] [Rank 0] Total Loss: 4.2066
+[2025-07-06 22:03:26] [Rank 0] Total FTA: 0.1097
+[2025-07-06 22:03:26] [Rank 0] Group 0 Loss: 4.5004
+[2025-07-06 22:03:26] [Rank 0] Group 1 Loss: 4.2260
+[2025-07-06 22:03:26] [Rank 0] Group 2 Loss: 4.0655
+[2025-07-06 22:03:26] [Rank 0] Group 3 Loss: 4.0615
+[2025-07-06 22:03:26] [Rank 0] Group 4 Loss: 4.2152
+[2025-07-06 22:03:26] [Rank 0] Group 5 Loss: 4.0516
+[2025-07-06 22:03:26] [Rank 0] Group 6 Loss: 4.0835
+[2025-07-06 22:03:26] [Rank 0] Group 7 Loss: 4.2283
+[2025-07-06 22:03:26] [Rank 0] Group 8 Loss: 4.1482
+[2025-07-06 22:03:26] [Rank 0] Group 9 Loss: 4.1443
+[2025-07-06 22:03:26] [Rank 0] Group 10 Loss: 4.2012
+[2025-07-06 22:03:26] [Rank 0] Group 11 Loss: 4.2191
+[2025-07-06 22:03:26] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-06 22:03:26] [Rank 0] Group 1 FTA: 0.1536
+[2025-07-06 22:03:26] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-06 22:03:26] [Rank 0] Group 3 FTA: 0.0781
+[2025-07-06 22:03:26] [Rank 0] Group 4 FTA: 0.0651
+[2025-07-06 22:03:26] [Rank 0] Group 5 FTA: 0.1068
+[2025-07-06 22:03:26] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-06 22:03:26] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-06 22:03:26] [Rank 0] Group 8 FTA: 0.1146
+[2025-07-06 22:03:26] [Rank 0] Group 9 FTA: 0.1445
+[2025-07-06 22:03:26] [Rank 0] Group 10 FTA: 0.1035
+[2025-07-06 22:03:26] [Rank 0] Group 11 FTA: 0.1289
+[2025-07-06 22:03:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:03:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:03:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:03:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:03:28] [Rank 0] step:1001/10000 train_time:78956ms step_avg:78.88ms
+[2025-07-06 22:03:29] [Rank 0] step:1021/10000 train_time:80427ms step_avg:78.77ms
+[2025-07-06 22:03:31] [Rank 0] step:1041/10000 train_time:81897ms step_avg:78.67ms
+[2025-07-06 22:03:32] [Rank 0] step:1061/10000 train_time:83366ms step_avg:78.57ms
+[2025-07-06 22:03:34] [Rank 0] step:1081/10000 train_time:85096ms step_avg:78.72ms
+[2025-07-06 22:03:36] [Rank 0] step:1101/10000 train_time:86958ms step_avg:78.98ms
+[2025-07-06 22:03:37] [Rank 0] step:1121/10000 train_time:88430ms step_avg:78.89ms
+[2025-07-06 22:03:39] [Rank 0] step:1141/10000 train_time:89901ms step_avg:78.79ms
+[2025-07-06 22:03:40] [Rank 0] step:1161/10000 train_time:91373ms step_avg:78.70ms
+[2025-07-06 22:03:42] [Rank 0] step:1181/10000 train_time:93082ms step_avg:78.82ms
+[2025-07-06 22:03:43] [Rank 0] step:1201/10000 train_time:94559ms step_avg:78.73ms
+[2025-07-06 22:03:45] [Rank 0] step:1221/10000 train_time:96128ms step_avg:78.73ms
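[Editor's note: the step_avg field in the step lines above is consistent with cumulative wall-clock training time divided by the step index; a small sanity check (the helper name is hypothetical):

    def step_avg_ms(train_time_ms: int, step: int) -> float:
        # step_avg as logged: cumulative train_time divided by completed steps
        return round(train_time_ms / step, 2)

    assert step_avg_ms(96128, 1221) == 78.73  # matches "step:1221" above
    assert step_avg_ms(39409, 500) == 78.82   # matches the step:500 PRINT line
]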
+[2025-07-06 22:03:46] [Rank 0] step:1241/10000 train_time:97623ms step_avg:78.66ms
+[2025-07-06 22:03:49] [Rank 0] step:1261/10000 train_time:99147ms step_avg:78.63ms
+[2025-07-06 22:03:50] [Rank 0] step:1281/10000 train_time:101232ms step_avg:79.03ms
+[2025-07-06 22:03:52] [Rank 0] step:1301/10000 train_time:102708ms step_avg:78.95ms
+[2025-07-06 22:03:53] [Rank 0] step:1321/10000 train_time:104185ms step_avg:78.87ms
+[2025-07-06 22:03:54] [Rank 0] step:1341/10000 train_time:105662ms step_avg:78.79ms
+[2025-07-06 22:03:57] [Rank 0] step:1361/10000 train_time:107787ms step_avg:79.20ms
+[2025-07-06 22:03:58] [Rank 0] step:1381/10000 train_time:109265ms step_avg:79.12ms
+[2025-07-06 22:04:00] [Rank 0] step:1401/10000 train_time:110744ms step_avg:79.05ms
+[2025-07-06 22:04:01] [Rank 0] step:1421/10000 train_time:112223ms step_avg:78.97ms
+[2025-07-06 22:04:03] [Rank 0] step:1441/10000 train_time:113758ms step_avg:78.94ms
+[2025-07-06 22:04:05] [Rank 0] step:1461/10000 train_time:115827ms step_avg:79.28ms
+[2025-07-06 22:04:06] [Rank 0] step:1481/10000 train_time:117306ms step_avg:79.21ms
+[2025-07-06 22:04:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:04:08] [Rank 0] PRINT: step:1500/10000 train_loss:1.4246 val_loss:1.3299 train_time:118785ms step_avg:79.19ms
+[2025-07-06 22:04:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:04:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:04:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:09:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:09:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:09:32] [Rank 0] Total Loss: 4.4427
+[2025-07-06 22:09:32] [Rank 0] Total FTA: 0.2404
+[2025-07-06 22:09:32] [Rank 0] Group 0 Loss: 4.6005
+[2025-07-06 22:09:32] [Rank 0] Group 1 Loss: 4.5362
+[2025-07-06 22:09:32] [Rank 0] Group 2 Loss: 4.2594
+[2025-07-06 22:09:32] [Rank 0] Group 3 Loss: 4.4786
+[2025-07-06 22:09:32] [Rank 0] Group 4 Loss: 4.3597
+[2025-07-06 22:09:32] [Rank 0] Group 5 Loss: 4.3942
+[2025-07-06 22:09:32] [Rank 0] Group 6 Loss: 4.3493
+[2025-07-06 22:09:32] [Rank 0] Group 7 Loss: 4.4481
+[2025-07-06 22:09:32] [Rank 0] Group 8 Loss: 4.4152
+[2025-07-06 22:09:32] [Rank 0] Group 9 Loss: 4.3664
+[2025-07-06 22:09:32] [Rank 0] Group 10 Loss: 4.4425
+[2025-07-06 22:09:32] [Rank 0] Group 11 Loss: 4.4565
+[2025-07-06 22:09:32] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-06 22:09:32] [Rank 0] Group 1 FTA: 0.4948
+[2025-07-06 22:09:32] [Rank 0] Group 2 FTA: 0.3359
+[2025-07-06 22:09:32] [Rank 0] Group 3 FTA: 0.1302
+[2025-07-06 22:09:32] [Rank 0] Group 4 FTA: 0.1615
+[2025-07-06 22:09:32] [Rank 0] Group 5 FTA: 0.2708
+[2025-07-06 22:09:32] [Rank 0] Group 6 FTA: 0.2474
+[2025-07-06 22:09:32] [Rank 0] Group 7 FTA: 0.2630
+[2025-07-06 22:09:32] [Rank 0] Group 8 FTA: 0.2448
+[2025-07-06 22:09:32] [Rank 0] Group 9 FTA: 0.2305
+[2025-07-06 22:09:32] [Rank 0] Group 10 FTA: 0.2539
+[2025-07-06 22:09:32] [Rank 0] Group 11 FTA: 0.2012
+[2025-07-06 22:09:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:09:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:09:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:09:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:09:33] [Rank 0] step:1501/10000 train_time:118806ms step_avg:79.15ms
+[2025-07-06 22:09:35] [Rank 0] step:1521/10000 train_time:120286ms step_avg:79.08ms
+[2025-07-06 22:09:37] [Rank 0] step:1541/10000 train_time:122402ms step_avg:79.43ms
+[2025-07-06 22:09:38] [Rank 0] step:1561/10000 train_time:123873ms step_avg:79.35ms
+[2025-07-06 22:09:40] [Rank 0] step:1581/10000 train_time:125340ms step_avg:79.28ms
+[2025-07-06 22:09:41] [Rank 0] step:1601/10000 train_time:126810ms step_avg:79.21ms
+[2025-07-06 22:09:43] [Rank 0] step:1621/10000 train_time:128539ms step_avg:79.30ms
+[2025-07-06 22:09:45] [Rank 0] step:1641/10000 train_time:130411ms step_avg:79.47ms
+[2025-07-06 22:09:46] [Rank 0] step:1661/10000 train_time:131881ms step_avg:79.40ms
+[2025-07-06 22:09:48] [Rank 0] step:1681/10000 train_time:133353ms step_avg:79.33ms
+[2025-07-06 22:09:49] [Rank 0] step:1701/10000 train_time:134825ms step_avg:79.26ms
+[2025-07-06 22:09:51] [Rank 0] step:1721/10000 train_time:136964ms step_avg:79.58ms
+[2025-07-06 22:09:53] [Rank 0] step:1741/10000 train_time:138436ms step_avg:79.52ms
+[2025-07-06 22:09:54] [Rank 0] step:1761/10000 train_time:139910ms step_avg:79.45ms
+[2025-07-06 22:09:56] [Rank 0] step:1781/10000 train_time:141384ms step_avg:79.38ms
+[2025-07-06 22:09:58] [Rank 0] step:1801/10000 train_time:142912ms step_avg:79.35ms
+[2025-07-06 22:09:59] [Rank 0] step:1821/10000 train_time:145000ms step_avg:79.63ms
+[2025-07-06 22:10:01] [Rank 0] step:1841/10000 train_time:146477ms step_avg:79.56ms
+[2025-07-06 22:10:02] [Rank 0] step:1861/10000 train_time:147954ms step_avg:79.50ms
+[2025-07-06 22:10:04] [Rank 0] step:1881/10000 train_time:149434ms step_avg:79.44ms
+[2025-07-06 22:10:06] [Rank 0] step:1901/10000 train_time:151075ms step_avg:79.47ms
+[2025-07-06 22:10:07] [Rank 0] step:1921/10000 train_time:152554ms step_avg:79.41ms
+[2025-07-06 22:10:08] [Rank 0] step:1941/10000 train_time:154034ms step_avg:79.36ms
+[2025-07-06 22:10:10] [Rank 0] step:1961/10000 train_time:155512ms step_avg:79.30ms
+[2025-07-06 22:10:12] [Rank 0] step:1981/10000 train_time:157250ms step_avg:79.38ms
+[2025-07-06 22:10:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:10:14] [Rank 0] PRINT: step:2000/10000 train_loss:1.2418 val_loss:1.2147 train_time:159116ms step_avg:79.56ms
+[2025-07-06 22:10:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:10:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
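[Editor's note: each detailed evaluation announces stratified sampling toward ~5000 samples yet reports 5633 (see the size line just below). The log does not show the sampler, but proportional per-group quotas rounded up, together with a per-group floor, would overshoot the target in exactly this way; a hypothetical sketch:

    import math

    # Hypothetical reconstruction, not the script's code: quotas are
    # proportional to group size, rounded up, and clamped below by a floor,
    # so the summed sample count can exceed the ~5000 target.
    def stratified_quotas(group_sizes: dict[int, int], target: int = 5000,
                          floor: int = 64) -> dict[int, int]:
        total = sum(group_sizes.values())
        return {g: min(n, max(floor, math.ceil(target * n / total)))
                for g, n in group_sizes.items()}
]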
+[2025-07-06 22:10:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:15:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:15:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:15:38] [Rank 0] Total Loss: 4.7684
+[2025-07-06 22:15:38] [Rank 0] Total FTA: 0.3286
+[2025-07-06 22:15:38] [Rank 0] Group 0 Loss: 4.9224
+[2025-07-06 22:15:38] [Rank 0] Group 1 Loss: 4.9203
+[2025-07-06 22:15:38] [Rank 0] Group 2 Loss: 4.5441
+[2025-07-06 22:15:38] [Rank 0] Group 3 Loss: 4.9411
+[2025-07-06 22:15:38] [Rank 0] Group 4 Loss: 4.6452
+[2025-07-06 22:15:38] [Rank 0] Group 5 Loss: 4.6630
+[2025-07-06 22:15:38] [Rank 0] Group 6 Loss: 4.7114
+[2025-07-06 22:15:38] [Rank 0] Group 7 Loss: 4.7590
+[2025-07-06 22:15:38] [Rank 0] Group 8 Loss: 4.7172
+[2025-07-06 22:15:38] [Rank 0] Group 9 Loss: 4.7912
+[2025-07-06 22:15:38] [Rank 0] Group 10 Loss: 4.7761
+[2025-07-06 22:15:38] [Rank 0] Group 11 Loss: 4.7352
+[2025-07-06 22:15:38] [Rank 0] Group 0 FTA: 0.3316
+[2025-07-06 22:15:38] [Rank 0] Group 1 FTA: 0.5130
+[2025-07-06 22:15:38] [Rank 0] Group 2 FTA: 0.2448
+[2025-07-06 22:15:38] [Rank 0] Group 3 FTA: 0.2422
+[2025-07-06 22:15:38] [Rank 0] Group 4 FTA: 0.2734
+[2025-07-06 22:15:38] [Rank 0] Group 5 FTA: 0.3333
+[2025-07-06 22:15:38] [Rank 0] Group 6 FTA: 0.3516
+[2025-07-06 22:15:38] [Rank 0] Group 7 FTA: 0.3125
+[2025-07-06 22:15:38] [Rank 0] Group 8 FTA: 0.3021
+[2025-07-06 22:15:38] [Rank 0] Group 9 FTA: 0.3438
+[2025-07-06 22:15:38] [Rank 0] Group 10 FTA: 0.3281
+[2025-07-06 22:15:38] [Rank 0] Group 11 FTA: 0.3438
+[2025-07-06 22:15:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:15:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:15:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:15:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:15:40] [Rank 0] step:2001/10000 train_time:159138ms step_avg:79.53ms
+[2025-07-06 22:15:41] [Rank 0] step:2021/10000 train_time:160623ms step_avg:79.48ms
+[2025-07-06 22:15:42] [Rank 0] step:2041/10000 train_time:162094ms step_avg:79.42ms
+[2025-07-06 22:15:44] [Rank 0] step:2061/10000 train_time:163564ms step_avg:79.36ms
+[2025-07-06 22:15:46] [Rank 0] step:2081/10000 train_time:165687ms step_avg:79.62ms
+[2025-07-06 22:15:48] [Rank 0] step:2101/10000 train_time:167157ms step_avg:79.56ms
+[2025-07-06 22:15:49] [Rank 0] step:2121/10000 train_time:168626ms step_avg:79.50ms
+[2025-07-06 22:15:50] [Rank 0] step:2141/10000 train_time:170097ms step_avg:79.45ms
+[2025-07-06 22:15:52] [Rank 0] step:2161/10000 train_time:171622ms step_avg:79.42ms
+[2025-07-06 22:15:54] [Rank 0] step:2181/10000 train_time:173283ms step_avg:79.45ms
+[2025-07-06 22:15:55] [Rank 0] step:2201/10000 train_time:174754ms step_avg:79.40ms
+[2025-07-06 22:15:57] [Rank 0] step:2221/10000 train_time:176227ms step_avg:79.35ms
+[2025-07-06 22:15:58] [Rank 0] step:2241/10000 train_time:177723ms step_avg:79.31ms
+[2025-07-06 22:16:00] [Rank 0] step:2261/10000 train_time:179877ms step_avg:79.56ms
+[2025-07-06 22:16:02] [Rank 0] step:2281/10000 train_time:181377ms step_avg:79.52ms
+[2025-07-06 22:16:03] [Rank 0] step:2301/10000 train_time:182877ms step_avg:79.48ms
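[Editor's note: the "[✓] ... curve updated and saved" entries show the four plots being redrawn and overwritten after every detailed evaluation. An illustrative sketch of such an incremental dump; the output path mirrors the log, everything else is assumed:

    import matplotlib
    matplotlib.use("Agg")  # headless backend, as on a training node
    import matplotlib.pyplot as plt

    # Illustrative only: redraw and overwrite one curve file per eval step.
    def save_per_class_curves(steps: list[int], history: dict[int, list[float]],
                              out_path: str, ylabel: str) -> None:
        fig, ax = plt.subplots()
        for group in sorted(history):
            ax.plot(steps, history[group], label=f"Group {group}")
        ax.set_xlabel("step")
        ax.set_ylabel(ylabel)
        ax.legend(fontsize=6, ncol=2)
        fig.savefig(out_path, dpi=150)
        plt.close(fig)  # free the figure; this runs at every 500-step eval
]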
+[2025-07-06 22:16:05] [Rank 0] step:2321/10000 train_time:184379ms step_avg:79.44ms
+[2025-07-06 22:16:07] [Rank 0] step:2341/10000 train_time:185882ms step_avg:79.40ms
+[2025-07-06 22:16:08] [Rank 0] step:2361/10000 train_time:188030ms step_avg:79.64ms
+[2025-07-06 22:16:10] [Rank 0] step:2381/10000 train_time:189531ms step_avg:79.60ms
+[2025-07-06 22:16:11] [Rank 0] step:2401/10000 train_time:191033ms step_avg:79.56ms
+[2025-07-06 22:16:13] [Rank 0] step:2421/10000 train_time:192538ms step_avg:79.53ms
+[2025-07-06 22:16:15] [Rank 0] step:2441/10000 train_time:194697ms step_avg:79.76ms
+[2025-07-06 22:16:17] [Rank 0] step:2461/10000 train_time:196201ms step_avg:79.72ms
+[2025-07-06 22:16:18] [Rank 0] step:2481/10000 train_time:197706ms step_avg:79.69ms
+[2025-07-06 22:16:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:16:20] [Rank 0] PRINT: step:2500/10000 train_loss:1.1628 val_loss:1.1222 train_time:199213ms step_avg:79.69ms
+[2025-07-06 22:16:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:16:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:16:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:21:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:21:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:21:41] [Rank 0] Total Loss: 4.9121
+[2025-07-06 22:21:41] [Rank 0] Total FTA: 0.4346
+[2025-07-06 22:21:41] [Rank 0] Group 0 Loss: 5.0407
+[2025-07-06 22:21:41] [Rank 0] Group 1 Loss: 4.8739
+[2025-07-06 22:21:41] [Rank 0] Group 2 Loss: 4.6824
+[2025-07-06 22:21:41] [Rank 0] Group 3 Loss: 4.8939
+[2025-07-06 22:21:41] [Rank 0] Group 4 Loss: 4.8794
+[2025-07-06 22:21:41] [Rank 0] Group 5 Loss: 4.8546
+[2025-07-06 22:21:42] [Rank 0] Group 6 Loss: 4.8240
+[2025-07-06 22:21:42] [Rank 0] Group 7 Loss: 4.9530
+[2025-07-06 22:21:42] [Rank 0] Group 8 Loss: 4.8895
+[2025-07-06 22:21:42] [Rank 0] Group 9 Loss: 4.9211
+[2025-07-06 22:21:42] [Rank 0] Group 10 Loss: 4.9691
+[2025-07-06 22:21:42] [Rank 0] Group 11 Loss: 4.9524
+[2025-07-06 22:21:42] [Rank 0] Group 0 FTA: 0.5085
+[2025-07-06 22:21:42] [Rank 0] Group 1 FTA: 0.4766
+[2025-07-06 22:21:42] [Rank 0] Group 2 FTA: 0.3229
+[2025-07-06 22:21:42] [Rank 0] Group 3 FTA: 0.3255
+[2025-07-06 22:21:42] [Rank 0] Group 4 FTA: 0.3828
+[2025-07-06 22:21:42] [Rank 0] Group 5 FTA: 0.4349
+[2025-07-06 22:21:42] [Rank 0] Group 6 FTA: 0.4427
+[2025-07-06 22:21:42] [Rank 0] Group 7 FTA: 0.4505
+[2025-07-06 22:21:42] [Rank 0] Group 8 FTA: 0.4245
+[2025-07-06 22:21:42] [Rank 0] Group 9 FTA: 0.4336
+[2025-07-06 22:21:42] [Rank 0] Group 10 FTA: 0.4609
+[2025-07-06 22:21:42] [Rank 0] Group 11 FTA: 0.4473
+[2025-07-06 22:21:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:21:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:21:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:21:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:21:43] [Rank 0] step:2501/10000 train_time:199233ms step_avg:79.66ms
+[2025-07-06 22:21:45] [Rank 0] step:2521/10000 train_time:200985ms step_avg:79.72ms
+[2025-07-06 22:21:47] [Rank 0] step:2541/10000 train_time:202878ms step_avg:79.84ms
+[2025-07-06 22:21:48] [Rank 0] step:2561/10000 train_time:204371ms step_avg:79.80ms
+[2025-07-06 22:21:50] [Rank 0] step:2581/10000 train_time:205866ms step_avg:79.76ms
+[2025-07-06 22:21:51] [Rank 0] step:2601/10000 train_time:207361ms step_avg:79.72ms
+[2025-07-06 22:21:53] [Rank 0] step:2621/10000 train_time:209520ms step_avg:79.94ms
+[2025-07-06 22:21:55] [Rank 0] step:2641/10000 train_time:211017ms step_avg:79.90ms
+[2025-07-06 22:21:56] [Rank 0] step:2661/10000 train_time:212513ms step_avg:79.86ms
+[2025-07-06 22:21:58] [Rank 0] step:2681/10000 train_time:214011ms step_avg:79.83ms
+[2025-07-06 22:22:00] [Rank 0] step:2701/10000 train_time:215557ms step_avg:79.81ms
+[2025-07-06 22:22:01] [Rank 0] step:2721/10000 train_time:217240ms step_avg:79.84ms
+[2025-07-06 22:22:03] [Rank 0] step:2741/10000 train_time:218738ms step_avg:79.80ms
+[2025-07-06 22:22:04] [Rank 0] step:2761/10000 train_time:220237ms step_avg:79.77ms
+[2025-07-06 22:22:06] [Rank 0] step:2781/10000 train_time:221736ms step_avg:79.73ms
+[2025-07-06 22:22:08] [Rank 0] step:2801/10000 train_time:223890ms step_avg:79.93ms
+[2025-07-06 22:22:09] [Rank 0] step:2821/10000 train_time:225389ms step_avg:79.90ms
+[2025-07-06 22:22:11] [Rank 0] step:2841/10000 train_time:226890ms step_avg:79.86ms
+[2025-07-06 22:22:12] [Rank 0] step:2861/10000 train_time:228393ms step_avg:79.83ms
+[2025-07-06 22:22:14] [Rank 0] step:2881/10000 train_time:230145ms step_avg:79.88ms
+[2025-07-06 22:22:16] [Rank 0] step:2901/10000 train_time:231626ms step_avg:79.84ms
+[2025-07-06 22:22:17] [Rank 0] step:2921/10000 train_time:233128ms step_avg:79.81ms
+[2025-07-06 22:22:19] [Rank 0] step:2941/10000 train_time:234630ms step_avg:79.78ms
+[2025-07-06 22:22:20] [Rank 0] step:2961/10000 train_time:236131ms step_avg:79.75ms
+[2025-07-06 22:22:22] [Rank 0] step:2981/10000 train_time:238290ms step_avg:79.94ms
+[2025-07-06 22:22:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:22:25] [Rank 0] PRINT: step:3000/10000 train_loss:1.0975 val_loss:1.0517 train_time:239791ms step_avg:79.93ms
+[2025-07-06 22:22:25] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:22:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:22:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:27:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:27:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:27:48] [Rank 0] Total Loss: 5.0370
+[2025-07-06 22:27:48] [Rank 0] Total FTA: 0.5814
+[2025-07-06 22:27:48] [Rank 0] Group 0 Loss: 4.9420
+[2025-07-06 22:27:48] [Rank 0] Group 1 Loss: 5.0923
+[2025-07-06 22:27:48] [Rank 0] Group 2 Loss: 4.8792
+[2025-07-06 22:27:48] [Rank 0] Group 3 Loss: 5.1826
+[2025-07-06 22:27:48] [Rank 0] Group 4 Loss: 4.9940
+[2025-07-06 22:27:48] [Rank 0] Group 5 Loss: 5.0057
+[2025-07-06 22:27:48] [Rank 0] Group 6 Loss: 4.9653
+[2025-07-06 22:27:48] [Rank 0] Group 7 Loss: 5.1337
+[2025-07-06 22:27:48] [Rank 0] Group 8 Loss: 5.1198
+[2025-07-06 22:27:48] [Rank 0] Group 9 Loss: 5.0253
+[2025-07-06 22:27:48] [Rank 0] Group 10 Loss: 5.0928
+[2025-07-06 22:27:48] [Rank 0] Group 11 Loss: 5.0544
+[2025-07-06 22:27:48] [Rank 0] Group 0 FTA: 0.4876
+[2025-07-06 22:27:48] [Rank 0] Group 1 FTA: 0.4948
+[2025-07-06 22:27:48] [Rank 0] Group 2 FTA: 0.5807
+[2025-07-06 22:27:48] [Rank 0] Group 3 FTA: 0.4818
+[2025-07-06 22:27:48] [Rank 0] Group 4 FTA: 0.5677
+[2025-07-06 22:27:48] [Rank 0] Group 5 FTA: 0.6797
+[2025-07-06 22:27:48] [Rank 0] Group 6 FTA: 0.5859
+[2025-07-06 22:27:48] [Rank 0] Group 7 FTA: 0.6042
+[2025-07-06 22:27:48] [Rank 0] Group 8 FTA: 0.6224
+[2025-07-06 22:27:48] [Rank 0] Group 9 FTA: 0.6445
+[2025-07-06 22:27:48] [Rank 0] Group 10 FTA: 0.6113
+[2025-07-06 22:27:48] [Rank 0] Group 11 FTA: 0.6338
+[2025-07-06 22:27:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:27:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:27:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:27:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:27:49] [Rank 0] step:3001/10000 train_time:239813ms step_avg:79.91ms
+[2025-07-06 22:27:51] [Rank 0] step:3021/10000 train_time:241325ms step_avg:79.88ms
+[2025-07-06 22:27:52] [Rank 0] step:3041/10000 train_time:242820ms step_avg:79.85ms
+[2025-07-06 22:27:54] [Rank 0] step:3061/10000 train_time:244367ms step_avg:79.83ms
+[2025-07-06 22:27:56] [Rank 0] step:3081/10000 train_time:246462ms step_avg:79.99ms
+[2025-07-06 22:27:57] [Rank 0] step:3101/10000 train_time:247957ms step_avg:79.96ms
+[2025-07-06 22:27:59] [Rank 0] step:3121/10000 train_time:249454ms step_avg:79.93ms
+[2025-07-06 22:28:00] [Rank 0] step:3141/10000 train_time:251019ms step_avg:79.92ms
+[2025-07-06 22:28:02] [Rank 0] step:3161/10000 train_time:252755ms step_avg:79.96ms
+[2025-07-06 22:28:04] [Rank 0] step:3181/10000 train_time:254252ms step_avg:79.93ms
+[2025-07-06 22:28:05] [Rank 0] step:3201/10000 train_time:255750ms step_avg:79.90ms
+[2025-07-06 22:28:07] [Rank 0] step:3221/10000 train_time:257249ms step_avg:79.87ms
+[2025-07-06 22:28:09] [Rank 0] step:3241/10000 train_time:259419ms step_avg:80.04ms
+[2025-07-06 22:28:10] [Rank 0] step:3261/10000 train_time:260897ms step_avg:80.01ms
+[2025-07-06 22:28:12] [Rank 0] step:3281/10000 train_time:262399ms step_avg:79.98ms
+[2025-07-06 22:28:13] [Rank 0] step:3301/10000 train_time:263899ms step_avg:79.95ms
+[2025-07-06 22:28:15] [Rank 0] step:3321/10000 train_time:265399ms step_avg:79.92ms
+[2025-07-06 22:28:17] [Rank 0] step:3341/10000 train_time:267539ms step_avg:80.08ms
+[2025-07-06 22:28:18] [Rank 0] step:3361/10000 train_time:269035ms step_avg:80.05ms
+[2025-07-06 22:28:20] [Rank 0] step:3381/10000 train_time:270534ms step_avg:80.02ms
+[2025-07-06 22:28:21] [Rank 0] step:3401/10000 train_time:272034ms step_avg:79.99ms
+[2025-07-06 22:28:23] [Rank 0] step:3421/10000 train_time:274201ms step_avg:80.15ms
+[2025-07-06 22:28:25] [Rank 0] step:3441/10000 train_time:275681ms step_avg:80.12ms
+[2025-07-06 22:28:26] [Rank 0] step:3461/10000 train_time:277182ms step_avg:80.09ms
+[2025-07-06 22:28:28] [Rank 0] step:3481/10000 train_time:278685ms step_avg:80.06ms
+[2025-07-06 22:28:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:28:30] [Rank 0] PRINT: step:3500/10000 train_loss:1.0236 val_loss:0.9809 train_time:280189ms step_avg:80.05ms
+[2025-07-06 22:28:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:28:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:28:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:33:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:33:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:33:52] [Rank 0] Total Loss: 5.2629
+[2025-07-06 22:33:52] [Rank 0] Total FTA: 0.8045
+[2025-07-06 22:33:52] [Rank 0] Group 0 Loss: 5.3517
+[2025-07-06 22:33:52] [Rank 0] Group 1 Loss: 5.2026
+[2025-07-06 22:33:52] [Rank 0] Group 2 Loss: 5.1309
+[2025-07-06 22:33:52] [Rank 0] Group 3 Loss: 5.2670
+[2025-07-06 22:33:52] [Rank 0] Group 4 Loss: 5.3534
+[2025-07-06 22:33:52] [Rank 0] Group 5 Loss: 5.2230
+[2025-07-06 22:33:52] [Rank 0] Group 6 Loss: 5.1322
+[2025-07-06 22:33:52] [Rank 0] Group 7 Loss: 5.2639
+[2025-07-06 22:33:52] [Rank 0] Group 8 Loss: 5.2811
+[2025-07-06 22:33:52] [Rank 0] Group 9 Loss: 5.2225
+[2025-07-06 22:33:52] [Rank 0] Group 10 Loss: 5.2698
+[2025-07-06 22:33:52] [Rank 0] Group 11 Loss: 5.2964
+[2025-07-06 22:33:52] [Rank 0] Group 0 FTA: 0.8401
+[2025-07-06 22:33:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 22:33:52] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-06 22:33:52] [Rank 0] Group 3 FTA: 0.8203
+[2025-07-06 22:33:52] [Rank 0] Group 4 FTA: 0.7604
+[2025-07-06 22:33:52] [Rank 0] Group 5 FTA: 0.7682
+[2025-07-06 22:33:52] [Rank 0] Group 6 FTA: 0.7109
+[2025-07-06 22:33:52] [Rank 0] Group 7 FTA: 0.7708
+[2025-07-06 22:33:52] [Rank 0] Group 8 FTA: 0.7578
+[2025-07-06 22:33:52] [Rank 0] Group 9 FTA: 0.7930
+[2025-07-06 22:33:52] [Rank 0] Group 10 FTA: 0.7676
+[2025-07-06 22:33:52] [Rank 0] Group 11 FTA: 0.7754
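[Editor's note: the log never expands "FTA"; in QA probes of this kind it plausibly stands for first-token accuracy, i.e. whether the greedy prediction at the answer's first position is correct (note Group 1 reaching 1.0000 above). A hedged sketch under that assumption:

    import torch

    # Assumes FTA = first-token accuracy; the acronym is not defined in this log.
    def first_token_accuracy(logits: torch.Tensor,
                             first_answer_ids: torch.Tensor) -> float:
        # logits: (num_samples, vocab_size) taken at the position just
        # before the answer; first_answer_ids: (num_samples,) gold token ids
        preds = logits.argmax(dim=-1)
        return (preds == first_answer_ids).float().mean().item()
]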
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-06 22:33:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-06 22:33:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-06 22:33:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-06 22:33:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-06 22:33:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-06 22:33:53] [Rank 0] step:3501/10000 train_time:280210ms step_avg:80.04ms +[2025-07-06 22:33:53] [Rank 0] step:3501/10000 train_time:280210ms step_avg:80.04ms +[2025-07-06 22:33:55] [Rank 0] step:3521/10000 train_time:282379ms step_avg:80.20ms +[2025-07-06 22:33:55] [Rank 0] step:3521/10000 train_time:282379ms step_avg:80.20ms +[2025-07-06 22:33:57] [Rank 0] step:3541/10000 train_time:283871ms step_avg:80.17ms +[2025-07-06 22:33:57] [Rank 0] step:3541/10000 train_time:283871ms step_avg:80.17ms +[2025-07-06 22:33:58] [Rank 0] step:3561/10000 train_time:285365ms step_avg:80.14ms +[2025-07-06 22:33:58] [Rank 0] step:3561/10000 train_time:285365ms step_avg:80.14ms +[2025-07-06 22:34:00] [Rank 0] step:3581/10000 train_time:286861ms step_avg:80.11ms +[2025-07-06 22:34:00] [Rank 0] step:3581/10000 train_time:286861ms step_avg:80.11ms +[2025-07-06 22:34:02] [Rank 0] step:3601/10000 train_time:288355ms step_avg:80.08ms +[2025-07-06 22:34:02] [Rank 0] step:3601/10000 train_time:288355ms step_avg:80.08ms +[2025-07-06 22:34:03] [Rank 0] step:3621/10000 train_time:290089ms step_avg:80.11ms +[2025-07-06 22:34:03] [Rank 0] step:3621/10000 train_time:290089ms step_avg:80.11ms +[2025-07-06 22:34:05] [Rank 0] step:3641/10000 train_time:291585ms step_avg:80.08ms +[2025-07-06 22:34:05] [Rank 0] step:3641/10000 train_time:291585ms step_avg:80.08ms +[2025-07-06 22:34:06] [Rank 0] step:3661/10000 train_time:293081ms step_avg:80.05ms +[2025-07-06 22:34:06] [Rank 0] step:3661/10000 train_time:293081ms step_avg:80.05ms +[2025-07-06 22:34:08] [Rank 0] step:3681/10000 train_time:294580ms step_avg:80.03ms +[2025-07-06 22:34:08] [Rank 0] step:3681/10000 train_time:294580ms step_avg:80.03ms +[2025-07-06 22:34:10] [Rank 0] step:3701/10000 train_time:296748ms step_avg:80.18ms +[2025-07-06 22:34:10] [Rank 0] step:3701/10000 train_time:296748ms step_avg:80.18ms +[2025-07-06 22:34:11] [Rank 0] step:3721/10000 train_time:298244ms step_avg:80.15ms +[2025-07-06 22:34:11] [Rank 0] step:3721/10000 train_time:298244ms step_avg:80.15ms +[2025-07-06 22:34:13] [Rank 0] step:3741/10000 train_time:299743ms step_avg:80.12ms +[2025-07-06 22:34:13] [Rank 0] step:3741/10000 train_time:299743ms step_avg:80.12ms +[2025-07-06 22:34:14] [Rank 0] step:3761/10000 train_time:301243ms step_avg:80.10ms +[2025-07-06 22:34:14] [Rank 0] step:3761/10000 train_time:301243ms step_avg:80.10ms +[2025-07-06 22:34:16] [Rank 0] step:3781/10000 train_time:302997ms step_avg:80.14ms +[2025-07-06 22:34:16] [Rank 0] step:3781/10000 train_time:302997ms step_avg:80.14ms +[2025-07-06 22:34:18] [Rank 0] step:3801/10000 train_time:304891ms step_avg:80.21ms +[2025-07-06 22:34:18] [Rank 0] 
step:3801/10000 train_time:304891ms step_avg:80.21ms +[2025-07-06 22:34:20] [Rank 0] step:3821/10000 train_time:306454ms step_avg:80.20ms +[2025-07-06 22:34:20] [Rank 0] step:3821/10000 train_time:306454ms step_avg:80.20ms +[2025-07-06 22:34:21] [Rank 0] step:3841/10000 train_time:307953ms step_avg:80.18ms +[2025-07-06 22:34:21] [Rank 0] step:3841/10000 train_time:307953ms step_avg:80.18ms +[2025-07-06 22:34:23] [Rank 0] step:3861/10000 train_time:309451ms step_avg:80.15ms +[2025-07-06 22:34:23] [Rank 0] step:3861/10000 train_time:309451ms step_avg:80.15ms +[2025-07-06 22:34:25] [Rank 0] step:3881/10000 train_time:311595ms step_avg:80.29ms +[2025-07-06 22:34:25] [Rank 0] step:3881/10000 train_time:311595ms step_avg:80.29ms +[2025-07-06 22:34:26] [Rank 0] step:3901/10000 train_time:313095ms step_avg:80.26ms +[2025-07-06 22:34:26] [Rank 0] step:3901/10000 train_time:313095ms step_avg:80.26ms +[2025-07-06 22:34:28] [Rank 0] step:3921/10000 train_time:314596ms step_avg:80.23ms +[2025-07-06 22:34:28] [Rank 0] step:3921/10000 train_time:314596ms step_avg:80.23ms +[2025-07-06 22:34:29] [Rank 0] step:3941/10000 train_time:316098ms step_avg:80.21ms +[2025-07-06 22:34:29] [Rank 0] step:3941/10000 train_time:316098ms step_avg:80.21ms +[2025-07-06 22:34:31] [Rank 0] step:3961/10000 train_time:317649ms step_avg:80.19ms +[2025-07-06 22:34:31] [Rank 0] step:3961/10000 train_time:317649ms step_avg:80.19ms +[2025-07-06 22:34:33] [Rank 0] step:3981/10000 train_time:319741ms step_avg:80.32ms +[2025-07-06 22:34:33] [Rank 0] step:3981/10000 train_time:319741ms step_avg:80.32ms +[2025-07-06 22:34:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:34:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:34:35] [Rank 0] PRINT: step:4000/10000 train_loss:0.9616 val_loss:0.9342 train_time:321243ms step_avg:80.31ms +[2025-07-06 22:34:35] [Rank 0] PRINT: step:4000/10000 train_loss:0.9616 val_loss:0.9342 train_time:321243ms step_avg:80.31ms +[2025-07-06 22:34:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:34:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:34:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 22:34:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
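(The sampler itself is not shown in the log, so the following is a sketch, not the script's code. A stratified sampler that rounds each group's quota up would explain why a ~5000-sample target repeatedly yields exactly 5633 samples across the 12 groups reported here; all names are illustrative.)

import math
import random

def stratified_sample(by_group, target=5000):
    # by_group: dict mapping group id -> list of QA examples (assumed shape).
    total = sum(len(v) for v in by_group.values())
    picked = []
    for examples in by_group.values():
        # Rounding each quota up keeps every group represented, so the
        # final size can overshoot the target (e.g. 5633 for target 5000).
        quota = math.ceil(target * len(examples) / total)
        picked.extend(random.sample(examples, min(quota, len(examples))))
    return picked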
+[2025-07-06 22:34:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:39:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:40:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:40:00] [Rank 0] Total Loss: 5.3006
+[2025-07-06 22:40:00] [Rank 0] Total FTA: 0.8242
+[2025-07-06 22:40:00] [Rank 0] Group 0 Loss: 5.5929
+[2025-07-06 22:40:00] [Rank 0] Group 1 Loss: 5.0690
+[2025-07-06 22:40:00] [Rank 0] Group 2 Loss: 5.2640
+[2025-07-06 22:40:00] [Rank 0] Group 3 Loss: 5.2131
+[2025-07-06 22:40:00] [Rank 0] Group 4 Loss: 5.2709
+[2025-07-06 22:40:00] [Rank 0] Group 5 Loss: 5.2066
+[2025-07-06 22:40:00] [Rank 0] Group 6 Loss: 5.1637
+[2025-07-06 22:40:00] [Rank 0] Group 7 Loss: 5.2804
+[2025-07-06 22:40:00] [Rank 0] Group 8 Loss: 5.3321
+[2025-07-06 22:40:00] [Rank 0] Group 9 Loss: 5.2922
+[2025-07-06 22:40:00] [Rank 0] Group 10 Loss: 5.3217
+[2025-07-06 22:40:00] [Rank 0] Group 11 Loss: 5.2996
+[2025-07-06 22:40:00] [Rank 0] Group 0 FTA: 0.6723
+[2025-07-06 22:40:00] [Rank 0] Group 1 FTA: 0.7969
+[2025-07-06 22:40:00] [Rank 0] Group 2 FTA: 0.8047
+[2025-07-06 22:40:00] [Rank 0] Group 3 FTA: 0.9375
+[2025-07-06 22:40:00] [Rank 0] Group 4 FTA: 0.8672
+[2025-07-06 22:40:00] [Rank 0] Group 5 FTA: 0.8177
+[2025-07-06 22:40:00] [Rank 0] Group 6 FTA: 0.8438
+[2025-07-06 22:40:00] [Rank 0] Group 7 FTA: 0.8177
+[2025-07-06 22:40:00] [Rank 0] Group 8 FTA: 0.8438
+[2025-07-06 22:40:00] [Rank 0] Group 9 FTA: 0.8438
+[2025-07-06 22:40:00] [Rank 0] Group 10 FTA: 0.8555
+[2025-07-06 22:40:00] [Rank 0] Group 11 FTA: 0.8672
+[2025-07-06 22:40:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:40:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:40:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:40:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:40:01] [Rank 0] step:4001/10000 train_time:321265ms step_avg:80.30ms
+[2025-07-06 22:40:03] [Rank 0] step:4021/10000 train_time:322774ms step_avg:80.27ms
+[2025-07-06 22:40:04] [Rank 0] step:4041/10000 train_time:324266ms step_avg:80.24ms
+[2025-07-06 22:40:06] [Rank 0] step:4061/10000 train_time:326422ms step_avg:80.38ms
+[2025-07-06 22:40:08] [Rank 0] step:4081/10000 train_time:327915ms step_avg:80.35ms
+[2025-07-06 22:40:09] [Rank 0] step:4101/10000 train_time:329409ms step_avg:80.32ms
+[2025-07-06 22:40:11] [Rank 0] step:4121/10000 train_time:330906ms step_avg:80.30ms
+[2025-07-06 22:40:13] [Rank 0] step:4141/10000 train_time:332656ms step_avg:80.33ms
+[2025-07-06 22:40:14] [Rank 0] step:4161/10000 train_time:334566ms step_avg:80.41ms
+[2025-07-06 22:40:16] [Rank 0] step:4181/10000 train_time:336064ms step_avg:80.38ms
+[2025-07-06 22:40:17] [Rank 0] step:4201/10000 train_time:337561ms step_avg:80.35ms
+[2025-07-06 22:40:19] [Rank 0] step:4221/10000 train_time:339059ms step_avg:80.33ms
+[2025-07-06 22:40:21] [Rank 0] step:4241/10000 train_time:341206ms step_avg:80.45ms
+[2025-07-06 22:40:23] [Rank 0] step:4261/10000 train_time:342703ms step_avg:80.43ms
+[2025-07-06 22:40:24] [Rank 0] step:4281/10000 train_time:344200ms step_avg:80.40ms
+[2025-07-06 22:40:26] [Rank 0] step:4301/10000 train_time:345699ms step_avg:80.38ms
+[2025-07-06 22:40:28] [Rank 0] step:4321/10000 train_time:347254ms step_avg:80.36ms
+[2025-07-06 22:40:29] [Rank 0] step:4341/10000 train_time:349353ms step_avg:80.48ms
+[2025-07-06 22:40:31] [Rank 0] step:4361/10000 train_time:350853ms step_avg:80.45ms
+[2025-07-06 22:40:32] [Rank 0] step:4381/10000 train_time:352353ms step_avg:80.43ms
+[2025-07-06 22:40:34] [Rank 0] step:4401/10000 train_time:353854ms step_avg:80.40ms
+[2025-07-06 22:40:36] [Rank 0] step:4421/10000 train_time:356024ms step_avg:80.53ms
+[2025-07-06 22:40:37] [Rank 0] step:4441/10000 train_time:357525ms step_avg:80.51ms
+[2025-07-06 22:40:39] [Rank 0] step:4461/10000 train_time:359094ms step_avg:80.50ms
+[2025-07-06 22:40:40] [Rank 0] step:4481/10000 train_time:360595ms step_avg:80.47ms
+[2025-07-06 22:40:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:40:43] [Rank 0] PRINT: step:4500/10000 train_loss:0.9236 val_loss:0.9071 train_time:362096ms step_avg:80.47ms
+[2025-07-06 22:40:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:40:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
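(The recurring val_tokens warning is plain integer arithmetic: 1966080 / 262144 = 7.5, so a loop over whole validation batches covers only 7 of them. A minimal sketch, assuming the evaluation loop floors to full batches; the variable names are illustrative, not the script's own.)

val_tokens = 1966080
val_batch_size = 262144

full_batches = val_tokens // val_batch_size   # 7 whole batches
covered = full_batches * val_batch_size       # 1835008 tokens evaluated
missed = val_tokens - covered                 # 131072 tokens skipped
print(full_batches, covered, missed)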
+[2025-07-06 22:40:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:46:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:46:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:46:07] [Rank 0] Total Loss: 5.3202
+[2025-07-06 22:46:07] [Rank 0] Total FTA: 0.9258
+[2025-07-06 22:46:07] [Rank 0] Group 0 Loss: 5.6049
+[2025-07-06 22:46:07] [Rank 0] Group 1 Loss: 5.1110
+[2025-07-06 22:46:07] [Rank 0] Group 2 Loss: 5.0800
+[2025-07-06 22:46:07] [Rank 0] Group 3 Loss: 5.3355
+[2025-07-06 22:46:07] [Rank 0] Group 4 Loss: 5.3047
+[2025-07-06 22:46:07] [Rank 0] Group 5 Loss: 5.2164
+[2025-07-06 22:46:07] [Rank 0] Group 6 Loss: 5.2095
+[2025-07-06 22:46:07] [Rank 0] Group 7 Loss: 5.3228
+[2025-07-06 22:46:07] [Rank 0] Group 8 Loss: 5.3255
+[2025-07-06 22:46:07] [Rank 0] Group 9 Loss: 5.2889
+[2025-07-06 22:46:07] [Rank 0] Group 10 Loss: 5.3383
+[2025-07-06 22:46:07] [Rank 0] Group 11 Loss: 5.3515
+[2025-07-06 22:46:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 22:46:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 22:46:07] [Rank 0] Group 2 FTA: 0.8359
+[2025-07-06 22:46:07] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 22:46:07] [Rank 0] Group 4 FTA: 0.8854
+[2025-07-06 22:46:07] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-06 22:46:07] [Rank 0] Group 6 FTA: 0.8958
+[2025-07-06 22:46:07] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-06 22:46:07] [Rank 0] Group 8 FTA: 0.8906
+[2025-07-06 22:46:07] [Rank 0] Group 9 FTA: 0.8789
+[2025-07-06 22:46:07] [Rank 0] Group 10 FTA: 0.8926
+[2025-07-06 22:46:07] [Rank 0] Group 11 FTA: 0.9121
+[2025-07-06 22:46:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:46:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:46:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:46:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:46:09] [Rank 0] step:4501/10000 train_time:362859ms step_avg:80.62ms
+[2025-07-06 22:46:11] [Rank 0] step:4521/10000 train_time:364351ms step_avg:80.59ms
+[2025-07-06 22:46:12] [Rank 0] step:4541/10000 train_time:365844ms step_avg:80.56ms
+[2025-07-06 22:46:14] [Rank 0] step:4561/10000 train_time:367339ms step_avg:80.54ms
+[2025-07-06 22:46:15] [Rank 0] step:4581/10000 train_time:368834ms step_avg:80.51ms
+[2025-07-06 22:46:17] [Rank 0] step:4601/10000 train_time:370565ms step_avg:80.54ms
+[2025-07-06 22:46:18] [Rank 0] step:4621/10000 train_time:372061ms step_avg:80.52ms
+[2025-07-06 22:46:20] [Rank 0] step:4641/10000 train_time:373557ms step_avg:80.49ms
+[2025-07-06 22:46:21] [Rank 0] step:4661/10000 train_time:375054ms step_avg:80.47ms
+[2025-07-06 22:46:24] [Rank 0] step:4681/10000 train_time:377223ms step_avg:80.59ms
+[2025-07-06 22:46:25] [Rank 0] step:4701/10000 train_time:378701ms step_avg:80.56ms
+[2025-07-06 22:46:27] [Rank 0] step:4721/10000 train_time:380198ms step_avg:80.53ms
+[2025-07-06 22:46:28] [Rank 0] step:4741/10000 train_time:381697ms step_avg:80.51ms
+[2025-07-06 22:46:30] [Rank 0] step:4761/10000 train_time:383195ms step_avg:80.49ms
+[2025-07-06 22:46:32] [Rank 0] step:4781/10000 train_time:385341ms step_avg:80.60ms
+[2025-07-06 22:46:33] [Rank 0] step:4801/10000 train_time:386839ms step_avg:80.57ms
+[2025-07-06 22:46:35] [Rank 0] step:4821/10000 train_time:388337ms step_avg:80.55ms
+[2025-07-06 22:46:36] [Rank 0] step:4841/10000 train_time:389837ms step_avg:80.53ms
+[2025-07-06 22:46:38] [Rank 0] step:4861/10000 train_time:391590ms step_avg:80.56ms
+[2025-07-06 22:46:40] [Rank 0] step:4881/10000 train_time:393487ms step_avg:80.62ms
+[2025-07-06 22:46:41] [Rank 0] step:4901/10000 train_time:394987ms step_avg:80.59ms
+[2025-07-06 22:46:43] [Rank 0] step:4921/10000 train_time:396487ms step_avg:80.57ms
+[2025-07-06 22:46:44] [Rank 0] step:4941/10000 train_time:397986ms step_avg:80.55ms
+[2025-07-06 22:46:46] [Rank 0] step:4961/10000 train_time:400127ms step_avg:80.65ms
+[2025-07-06 22:46:48] [Rank 0] step:4981/10000 train_time:401626ms step_avg:80.63ms
+[2025-07-06 22:46:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:46:50] [Rank 0] PRINT: step:5000/10000 train_loss:0.9021 val_loss:0.8920 train_time:403126ms step_avg:80.63ms
+[2025-07-06 22:46:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:46:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
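(The step_avg field in the step lines is consistent with cumulative train_time divided by the step index. A one-line check against the step:4801 entry above; names are illustrative.)

train_time_ms = 386839                       # cumulative, from the step:4801 line
step = 4801
print(f"step_avg:{train_time_ms / step:.2f}ms")   # -> step_avg:80.57ms, matching the log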
+[2025-07-06 22:46:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:52:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:52:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:52:12] [Rank 0] Total Loss: 5.4727
+[2025-07-06 22:52:12] [Rank 0] Total FTA: 0.9285
+[2025-07-06 22:52:12] [Rank 0] Group 0 Loss: 5.7973
+[2025-07-06 22:52:12] [Rank 0] Group 1 Loss: 5.3393
+[2025-07-06 22:52:12] [Rank 0] Group 2 Loss: 5.2120
+[2025-07-06 22:52:12] [Rank 0] Group 3 Loss: 5.5099
+[2025-07-06 22:52:12] [Rank 0] Group 4 Loss: 5.4034
+[2025-07-06 22:52:12] [Rank 0] Group 5 Loss: 5.4440
+[2025-07-06 22:52:12] [Rank 0] Group 6 Loss: 5.4002
+[2025-07-06 22:52:12] [Rank 0] Group 7 Loss: 5.4344
+[2025-07-06 22:52:12] [Rank 0] Group 8 Loss: 5.4689
+[2025-07-06 22:52:12] [Rank 0] Group 9 Loss: 5.4713
+[2025-07-06 22:52:12] [Rank 0] Group 10 Loss: 5.4450
+[2025-07-06 22:52:12] [Rank 0] Group 11 Loss: 5.4567
+[2025-07-06 22:52:12] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 22:52:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 22:52:12] [Rank 0] Group 2 FTA: 0.8281
+[2025-07-06 22:52:12] [Rank 0] Group 3 FTA: 0.9505
+[2025-07-06 22:52:12] [Rank 0] Group 4 FTA: 0.8958
+[2025-07-06 22:52:12] [Rank 0] Group 5 FTA: 0.9245
+[2025-07-06 22:52:12] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-06 22:52:12] [Rank 0] Group 7 FTA: 0.8958
+[2025-07-06 22:52:12] [Rank 0] Group 8 FTA: 0.8750
+[2025-07-06 22:52:12] [Rank 0] Group 9 FTA: 0.9219
+[2025-07-06 22:52:12] [Rank 0] Group 10 FTA: 0.9199
+[2025-07-06 22:52:12] [Rank 0] Group 11 FTA: 0.9316
+[2025-07-06 22:52:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:52:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:52:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:52:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:52:14] [Rank 0] step:5001/10000 train_time:403147ms step_avg:80.61ms
+[2025-07-06 22:52:15] [Rank 0] step:5021/10000 train_time:404754ms step_avg:80.61ms
+[2025-07-06 22:52:18] [Rank 0] step:5041/10000 train_time:406247ms step_avg:80.59ms
+[2025-07-06 22:52:19] [Rank 0] step:5061/10000 train_time:408402ms step_avg:80.70ms
+[2025-07-06 22:52:21] [Rank 0] step:5081/10000 train_time:409894ms step_avg:80.67ms
+[2025-07-06 22:52:22] [Rank 0] step:5101/10000 train_time:411388ms step_avg:80.65ms
+[2025-07-06 22:52:23] [Rank 0] step:5121/10000 train_time:412883ms step_avg:80.63ms
+[2025-07-06 22:52:25] [Rank 0] step:5141/10000 train_time:414615ms step_avg:80.65ms
+[2025-07-06 22:52:27] [Rank 0] step:5161/10000 train_time:416108ms step_avg:80.63ms
+[2025-07-06 22:52:28] [Rank 0] step:5181/10000 train_time:417603ms step_avg:80.60ms
+[2025-07-06 22:52:30] [Rank 0] step:5201/10000 train_time:419101ms step_avg:80.58ms
+[2025-07-06 22:52:32] [Rank 0] step:5221/10000 train_time:420599ms step_avg:80.56ms
+[2025-07-06 22:52:33] [Rank 0] step:5241/10000 train_time:422740ms step_avg:80.66ms
+[2025-07-06 22:52:35] [Rank 0] step:5261/10000 train_time:424237ms step_avg:80.64ms
+[2025-07-06 22:52:36] [Rank 0] step:5281/10000 train_time:425735ms step_avg:80.62ms
+[2025-07-06 22:52:38] [Rank 0] step:5301/10000 train_time:427234ms step_avg:80.59ms
+[2025-07-06 22:52:40] [Rank 0] step:5321/10000 train_time:429382ms step_avg:80.70ms
+[2025-07-06 22:52:41] [Rank 0] step:5341/10000 train_time:430879ms step_avg:80.67ms
+[2025-07-06 22:52:43] [Rank 0] step:5361/10000 train_time:432377ms step_avg:80.65ms
+[2025-07-06 22:52:44] [Rank 0] step:5381/10000 train_time:433874ms step_avg:80.63ms
+[2025-07-06 22:52:47] [Rank 0] step:5401/10000 train_time:436047ms step_avg:80.73ms
+[2025-07-06 22:52:48] [Rank 0] step:5421/10000 train_time:437525ms step_avg:80.71ms
+[2025-07-06 22:52:50] [Rank 0] step:5441/10000 train_time:439026ms step_avg:80.69ms
+[2025-07-06 22:52:51] [Rank 0] step:5461/10000 train_time:440522ms step_avg:80.67ms
+[2025-07-06 22:52:53] [Rank 0] step:5481/10000 train_time:442022ms step_avg:80.65ms
+[2025-07-06 22:52:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:52:56] [Rank 0] PRINT: step:5500/10000 train_loss:0.8886 val_loss:0.8828 train_time:444166ms step_avg:80.76ms
+[2025-07-06 22:52:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:52:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
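(FTA is never expanded in the log; in QA probes of this kind it plausibly means first-token accuracy, i.e. whether the model's top-1 prediction at the answer's first token position is correct, reported overall and per group. A sketch under that assumption; the function and argument names are hypothetical.)

import torch

def first_token_accuracy(logits, answer_pos, answer_tok, group_id, num_groups=12):
    # logits: [batch, seq, vocab]; answer_pos, answer_tok, group_id: [batch].
    pred = logits[torch.arange(logits.size(0)), answer_pos].argmax(dim=-1)
    hit = (pred == answer_tok).float()
    total = hit.mean().item()
    # Per-group mean (groups assumed non-empty in the sampled eval set).
    per_group = [hit[group_id == g].mean().item() for g in range(num_groups)]
    return total, per_group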
+[2025-07-06 22:52:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:58:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:58:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:58:18] [Rank 0] Total Loss: 5.6767
+[2025-07-06 22:58:18] [Rank 0] Total FTA: 0.9206
+[2025-07-06 22:58:18] [Rank 0] Group 0 Loss: 6.0922
+[2025-07-06 22:58:18] [Rank 0] Group 1 Loss: 5.4673
+[2025-07-06 22:58:18] [Rank 0] Group 2 Loss: 5.5763
+[2025-07-06 22:58:18] [Rank 0] Group 3 Loss: 5.6656
+[2025-07-06 22:58:18] [Rank 0] Group 4 Loss: 5.6295
+[2025-07-06 22:58:18] [Rank 0] Group 5 Loss: 5.6104
+[2025-07-06 22:58:18] [Rank 0] Group 6 Loss: 5.5366
+[2025-07-06 22:58:18] [Rank 0] Group 7 Loss: 5.6124
+[2025-07-06 22:58:18] [Rank 0] Group 8 Loss: 5.6455
+[2025-07-06 22:58:18] [Rank 0] Group 9 Loss: 5.6104
+[2025-07-06 22:58:18] [Rank 0] Group 10 Loss: 5.6305
+[2025-07-06 22:58:18] [Rank 0] Group 11 Loss: 5.6556
+[2025-07-06 22:58:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 22:58:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 22:58:18] [Rank 0] Group 2 FTA: 0.9349
+[2025-07-06 22:58:18] [Rank 0] Group 3 FTA: 0.7917
+[2025-07-06 22:58:18] [Rank 0] Group 4 FTA: 0.9271
+[2025-07-06 22:58:18] [Rank 0] Group 5 FTA: 0.9297
+[2025-07-06 22:58:18] [Rank 0] Group 6 FTA: 0.8906
+[2025-07-06 22:58:18] [Rank 0] Group 7 FTA: 0.9089
+[2025-07-06 22:58:18] [Rank 0] Group 8 FTA: 0.8932
+[2025-07-06 22:58:18] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-06 22:58:18] [Rank 0] Group 10 FTA: 0.9043
+[2025-07-06 22:58:18] [Rank 0] Group 11 FTA: 0.9053
+[2025-07-06 22:58:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 22:58:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 22:58:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 22:58:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 22:58:20] [Rank 0] step:5501/10000 train_time:444186ms step_avg:80.75ms
+[2025-07-06 22:58:21] [Rank 0] step:5521/10000 train_time:445673ms step_avg:80.72ms
+[2025-07-06 22:58:23] [Rank 0] step:5541/10000 train_time:447166ms step_avg:80.70ms
+[2025-07-06 22:58:24] [Rank 0] step:5561/10000 train_time:448660ms step_avg:80.68ms
+[2025-07-06 22:58:26] [Rank 0] step:5581/10000 train_time:450412ms step_avg:80.70ms
+[2025-07-06 22:58:28] [Rank 0] step:5601/10000 train_time:452314ms step_avg:80.76ms
+[2025-07-06 22:58:29] [Rank 0] step:5621/10000 train_time:453809ms step_avg:80.73ms
+[2025-07-06 22:58:31] [Rank 0] step:5641/10000 train_time:455304ms step_avg:80.71ms
+[2025-07-06 22:58:32] [Rank 0] step:5661/10000 train_time:456801ms step_avg:80.69ms
+[2025-07-06 22:58:34] [Rank 0] step:5681/10000 train_time:458629ms step_avg:80.73ms
+[2025-07-06 22:58:36] [Rank 0] step:5701/10000 train_time:460127ms step_avg:80.71ms
+[2025-07-06 22:58:37] [Rank 0] step:5721/10000 train_time:461624ms step_avg:80.69ms
+[2025-07-06 22:58:39] [Rank 0] step:5741/10000 train_time:463121ms step_avg:80.67ms
+[2025-07-06 22:58:41] [Rank 0] step:5761/10000 train_time:464871ms step_avg:80.69ms
+[2025-07-06 22:58:42] [Rank 0] step:5781/10000 train_time:466780ms step_avg:80.74ms
+[2025-07-06 22:58:44] [Rank 0] step:5801/10000 train_time:468280ms step_avg:80.72ms
+[2025-07-06 22:58:45] [Rank 0] step:5821/10000 train_time:469780ms step_avg:80.70ms
+[2025-07-06 22:58:47] [Rank 0] step:5841/10000 train_time:471278ms step_avg:80.68ms
+[2025-07-06 22:58:49] [Rank 0] step:5861/10000 train_time:473426ms step_avg:80.78ms
+[2025-07-06 22:58:50] [Rank 0] step:5881/10000 train_time:474925ms step_avg:80.76ms
+[2025-07-06 22:58:52] [Rank 0] step:5901/10000 train_time:476425ms step_avg:80.74ms
+[2025-07-06 22:58:53] [Rank 0] step:5921/10000 train_time:477925ms step_avg:80.72ms
+[2025-07-06 22:58:55] [Rank 0] step:5941/10000 train_time:479424ms step_avg:80.70ms
+[2025-07-06 22:58:57] [Rank 0] step:5961/10000 train_time:481568ms step_avg:80.79ms
+[2025-07-06 22:58:58] [Rank 0] step:5981/10000 train_time:483067ms step_avg:80.77ms
+[2025-07-06 22:59:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:59:01] [Rank 0] PRINT: step:6000/10000 train_loss:0.8805 val_loss:0.8778 train_time:484567ms step_avg:80.76ms
+[2025-07-06 22:59:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:59:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
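(The "[✓] ... curve updated and saved" lines suggest the four plots are redrawn from the full metric history after every detailed evaluation. A minimal sketch of that pattern; the history layout and function name are assumptions, not the script's own code.)

import matplotlib.pyplot as plt

def save_per_class_curves(steps, fta_history, out_path):
    # fta_history: list of 12 per-group series, each aligned with steps.
    plt.figure(figsize=(8, 5))
    for g, series in enumerate(fta_history):
        plt.plot(steps, series, label=f"Group {g}")
    plt.xlabel("step")
    plt.ylabel("FTA")
    plt.legend(ncol=3, fontsize=7)
    plt.savefig(out_path)   # overwrites the previous PNG each evaluation
    plt.close()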
+[2025-07-06 22:59:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:04:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:04:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:04:23] [Rank 0] Total Loss: 5.5644
+[2025-07-06 23:04:23] [Rank 0] Total FTA: 0.9088
+[2025-07-06 23:04:23] [Rank 0] Group 0 Loss: 5.7379
+[2025-07-06 23:04:23] [Rank 0] Group 1 Loss: 5.3662
+[2025-07-06 23:04:23] [Rank 0] Group 2 Loss: 5.3674
+[2025-07-06 23:04:23] [Rank 0] Group 3 Loss: 5.6260
+[2025-07-06 23:04:23] [Rank 0] Group 4 Loss: 5.4838
+[2025-07-06 23:04:23] [Rank 0] Group 5 Loss: 5.5382
+[2025-07-06 23:04:23] [Rank 0] Group 6 Loss: 5.4992
+[2025-07-06 23:04:23] [Rank 0] Group 7 Loss: 5.5795
+[2025-07-06 23:04:23] [Rank 0] Group 8 Loss: 5.5716
+[2025-07-06 23:04:23] [Rank 0] Group 9 Loss: 5.5823
+[2025-07-06 23:04:23] [Rank 0] Group 10 Loss: 5.5467
+[2025-07-06 23:04:23] [Rank 0] Group 11 Loss: 5.6198
+[2025-07-06 23:04:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 23:04:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 23:04:23] [Rank 0] Group 2 FTA: 0.5000
+[2025-07-06 23:04:23] [Rank 0] Group 3 FTA: 0.8750
+[2025-07-06 23:04:23] [Rank 0] Group 4 FTA: 0.9427
+[2025-07-06 23:04:23] [Rank 0] Group 5 FTA: 0.9844
+[2025-07-06 23:04:23] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-06 23:04:23] [Rank 0] Group 7 FTA: 0.9141
+[2025-07-06 23:04:23] [Rank 0] Group 8 FTA: 0.9141
+[2025-07-06 23:04:23] [Rank 0] Group 9 FTA: 0.9180
+[2025-07-06 23:04:23] [Rank 0] Group 10 FTA: 0.9082
+[2025-07-06 23:04:23] [Rank 0] Group 11 FTA: 0.9238
+[2025-07-06 23:04:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 23:04:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 23:04:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 23:04:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 23:04:25] [Rank 0] step:6001/10000 train_time:484587ms step_avg:80.75ms
+[2025-07-06 23:04:26] [Rank 0] step:6021/10000 train_time:486101ms step_avg:80.73ms
+[2025-07-06 23:04:28] [Rank 0] step:6041/10000 train_time:488267ms step_avg:80.83ms
+[2025-07-06 23:04:30] [Rank 0] step:6061/10000 train_time:489760ms step_avg:80.81ms
+[2025-07-06 23:04:31] [Rank 0] step:6081/10000 train_time:491254ms step_avg:80.79ms
+[2025-07-06 23:04:33] [Rank 0] step:6101/10000 train_time:492747ms step_avg:80.76ms
+[2025-07-06 23:04:35] [Rank 0] step:6121/10000 train_time:494290ms step_avg:80.75ms
+[2025-07-06 23:04:36] [Rank 0] step:6141/10000 train_time:496373ms step_avg:80.83ms
+[2025-07-06 23:04:38] [Rank 0] step:6161/10000 train_time:497866ms step_avg:80.81ms
+[2025-07-06 23:04:39] [Rank 0] step:6181/10000 train_time:499360ms step_avg:80.79ms
+[2025-07-06 23:04:41] [Rank 0] step:6201/10000 train_time:500856ms step_avg:80.77ms
+[2025-07-06 23:04:43] [Rank 0] step:6221/10000 train_time:502593ms step_avg:80.79ms
+[2025-07-06 23:04:44] [Rank 0] step:6241/10000 train_time:504087ms step_avg:80.77ms
+[2025-07-06 23:04:46] [Rank 0] step:6261/10000 train_time:505586ms step_avg:80.75ms
+[2025-07-06 23:04:47] [Rank 0] step:6281/10000 train_time:507084ms step_avg:80.73ms
+[2025-07-06 23:04:49] [Rank 0] step:6301/10000 train_time:508634ms step_avg:80.72ms
+[2025-07-06 23:04:51] [Rank 0] step:6321/10000 train_time:510746ms step_avg:80.80ms
+[2025-07-06 23:04:52] [Rank 0] step:6341/10000 train_time:512244ms step_avg:80.78ms
+[2025-07-06 23:04:54] [Rank 0] step:6361/10000 train_time:513882ms step_avg:80.79ms
+[2025-07-06 23:04:55] [Rank 0] step:6381/10000 train_time:515379ms step_avg:80.77ms
+[2025-07-06 23:04:57] [Rank 0] step:6401/10000 train_time:517111ms step_avg:80.79ms
+[2025-07-06 23:04:59] [Rank 0] step:6421/10000 train_time:518612ms step_avg:80.77ms
+[2025-07-06 23:05:00] [Rank 0] step:6441/10000 train_time:520110ms step_avg:80.75ms
+[2025-07-06 23:05:02] [Rank 0] step:6461/10000 train_time:521609ms step_avg:80.73ms
+[2025-07-06 23:05:04] [Rank 0] step:6481/10000 train_time:523109ms step_avg:80.71ms
+[2025-07-06 23:05:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:05:06] [Rank 0] PRINT: step:6500/10000 train_loss:0.8757 val_loss:0.8740 train_time:525277ms step_avg:80.81ms
+[2025-07-06 23:05:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:05:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 23:05:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:10:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:10:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:10:32] [Rank 0] Total Loss: 5.7428
+[2025-07-06 23:10:32] [Rank 0] Total FTA: 0.9054
+[2025-07-06 23:10:32] [Rank 0] Group 0 Loss: 5.9033
+[2025-07-06 23:10:32] [Rank 0] Group 1 Loss: 5.6866
+[2025-07-06 23:10:32] [Rank 0] Group 2 Loss: 5.5249
+[2025-07-06 23:10:32] [Rank 0] Group 3 Loss: 5.8580
+[2025-07-06 23:10:32] [Rank 0] Group 4 Loss: 5.7488
+[2025-07-06 23:10:32] [Rank 0] Group 5 Loss: 5.6968
+[2025-07-06 23:10:32] [Rank 0] Group 6 Loss: 5.6006
+[2025-07-06 23:10:32] [Rank 0] Group 7 Loss: 5.7251
+[2025-07-06 23:10:32] [Rank 0] Group 8 Loss: 5.6988
+[2025-07-06 23:10:32] [Rank 0] Group 9 Loss: 5.6803
+[2025-07-06 23:10:32] [Rank 0] Group 10 Loss: 5.7970
+[2025-07-06 23:10:32] [Rank 0] Group 11 Loss: 5.7620
+[2025-07-06 23:10:32] [Rank 0] Group 0 FTA: 0.8218
+[2025-07-06 23:10:32] [Rank 0] Group 1 FTA: 0.8646
+[2025-07-06 23:10:32] [Rank 0] Group 2 FTA: 0.9245
+[2025-07-06 23:10:32] [Rank 0] Group 3 FTA: 0.9193
+[2025-07-06 23:10:32] [Rank 0] Group 4 FTA: 0.8776
+[2025-07-06 23:10:32] [Rank 0] Group 5 FTA: 0.9245
+[2025-07-06 23:10:32] [Rank 0] Group 6 FTA: 0.9349
+[2025-07-06 23:10:32] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-06 23:10:32] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-06 23:10:32] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-06 23:10:32] [Rank 0] Group 10 FTA: 0.9316
+[2025-07-06 23:10:32] [Rank 0] Group 11 FTA: 0.9258
+[2025-07-06 23:10:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 23:10:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 23:10:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 23:10:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 23:10:33] [Rank 0] step:6501/10000 train_time:525297ms step_avg:80.80ms
+[2025-07-06 23:10:35] [Rank 0] step:6521/10000 train_time:526805ms step_avg:80.79ms
+[2025-07-06 23:10:36] [Rank 0] step:6541/10000 train_time:528297ms step_avg:80.77ms
+[2025-07-06 23:10:38] [Rank 0] step:6561/10000 train_time:529790ms step_avg:80.75ms
+[2025-07-06 23:10:40] [Rank 0] step:6581/10000 train_time:531949ms step_avg:80.83ms
+[2025-07-06 23:10:41] [Rank 0] step:6601/10000 train_time:533443ms step_avg:80.81ms
+[2025-07-06 23:10:43] [Rank 0] step:6621/10000 train_time:534936ms step_avg:80.79ms
+[2025-07-06 23:10:44] [Rank 0] step:6641/10000 train_time:536431ms step_avg:80.78ms
+[2025-07-06 23:10:46] [Rank 0] step:6661/10000 train_time:538616ms step_avg:80.86ms
+[2025-07-06 23:10:48] [Rank 0] step:6681/10000 train_time:540092ms step_avg:80.84ms
+[2025-07-06 23:10:49] [Rank 0] step:6701/10000 train_time:541587ms step_avg:80.82ms
+[2025-07-06 23:10:51] [Rank 0] step:6721/10000 train_time:543083ms step_avg:80.80ms
+[2025-07-06 23:10:52] [Rank 0] step:6741/10000 train_time:544578ms step_avg:80.79ms
+[2025-07-06 23:10:55] [Rank 0] step:6761/10000 train_time:546735ms step_avg:80.87ms
+[2025-07-06 23:10:56] [Rank 0] step:6781/10000 train_time:548230ms step_avg:80.85ms
+[2025-07-06 23:10:58] [Rank 0] step:6801/10000 train_time:549727ms step_avg:80.83ms
+[2025-07-06 23:10:59] [Rank 0] step:6821/10000 train_time:551226ms step_avg:80.81ms
+[2025-07-06 23:11:01] [Rank 0] step:6841/10000 train_time:553399ms step_avg:80.89ms
+[2025-07-06 23:11:03] [Rank 0] step:6861/10000 train_time:554877ms step_avg:80.87ms
+[2025-07-06 23:11:04] [Rank 0] step:6881/10000 train_time:556374ms step_avg:80.86ms
+[2025-07-06 23:11:06] [Rank 0] step:6901/10000 train_time:557872ms step_avg:80.84ms
+[2025-07-06 23:11:07] [Rank 0] step:6921/10000 train_time:559372ms step_avg:80.82ms
+[2025-07-06 23:11:09] [Rank 0] step:6941/10000 train_time:561520ms step_avg:80.90ms
+[2025-07-06 23:11:11] [Rank 0] step:6961/10000 train_time:563019ms step_avg:80.88ms
+[2025-07-06 23:11:12] [Rank 0] step:6981/10000 train_time:564520ms step_avg:80.87ms
+[2025-07-06 23:11:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:11:15] [Rank 0] PRINT: step:7000/10000 train_loss:0.8726 val_loss:0.8729 train_time:566144ms step_avg:80.88ms
+[2025-07-06 23:11:15] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:11:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
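Reader's note: every detailed evaluation in this log lands on 5633 samples rather than the ~5000 requested. That is the expected behavior of per-group stratified sampling when each group's quota is rounded up. A minimal sketch of that behavior in Python (all names hypothetical; this is not the script's actual code):

```python
import math
import random
from collections import defaultdict

def stratified_sample(samples, target=5000, seed=0):
    """Draw ~target samples while preserving each group's share.

    Rounding every per-group quota up with ceil() keeps small groups
    represented but overshoots the target slightly -- consistent with
    the logged 5633 > ~5000. Hypothetical sketch only.
    """
    rng = random.Random(seed)
    by_group = defaultdict(list)
    for s in samples:
        by_group[s["group"]].append(s)
    frac = target / len(samples)
    picked = []
    for items in by_group.values():
        k = min(len(items), math.ceil(len(items) * frac))
        picked.extend(rng.sample(items, k))
    return picked
```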
+[2025-07-06 23:11:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:16:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:16:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:16:36] [Rank 0] Total Loss: 5.7434
+[2025-07-06 23:16:36] [Rank 0] Total FTA: 0.8963
+[2025-07-06 23:16:36] [Rank 0] Group 0 Loss: 5.9362
+[2025-07-06 23:16:36] [Rank 0] Group 1 Loss: 5.7534
+[2025-07-06 23:16:36] [Rank 0] Group 2 Loss: 5.8474
+[2025-07-06 23:16:36] [Rank 0] Group 3 Loss: 5.6873
+[2025-07-06 23:16:36] [Rank 0] Group 4 Loss: 5.6451
+[2025-07-06 23:16:36] [Rank 0] Group 5 Loss: 5.6324
+[2025-07-06 23:16:36] [Rank 0] Group 6 Loss: 5.6094
+[2025-07-06 23:16:36] [Rank 0] Group 7 Loss: 5.7242
+[2025-07-06 23:16:36] [Rank 0] Group 8 Loss: 5.7476
+[2025-07-06 23:16:36] [Rank 0] Group 9 Loss: 5.6877
+[2025-07-06 23:16:36] [Rank 0] Group 10 Loss: 5.7484
+[2025-07-06 23:16:36] [Rank 0] Group 11 Loss: 5.7227
+[2025-07-06 23:16:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 23:16:36] [Rank 0] Group 1 FTA: 0.8568
+[2025-07-06 23:16:36] [Rank 0] Group 2 FTA: 0.7552
+[2025-07-06 23:16:36] [Rank 0] Group 3 FTA: 0.7812
+[2025-07-06 23:16:36] [Rank 0] Group 4 FTA: 0.9297
+[2025-07-06 23:16:36] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-06 23:16:36] [Rank 0] Group 6 FTA: 0.9089
+[2025-07-06 23:16:37] [Rank 0] Group 7 FTA: 0.9141
+[2025-07-06 23:16:37] [Rank 0] Group 8 FTA: 0.8802
+[2025-07-06 23:16:37] [Rank 0] Group 9 FTA: 0.8711
+[2025-07-06 23:16:37] [Rank 0] Group 10 FTA: 0.8945
+[2025-07-06 23:16:37] [Rank 0] Group 11 FTA: 0.9023
+[2025-07-06 23:16:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 23:16:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 23:16:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 23:16:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 23:16:38] [Rank 0] step:7001/10000 train_time:566167ms step_avg:80.87ms
+[2025-07-06 23:16:40] [Rank 0] step:7021/10000 train_time:568355ms step_avg:80.95ms
+[2025-07-06 23:16:42] [Rank 0] step:7041/10000 train_time:569828ms step_avg:80.93ms
+[2025-07-06 23:16:43] [Rank 0] step:7061/10000 train_time:571319ms step_avg:80.91ms
+[2025-07-06 23:16:45] [Rank 0] step:7081/10000 train_time:572811ms step_avg:80.89ms
+[2025-07-06 23:16:46] [Rank 0] step:7101/10000 train_time:574305ms step_avg:80.88ms
+[2025-07-06 23:16:48] [Rank 0] step:7121/10000 train_time:576037ms step_avg:80.89ms
+[2025-07-06 23:16:49] [Rank 0] step:7141/10000 train_time:577532ms step_avg:80.88ms
+[2025-07-06 23:16:51] [Rank 0] step:7161/10000 train_time:579025ms step_avg:80.86ms
+[2025-07-06 23:16:52] [Rank 0] step:7181/10000 train_time:580520ms step_avg:80.84ms
+[2025-07-06 23:16:55] [Rank 0] step:7201/10000 train_time:582269ms step_avg:80.86ms
+[2025-07-06 23:16:56] [Rank 0] step:7221/10000 train_time:584166ms step_avg:80.90ms
+[2025-07-06 23:16:57] [Rank 0] step:7241/10000 train_time:585662ms step_avg:80.88ms
+[2025-07-06 23:16:59] [Rank 0] step:7261/10000 train_time:587161ms step_avg:80.87ms
+[2025-07-06 23:17:00] [Rank 0] step:7281/10000 train_time:588659ms step_avg:80.85ms
+[2025-07-06 23:17:02] [Rank 0] step:7301/10000 train_time:590390ms step_avg:80.86ms
+[2025-07-06 23:17:04] [Rank 0] step:7321/10000 train_time:591887ms step_avg:80.85ms
+[2025-07-06 23:17:05] [Rank 0] step:7341/10000 train_time:593386ms step_avg:80.83ms
+[2025-07-06 23:17:07] [Rank 0] step:7361/10000 train_time:594886ms step_avg:80.82ms
+[2025-07-06 23:17:09] [Rank 0] step:7381/10000 train_time:596435ms step_avg:80.81ms
+[2025-07-06 23:17:10] [Rank 0] step:7401/10000 train_time:598535ms step_avg:80.87ms
+[2025-07-06 23:17:12] [Rank 0] step:7421/10000 train_time:600034ms step_avg:80.86ms
+[2025-07-06 23:17:13] [Rank 0] step:7441/10000 train_time:601533ms step_avg:80.84ms
+[2025-07-06 23:17:15] [Rank 0] step:7461/10000 train_time:603033ms step_avg:80.82ms
+[2025-07-06 23:17:17] [Rank 0] step:7481/10000 train_time:604767ms step_avg:80.84ms
+[2025-07-06 23:17:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:17:19] [Rank 0] PRINT: step:7500/10000 train_loss:0.8706 val_loss:0.8709 train_time:606266ms step_avg:80.84ms
+[2025-07-06 23:17:19] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:17:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
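The recurring divisibility warning is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches (1835008 tokens) fit and the remaining 131072 tokens are skipped. A sketch of the check the warning implies (variable names assumed):

```python
val_tokens = 1966080
val_batch_size = 262144  # assumed: tokens consumed per validation batch

num_val_batches = val_tokens // val_batch_size            # floor -> 7 batches
leftover = val_tokens - num_val_batches * val_batch_size  # 131072 tokens
if leftover:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
```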
+[2025-07-06 23:17:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:22:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:22:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:22:42] [Rank 0] Total Loss: 5.8741
+[2025-07-06 23:22:42] [Rank 0] Total FTA: 0.9009
+[2025-07-06 23:22:42] [Rank 0] Group 0 Loss: 6.0841
+[2025-07-06 23:22:42] [Rank 0] Group 1 Loss: 5.8040
+[2025-07-06 23:22:42] [Rank 0] Group 2 Loss: 6.0251
+[2025-07-06 23:22:42] [Rank 0] Group 3 Loss: 5.8612
+[2025-07-06 23:22:42] [Rank 0] Group 4 Loss: 5.7114
+[2025-07-06 23:22:42] [Rank 0] Group 5 Loss: 5.7173
+[2025-07-06 23:22:42] [Rank 0] Group 6 Loss: 5.8250
+[2025-07-06 23:22:42] [Rank 0] Group 7 Loss: 5.8437
+[2025-07-06 23:22:42] [Rank 0] Group 8 Loss: 5.8388
+[2025-07-06 23:22:42] [Rank 0] Group 9 Loss: 5.8433
+[2025-07-06 23:22:42] [Rank 0] Group 10 Loss: 5.8585
+[2025-07-06 23:22:42] [Rank 0] Group 11 Loss: 5.8695
+[2025-07-06 23:22:42] [Rank 0] Group 0 FTA: 0.8375
+[2025-07-06 23:22:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 23:22:42] [Rank 0] Group 2 FTA: 0.7682
+[2025-07-06 23:22:42] [Rank 0] Group 3 FTA: 0.8594
+[2025-07-06 23:22:42] [Rank 0] Group 4 FTA: 0.9453
+[2025-07-06 23:22:42] [Rank 0] Group 5 FTA: 0.9427
+[2025-07-06 23:22:42] [Rank 0] Group 6 FTA: 0.9531
+[2025-07-06 23:22:42] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-06 23:22:42] [Rank 0] Group 8 FTA: 0.8906
+[2025-07-06 23:22:42] [Rank 0] Group 9 FTA: 0.9180
+[2025-07-06 23:22:42] [Rank 0] Group 10 FTA: 0.9082
+[2025-07-06 23:22:42] [Rank 0] Group 11 FTA: 0.9033
+[2025-07-06 23:22:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 23:22:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 23:22:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 23:22:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 23:22:43] [Rank 0] step:7501/10000 train_time:606287ms step_avg:80.83ms
+[2025-07-06 23:22:45] [Rank 0] step:7521/10000 train_time:607805ms step_avg:80.81ms
+[2025-07-06 23:22:46] [Rank 0] step:7541/10000 train_time:609297ms step_avg:80.80ms
+[2025-07-06 23:22:48] [Rank 0] step:7561/10000 train_time:610846ms step_avg:80.79ms
+[2025-07-06 23:22:49] [Rank 0] step:7581/10000 train_time:612532ms step_avg:80.80ms
+[2025-07-06 23:22:51] [Rank 0] step:7601/10000 train_time:614025ms step_avg:80.78ms
+[2025-07-06 23:22:52] [Rank 0] step:7621/10000 train_time:615519ms step_avg:80.77ms
+[2025-07-06 23:22:54] [Rank 0] step:7641/10000 train_time:617014ms step_avg:80.75ms
+[2025-07-06 23:22:56] [Rank 0] step:7661/10000 train_time:619172ms step_avg:80.82ms
+[2025-07-06 23:22:58] [Rank 0] step:7681/10000 train_time:620668ms step_avg:80.81ms
+[2025-07-06 23:22:59] [Rank 0] step:7701/10000 train_time:622167ms step_avg:80.79ms
+[2025-07-06 23:23:01] [Rank 0] step:7721/10000 train_time:623665ms step_avg:80.78ms
+[2025-07-06 23:23:03] [Rank 0] step:7741/10000 train_time:625163ms step_avg:80.76ms
+[2025-07-06 23:23:04] [Rank 0] step:7761/10000 train_time:627317ms step_avg:80.83ms
+[2025-07-06 23:23:06] [Rank 0] step:7781/10000 train_time:628814ms step_avg:80.81ms
+[2025-07-06 23:23:07] [Rank 0] step:7801/10000 train_time:630312ms step_avg:80.80ms
+[2025-07-06 23:23:09] [Rank 0] step:7821/10000 train_time:631811ms step_avg:80.78ms
+[2025-07-06 23:23:11] [Rank 0] step:7841/10000 train_time:633957ms step_avg:80.85ms
+[2025-07-06 23:23:12] [Rank 0] step:7861/10000 train_time:635455ms step_avg:80.84ms
+[2025-07-06 23:23:14] [Rank 0] step:7881/10000 train_time:636955ms step_avg:80.82ms
+[2025-07-06 23:23:15] [Rank 0] step:7901/10000 train_time:638455ms step_avg:80.81ms
+[2025-07-06 23:23:18] [Rank 0] step:7921/10000 train_time:640622ms step_avg:80.88ms
+[2025-07-06 23:23:19] [Rank 0] step:7941/10000 train_time:642100ms step_avg:80.86ms
+[2025-07-06 23:23:21] [Rank 0] step:7961/10000 train_time:643600ms step_avg:80.84ms
+[2025-07-06 23:23:22] [Rank 0] step:7981/10000 train_time:645099ms step_avg:80.83ms
+[2025-07-06 23:23:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:23:24] [Rank 0] PRINT: step:8000/10000 train_loss:0.8690 val_loss:0.8693 train_time:646598ms step_avg:80.82ms
+[2025-07-06 23:23:24] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:23:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
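The step_avg field in these lines is simply the cumulative wall-clock training time divided by the number of steps completed; for example, at step 8000 the log shows 646598 ms / 8000 steps ≈ 80.82 ms. A one-liner that reproduces the logged value:

```python
def step_avg_ms(train_time_ms: int, step: int) -> float:
    """Cumulative training wall-clock time divided by steps completed."""
    return train_time_ms / step

# Reproduces the value logged at step 8000:
assert f"{step_avg_ms(646598, 8000):.2f}" == "80.82"
```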
+[2025-07-06 23:23:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:28:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:28:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:28:49] [Rank 0] Total Loss: 5.8760
+[2025-07-06 23:28:49] [Rank 0] Total FTA: 0.9210
+[2025-07-06 23:28:49] [Rank 0] Group 0 Loss: 6.1912
+[2025-07-06 23:28:49] [Rank 0] Group 1 Loss: 5.7607
+[2025-07-06 23:28:49] [Rank 0] Group 2 Loss: 5.8834
+[2025-07-06 23:28:49] [Rank 0] Group 3 Loss: 5.7362
+[2025-07-06 23:28:49] [Rank 0] Group 4 Loss: 5.8354
+[2025-07-06 23:28:49] [Rank 0] Group 5 Loss: 5.7803
+[2025-07-06 23:28:49] [Rank 0] Group 6 Loss: 5.7310
+[2025-07-06 23:28:49] [Rank 0] Group 7 Loss: 5.9131
+[2025-07-06 23:28:49] [Rank 0] Group 8 Loss: 5.9135
+[2025-07-06 23:28:49] [Rank 0] Group 9 Loss: 5.8417
+[2025-07-06 23:28:49] [Rank 0] Group 10 Loss: 5.8226
+[2025-07-06 23:28:49] [Rank 0] Group 11 Loss: 5.8450
+[2025-07-06 23:28:49] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-06 23:28:49] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 23:28:49] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-06 23:28:49] [Rank 0] Group 3 FTA: 0.9167
+[2025-07-06 23:28:49] [Rank 0] Group 4 FTA: 0.9323
+[2025-07-06 23:28:49] [Rank 0] Group 5 FTA: 0.8958
+[2025-07-06 23:28:49] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-06 23:28:49] [Rank 0] Group 7 FTA: 0.9531
+[2025-07-06 23:28:49] [Rank 0] Group 8 FTA: 0.9115
+[2025-07-06 23:28:49] [Rank 0] Group 9 FTA: 0.9219
+[2025-07-06 23:28:49] [Rank 0] Group 10 FTA: 0.9121
+[2025-07-06 23:28:49] [Rank 0] Group 11 FTA: 0.9385
+[2025-07-06 23:28:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 23:28:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 23:28:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 23:28:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 23:28:51] [Rank 0] step:8001/10000 train_time:646620ms step_avg:80.82ms
+[2025-07-06 23:28:53] [Rank 0] step:8021/10000 train_time:648780ms step_avg:80.89ms
+[2025-07-06 23:28:54] [Rank 0] step:8041/10000 train_time:650273ms step_avg:80.87ms
+[2025-07-06 23:28:56] [Rank 0] step:8061/10000 train_time:651766ms step_avg:80.85ms
+[2025-07-06 23:28:57] [Rank 0] step:8081/10000 train_time:653258ms step_avg:80.84ms
+[2025-07-06 23:28:59] [Rank 0] step:8101/10000 train_time:654751ms step_avg:80.82ms
+[2025-07-06 23:29:01] [Rank 0] step:8121/10000 train_time:656893ms step_avg:80.89ms
+[2025-07-06 23:29:02] [Rank 0] step:8141/10000 train_time:658387ms step_avg:80.87ms
+[2025-07-06 23:29:04] [Rank 0] step:8161/10000 train_time:659883ms step_avg:80.86ms
+[2025-07-06 23:29:05] [Rank 0] step:8181/10000 train_time:661378ms step_avg:80.84ms
+[2025-07-06 23:29:08] [Rank 0] step:8201/10000 train_time:663624ms step_avg:80.92ms
+[2025-07-06 23:29:09] [Rank 0] step:8221/10000 train_time:665179ms step_avg:80.91ms
+[2025-07-06 23:29:11] [Rank 0] step:8241/10000 train_time:666675ms step_avg:80.90ms
+[2025-07-06 23:29:12] [Rank 0] step:8261/10000 train_time:668180ms step_avg:80.88ms
+[2025-07-06 23:29:14] [Rank 0] step:8281/10000 train_time:669677ms step_avg:80.87ms
+[2025-07-06 23:29:16] [Rank 0] step:8301/10000 train_time:671839ms step_avg:80.93ms
+[2025-07-06 23:29:17] [Rank 0] step:8321/10000 train_time:673335ms step_avg:80.92ms
+[2025-07-06 23:29:19] [Rank 0] step:8341/10000 train_time:674833ms step_avg:80.91ms
+[2025-07-06 23:29:20] [Rank 0] step:8361/10000 train_time:676332ms step_avg:80.89ms
+[2025-07-06 23:29:22] [Rank 0] step:8381/10000 train_time:678480ms step_avg:80.95ms
+[2025-07-06 23:29:24] [Rank 0] step:8401/10000 train_time:679978ms step_avg:80.94ms
+[2025-07-06 23:29:25] [Rank 0] step:8421/10000 train_time:681475ms step_avg:80.93ms
+[2025-07-06 23:29:27] [Rank 0] step:8441/10000 train_time:682974ms step_avg:80.91ms
+[2025-07-06 23:29:29] [Rank 0] step:8461/10000 train_time:685145ms step_avg:80.98ms
+[2025-07-06 23:29:31] [Rank 0] step:8481/10000 train_time:686624ms step_avg:80.96ms
+[2025-07-06 23:29:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:29:33] [Rank 0] PRINT: step:8500/10000 train_loss:0.8674 val_loss:0.8685 train_time:688123ms step_avg:80.96ms
+[2025-07-06 23:29:33] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:29:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
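The log never expands the abbreviation FTA. In a QA evaluation it plausibly stands for first-token accuracy: whether the model's top-1 prediction at the answer position matches the first token of the gold answer. A sketch under that assumption (this is a guess at the metric, not the script's code):

```python
import torch

def first_token_accuracy(logits, first_answer_token, answer_pos):
    """Assumed reading of "FTA": top-1 accuracy on the first answer token.

    logits:             (batch, seq_len, vocab) model outputs
    first_answer_token: (batch,) gold first token id of each answer
    answer_pos:         (batch,) position whose logits predict that token
    """
    rows = torch.arange(logits.size(0))
    preds = logits[rows, answer_pos].argmax(dim=-1)  # (batch,)
    return (preds == first_answer_token).float().mean().item()
```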
+[2025-07-06 23:29:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:34:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:34:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:34:55] [Rank 0] Total Loss: 5.9497
+[2025-07-06 23:34:55] [Rank 0] Total FTA: 0.9173
+[2025-07-06 23:34:55] [Rank 0] Group 0 Loss: 6.3628
+[2025-07-06 23:34:55] [Rank 0] Group 1 Loss: 5.8490
+[2025-07-06 23:34:55] [Rank 0] Group 2 Loss: 5.8075
+[2025-07-06 23:34:55] [Rank 0] Group 3 Loss: 5.8672
+[2025-07-06 23:34:55] [Rank 0] Group 4 Loss: 5.8763
+[2025-07-06 23:34:55] [Rank 0] Group 5 Loss: 5.7841
+[2025-07-06 23:34:55] [Rank 0] Group 6 Loss: 5.8509
+[2025-07-06 23:34:55] [Rank 0] Group 7 Loss: 5.9132
+[2025-07-06 23:34:55] [Rank 0] Group 8 Loss: 5.9164
+[2025-07-06 23:34:55] [Rank 0] Group 9 Loss: 5.8144
+[2025-07-06 23:34:55] [Rank 0] Group 10 Loss: 5.8995
+[2025-07-06 23:34:55] [Rank 0] Group 11 Loss: 5.9731
+[2025-07-06 23:34:55] [Rank 0] Group 0 FTA: 0.8244
+[2025-07-06 23:34:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 23:34:55] [Rank 0] Group 2 FTA: 0.9349
+[2025-07-06 23:34:55] [Rank 0] Group 3 FTA: 0.9505
+[2025-07-06 23:34:55] [Rank 0] Group 4 FTA: 0.9010
+[2025-07-06 23:34:55] [Rank 0] Group 5 FTA: 0.9479
+[2025-07-06 23:34:55] [Rank 0] Group 6 FTA: 0.8880
+[2025-07-06 23:34:55] [Rank 0] Group 7 FTA: 0.9297
+[2025-07-06 23:34:55] [Rank 0] Group 8 FTA: 0.9323
+[2025-07-06 23:34:55] [Rank 0] Group 9 FTA: 0.9258
+[2025-07-06 23:34:55] [Rank 0] Group 10 FTA: 0.9180
+[2025-07-06 23:34:55] [Rank 0] Group 11 FTA: 0.9297
+[2025-07-06 23:34:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 23:34:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 23:34:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 23:34:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 23:34:57] [Rank 0] step:8501/10000 train_time:688145ms step_avg:80.95ms
+[2025-07-06 23:34:58] [Rank 0] step:8521/10000 train_time:689659ms step_avg:80.94ms
+[2025-07-06 23:35:00] [Rank 0] step:8541/10000 train_time:691151ms step_avg:80.92ms
+[2025-07-06 23:35:02] [Rank 0] step:8561/10000 train_time:693313ms step_avg:80.98ms
+[2025-07-06 23:35:03] [Rank 0] step:8581/10000 train_time:694805ms step_avg:80.97ms
+[2025-07-06 23:35:05] [Rank 0] step:8601/10000 train_time:696299ms step_avg:80.96ms
+[2025-07-06 23:35:06] [Rank 0] step:8621/10000 train_time:697792ms step_avg:80.94ms
+[2025-07-06 23:35:08] [Rank 0] step:8641/10000 train_time:699286ms step_avg:80.93ms
+[2025-07-06 23:35:10] [Rank 0] step:8661/10000 train_time:701424ms step_avg:80.99ms
+[2025-07-06 23:35:11] [Rank 0] step:8681/10000 train_time:702918ms step_avg:80.97ms
+[2025-07-06 23:35:13] [Rank 0] step:8701/10000 train_time:704414ms step_avg:80.96ms
+[2025-07-06 23:35:14] [Rank 0] step:8721/10000 train_time:705911ms step_avg:80.94ms
+[2025-07-06 23:35:17] [Rank 0] step:8741/10000 train_time:708067ms step_avg:81.01ms
+[2025-07-06 23:35:18] [Rank 0] step:8761/10000 train_time:709563ms step_avg:80.99ms
+[2025-07-06 23:35:20] [Rank 0] step:8781/10000 train_time:711060ms step_avg:80.98ms
+[2025-07-06 23:35:21] [Rank 0] step:8801/10000 train_time:712559ms step_avg:80.96ms
+[2025-07-06 23:35:23] [Rank 0] step:8821/10000 train_time:714057ms step_avg:80.95ms
+[2025-07-06 23:35:25] [Rank 0] step:8841/10000 train_time:716210ms step_avg:81.01ms
+[2025-07-06 23:35:26] [Rank 0] step:8861/10000 train_time:717708ms step_avg:81.00ms
+[2025-07-06 23:35:28] [Rank 0] step:8881/10000 train_time:719309ms step_avg:80.99ms
+[2025-07-06 23:35:29] [Rank 0] step:8901/10000 train_time:720806ms step_avg:80.98ms
+[2025-07-06 23:35:32] [Rank 0] step:8921/10000 train_time:722946ms step_avg:81.04ms
+[2025-07-06 23:35:33] [Rank 0] step:8941/10000 train_time:724446ms step_avg:81.03ms
+[2025-07-06 23:35:35] [Rank 0] step:8961/10000 train_time:725944ms step_avg:81.01ms
+[2025-07-06 23:35:36] [Rank 0] step:8981/10000 train_time:727444ms step_avg:81.00ms
+[2025-07-06 23:35:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:35:38] [Rank 0] PRINT: step:9000/10000 train_loss:0.8662 val_loss:0.8679 train_time:728942ms step_avg:80.99ms
+[2025-07-06 23:35:38] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:35:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
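The four [✓] lines after each evaluation show that the curve PNGs are regenerated in place every 500 steps, one series per group, which is why the same file paths reappear as "updated and saved" throughout the log. A minimal matplotlib sketch of that pattern (the history layout is assumed, not taken from the script):

```python
import matplotlib.pyplot as plt

def save_per_class_curves(history, out_path):
    """Redraw and overwrite the curve PNG from the full metric history.

    history: {group_id: [(step, value), ...]} -- layout assumed.
    Overwriting on every call matches the repeated
    '[✓] ... curve updated and saved to: ...' log lines.
    """
    fig, ax = plt.subplots(figsize=(8, 5))
    for group in sorted(history):
        steps, values = zip(*history[group])
        ax.plot(steps, values, label=f"Group {group}")
    ax.set_xlabel("step")
    ax.legend(ncol=3, fontsize=8)
    fig.savefig(out_path, dpi=150)
    plt.close(fig)
```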
+[2025-07-06 23:35:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:41:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:41:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:41:01] [Rank 0] Total Loss: 5.9557
+[2025-07-06 23:41:01] [Rank 0] Total FTA: 0.9231
+[2025-07-06 23:41:01] [Rank 0] Group 0 Loss: 6.2087
+[2025-07-06 23:41:01] [Rank 0] Group 1 Loss: 6.0057
+[2025-07-06 23:41:01] [Rank 0] Group 2 Loss: 5.9662
+[2025-07-06 23:41:01] [Rank 0] Group 3 Loss: 5.8218
+[2025-07-06 23:41:01] [Rank 0] Group 4 Loss: 5.9265
+[2025-07-06 23:41:01] [Rank 0] Group 5 Loss: 5.8614
+[2025-07-06 23:41:01] [Rank 0] Group 6 Loss: 5.8270
+[2025-07-06 23:41:01] [Rank 0] Group 7 Loss: 5.9828
+[2025-07-06 23:41:01] [Rank 0] Group 8 Loss: 5.9842
+[2025-07-06 23:41:01] [Rank 0] Group 9 Loss: 5.7815
+[2025-07-06 23:41:01] [Rank 0] Group 10 Loss: 5.9417
+[2025-07-06 23:41:01] [Rank 0] Group 11 Loss: 5.9176
+[2025-07-06 23:41:01] [Rank 0] Group 0 FTA: 0.8492
+[2025-07-06 23:41:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 23:41:01] [Rank 0] Group 2 FTA: 0.9193
+[2025-07-06 23:41:01] [Rank 0] Group 3 FTA: 0.9453
+[2025-07-06 23:41:01] [Rank 0] Group 4 FTA: 0.9401
+[2025-07-06 23:41:01] [Rank 0] Group 5 FTA: 0.8932
+[2025-07-06 23:41:01] [Rank 0] Group 6 FTA: 0.9427
+[2025-07-06 23:41:01] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-06 23:41:01] [Rank 0] Group 8 FTA: 0.9297
+[2025-07-06 23:41:01] [Rank 0] Group 9 FTA: 0.9258
+[2025-07-06 23:41:01] [Rank 0] Group 10 FTA: 0.9336
+[2025-07-06 23:41:01] [Rank 0] Group 11 FTA: 0.9258
+[2025-07-06 23:41:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-06 23:41:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-06 23:41:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-06 23:41:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-06 23:41:03] [Rank 0] step:9001/10000 train_time:729702ms step_avg:81.07ms
+[2025-07-06 23:41:05] [Rank 0] step:9021/10000 train_time:731205ms step_avg:81.06ms
+[2025-07-06 23:41:06] [Rank 0] step:9041/10000 train_time:732694ms step_avg:81.04ms
+[2025-07-06 23:41:08] [Rank 0] step:9061/10000 train_time:734187ms step_avg:81.03ms
+[2025-07-06 23:41:09] [Rank 0] step:9081/10000 train_time:735680ms step_avg:81.01ms
+[2025-07-06 23:41:11] [Rank 0] step:9101/10000 train_time:737815ms step_avg:81.07ms
+[2025-07-06 23:41:13] [Rank 0] step:9121/10000 train_time:739310ms step_avg:81.06ms
+[2025-07-06 23:41:14] [Rank 0] step:9141/10000 train_time:740806ms step_avg:81.04ms
+[2025-07-06 23:41:16] [Rank 0] step:9161/10000 train_time:742302ms step_avg:81.03ms
+[2025-07-06 23:41:18] [Rank 0] step:9181/10000 train_time:744482ms step_avg:81.09ms
+[2025-07-06 23:41:20] [Rank 0] step:9201/10000 train_time:745958ms step_avg:81.07ms
+[2025-07-06 23:41:21] [Rank 0] step:9221/10000 train_time:747453ms step_avg:81.06ms
+[2025-07-06 23:41:23] [Rank 0] step:9241/10000 train_time:748949ms step_avg:81.05ms
+[2025-07-06 23:41:24] [Rank 0] step:9261/10000 train_time:750445ms step_avg:81.03ms
+[2025-07-06 23:41:26] [Rank 0] step:9281/10000 train_time:752599ms step_avg:81.09ms
+[2025-07-06 23:41:28] [Rank 0] step:9301/10000 train_time:754097ms step_avg:81.08ms
+[2025-07-06 23:41:29] [Rank 0] step:9321/10000 train_time:755596ms step_avg:81.06ms
+[2025-07-06 23:41:31] [Rank 0] step:9341/10000 train_time:757093ms step_avg:81.05ms
+[2025-07-06 23:41:33] [Rank 0] step:9361/10000 train_time:758849ms step_avg:81.06ms
+[2025-07-06 23:41:34] [Rank 0] step:9381/10000 train_time:760743ms step_avg:81.09ms
+[2025-07-06 23:41:36] [Rank 0] step:9401/10000 train_time:762241ms step_avg:81.08ms
+[2025-07-06 23:41:37] [Rank 0] step:9421/10000 train_time:763741ms step_avg:81.07ms
+[2025-07-06 23:41:39] [Rank 0] step:9441/10000 train_time:765239ms step_avg:81.05ms
+[2025-07-06 23:41:41] [Rank 0] step:9461/10000 train_time:767385ms step_avg:81.11ms
+[2025-07-06 23:41:42] [Rank 0] step:9481/10000 train_time:768883ms step_avg:81.10ms
+[2025-07-06 23:41:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:41:45] [Rank 0] PRINT: step:9500/10000 train_loss:0.8651 val_loss:0.8674 train_time:770382ms step_avg:81.09ms
+[2025-07-06 23:41:45] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:41:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
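Since every metric above is also printed to this text log, the validation curves can be reconstructed without the PNGs. A small parser for the PRINT step lines, based on the exact format shown in this log:

```python
import re

VAL_LINE = re.compile(
    r"PRINT: step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_val_curve(log_path):
    """Collect (step, train_loss, val_loss) from a training log like this one."""
    points = []
    with open(log_path, encoding="utf-8") as f:
        for line in f:
            m = VAL_LINE.search(line)
            if m:
                step, train, val = m.groups()
                points.append((int(step), float(train), float(val)))
    return points
```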
+[2025-07-06 23:41:45] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:41:45] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:47:07] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:47:07] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:47:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:47:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:47:07] [Rank 0] Total Loss: 6.0436 +[2025-07-06 23:47:07] [Rank 0] Total Loss: 6.0436 +[2025-07-06 23:47:07] [Rank 0] Total FTA: 0.9311 +[2025-07-06 23:47:07] [Rank 0] Total FTA: 0.9311 +[2025-07-06 23:47:07] [Rank 0] Group 0 Loss: 6.4635 +[2025-07-06 23:47:07] [Rank 0] Group 0 Loss: 6.4635 +[2025-07-06 23:47:07] [Rank 0] Group 1 Loss: 6.1574 +[2025-07-06 23:47:07] [Rank 0] Group 1 Loss: 6.1574 +[2025-07-06 23:47:07] [Rank 0] Group 2 Loss: 5.9596 +[2025-07-06 23:47:07] [Rank 0] Group 2 Loss: 5.9596 +[2025-07-06 23:47:07] [Rank 0] Group 3 Loss: 5.9109 +[2025-07-06 23:47:07] [Rank 0] Group 3 Loss: 5.9109 +[2025-07-06 23:47:07] [Rank 0] Group 4 Loss: 5.9897 +[2025-07-06 23:47:07] [Rank 0] Group 4 Loss: 5.9897 +[2025-07-06 23:47:07] [Rank 0] Group 5 Loss: 5.9281 +[2025-07-06 23:47:07] [Rank 0] Group 5 Loss: 5.9281 +[2025-07-06 23:47:07] [Rank 0] Group 6 Loss: 5.8703 +[2025-07-06 23:47:07] [Rank 0] Group 6 Loss: 5.8703 +[2025-07-06 23:47:07] [Rank 0] Group 7 Loss: 6.0450 +[2025-07-06 23:47:07] [Rank 0] Group 7 Loss: 6.0450 +[2025-07-06 23:47:07] [Rank 0] Group 8 Loss: 5.9573 +[2025-07-06 23:47:07] [Rank 0] Group 8 Loss: 5.9573 +[2025-07-06 23:47:07] [Rank 0] Group 9 Loss: 6.0913 +[2025-07-06 23:47:07] [Rank 0] Group 9 Loss: 6.0913 +[2025-07-06 23:47:07] [Rank 0] Group 10 Loss: 6.0149 +[2025-07-06 23:47:07] [Rank 0] Group 10 Loss: 6.0149 +[2025-07-06 23:47:07] [Rank 0] Group 11 Loss: 5.9299 +[2025-07-06 23:47:07] [Rank 0] Group 11 Loss: 5.9299 +[2025-07-06 23:47:07] [Rank 0] Group 0 FTA: 0.8336 +[2025-07-06 23:47:07] [Rank 0] Group 0 FTA: 0.8336 +[2025-07-06 23:47:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:47:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:47:07] [Rank 0] Group 2 FTA: 0.9245 +[2025-07-06 23:47:07] [Rank 0] Group 2 FTA: 0.9245 +[2025-07-06 23:47:07] [Rank 0] Group 3 FTA: 0.8776 +[2025-07-06 23:47:07] [Rank 0] Group 3 FTA: 0.8776 +[2025-07-06 23:47:07] [Rank 0] Group 4 FTA: 0.9557 +[2025-07-06 23:47:07] [Rank 0] Group 4 FTA: 0.9557 +[2025-07-06 23:47:07] [Rank 0] Group 5 FTA: 0.9349 +[2025-07-06 23:47:07] [Rank 0] Group 5 FTA: 0.9349 +[2025-07-06 23:47:07] [Rank 0] Group 6 FTA: 0.9714 +[2025-07-06 23:47:07] [Rank 0] Group 6 FTA: 0.9714 +[2025-07-06 23:47:07] [Rank 0] Group 7 FTA: 0.9609 +[2025-07-06 23:47:07] [Rank 0] Group 7 FTA: 0.9609 +[2025-07-06 23:47:07] [Rank 0] Group 8 FTA: 0.9453 +[2025-07-06 23:47:07] [Rank 0] Group 8 FTA: 0.9453 +[2025-07-06 23:47:07] [Rank 0] Group 9 FTA: 0.9336 +[2025-07-06 23:47:07] [Rank 0] Group 9 FTA: 0.9336 +[2025-07-06 23:47:07] [Rank 0] Group 10 FTA: 0.9570 +[2025-07-06 23:47:07] [Rank 0] Group 10 FTA: 0.9570 +[2025-07-06 23:47:07] [Rank 0] Group 11 FTA: 0.9453 +[2025-07-06 23:47:07] [Rank 0] Group 11 FTA: 0.9453 +[2025-07-06 23:47:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-06 23:47:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-06 23:47:08] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-06 23:47:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-06 23:47:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-06 23:47:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-06 23:47:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-06 23:47:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-06 23:47:08] [Rank 0] step:9501/10000 train_time:770403ms step_avg:81.09ms +[2025-07-06 23:47:08] [Rank 0] step:9501/10000 train_time:770403ms step_avg:81.09ms +[2025-07-06 23:47:10] [Rank 0] step:9521/10000 train_time:771894ms step_avg:81.07ms +[2025-07-06 23:47:10] [Rank 0] step:9521/10000 train_time:771894ms step_avg:81.07ms +[2025-07-06 23:47:12] [Rank 0] step:9541/10000 train_time:774046ms step_avg:81.13ms +[2025-07-06 23:47:12] [Rank 0] step:9541/10000 train_time:774046ms step_avg:81.13ms +[2025-07-06 23:47:14] [Rank 0] step:9561/10000 train_time:775518ms step_avg:81.11ms +[2025-07-06 23:47:14] [Rank 0] step:9561/10000 train_time:775518ms step_avg:81.11ms +[2025-07-06 23:47:15] [Rank 0] step:9581/10000 train_time:777010ms step_avg:81.10ms +[2025-07-06 23:47:15] [Rank 0] step:9581/10000 train_time:777010ms step_avg:81.10ms +[2025-07-06 23:47:17] [Rank 0] step:9601/10000 train_time:778505ms step_avg:81.09ms +[2025-07-06 23:47:17] [Rank 0] step:9601/10000 train_time:778505ms step_avg:81.09ms +[2025-07-06 23:47:18] [Rank 0] step:9621/10000 train_time:779999ms step_avg:81.07ms +[2025-07-06 23:47:18] [Rank 0] step:9621/10000 train_time:779999ms step_avg:81.07ms +[2025-07-06 23:47:20] [Rank 0] step:9641/10000 train_time:781733ms step_avg:81.08ms +[2025-07-06 23:47:20] [Rank 0] step:9641/10000 train_time:781733ms step_avg:81.08ms +[2025-07-06 23:47:21] [Rank 0] step:9661/10000 train_time:783227ms step_avg:81.07ms +[2025-07-06 23:47:21] [Rank 0] step:9661/10000 train_time:783227ms step_avg:81.07ms +[2025-07-06 23:47:23] [Rank 0] step:9681/10000 train_time:784722ms step_avg:81.06ms +[2025-07-06 23:47:23] [Rank 0] step:9681/10000 train_time:784722ms step_avg:81.06ms +[2025-07-06 23:47:24] [Rank 0] step:9701/10000 train_time:786217ms step_avg:81.04ms +[2025-07-06 23:47:24] [Rank 0] step:9701/10000 train_time:786217ms step_avg:81.04ms +[2025-07-06 23:47:26] [Rank 0] step:9721/10000 train_time:787712ms step_avg:81.03ms +[2025-07-06 23:47:26] [Rank 0] step:9721/10000 train_time:787712ms step_avg:81.03ms +[2025-07-06 23:47:28] [Rank 0] step:9741/10000 train_time:789859ms step_avg:81.09ms +[2025-07-06 23:47:28] [Rank 0] step:9741/10000 train_time:789859ms step_avg:81.09ms +[2025-07-06 23:47:29] [Rank 0] step:9761/10000 train_time:791357ms step_avg:81.07ms +[2025-07-06 23:47:29] [Rank 0] step:9761/10000 train_time:791357ms step_avg:81.07ms +[2025-07-06 23:47:31] [Rank 0] step:9781/10000 train_time:792855ms step_avg:81.06ms +[2025-07-06 23:47:31] [Rank 0] step:9781/10000 train_time:792855ms step_avg:81.06ms +[2025-07-06 23:47:32] [Rank 0] step:9801/10000 train_time:794353ms step_avg:81.05ms +[2025-07-06 23:47:32] [Rank 0] 
step:9801/10000 train_time:794353ms step_avg:81.05ms +[2025-07-06 23:47:35] [Rank 0] step:9821/10000 train_time:796502ms step_avg:81.10ms +[2025-07-06 23:47:35] [Rank 0] step:9821/10000 train_time:796502ms step_avg:81.10ms +[2025-07-06 23:47:36] [Rank 0] step:9841/10000 train_time:798000ms step_avg:81.09ms +[2025-07-06 23:47:36] [Rank 0] step:9841/10000 train_time:798000ms step_avg:81.09ms +[2025-07-06 23:47:38] [Rank 0] step:9861/10000 train_time:799499ms step_avg:81.08ms +[2025-07-06 23:47:38] [Rank 0] step:9861/10000 train_time:799499ms step_avg:81.08ms +[2025-07-06 23:47:39] [Rank 0] step:9881/10000 train_time:800998ms step_avg:81.06ms +[2025-07-06 23:47:39] [Rank 0] step:9881/10000 train_time:800998ms step_avg:81.06ms +[2025-07-06 23:47:41] [Rank 0] step:9901/10000 train_time:802750ms step_avg:81.08ms +[2025-07-06 23:47:41] [Rank 0] step:9901/10000 train_time:802750ms step_avg:81.08ms +[2025-07-06 23:47:43] [Rank 0] step:9921/10000 train_time:804644ms step_avg:81.11ms +[2025-07-06 23:47:43] [Rank 0] step:9921/10000 train_time:804644ms step_avg:81.11ms +[2025-07-06 23:47:44] [Rank 0] step:9941/10000 train_time:806143ms step_avg:81.09ms +[2025-07-06 23:47:44] [Rank 0] step:9941/10000 train_time:806143ms step_avg:81.09ms +[2025-07-06 23:47:46] [Rank 0] step:9961/10000 train_time:807643ms step_avg:81.08ms +[2025-07-06 23:47:46] [Rank 0] step:9961/10000 train_time:807643ms step_avg:81.08ms +[2025-07-06 23:47:47] [Rank 0] step:9981/10000 train_time:809142ms step_avg:81.07ms +[2025-07-06 23:47:47] [Rank 0] step:9981/10000 train_time:809142ms step_avg:81.07ms +[2025-07-06 23:47:49] [Rank 0] step:10000/10000 train_time:811210ms step_avg:81.12ms +[2025-07-06 23:47:49] [Rank 0] step:10000/10000 train_time:811210ms step_avg:81.12ms +[2025-07-06 23:47:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:47:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:47:50] [Rank 0] PRINT: step:10000/10000 train_loss:0.8641 val_loss:0.8670 train_time:811290ms step_avg:81.13ms +[2025-07-06 23:47:50] [Rank 0] PRINT: step:10000/10000 train_loss:0.8641 val_loss:0.8670 train_time:811290ms step_avg:81.13ms +[2025-07-06 23:47:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:47:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:47:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 23:47:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
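+# The twelve "Group g" rows above come from the power-law split built by
+# generate_powerlaw_selection_counts(m=11), logged later in this file: group 0
+# holds a single class assigned 2**11 samples, and each group g >= 1 holds
+# 2**(g-1) classes assigned 2**(11-g) samples each, so higher group ids mean
+# rarer classes.
+#
+# The "stratified sampling" reported above is implemented inline in
+# run_detailed_evaluation (also logged later). A minimal standalone sketch of
+# the same logic, assuming qa_data is a list of dicts carrying a 'class_id'
+# key; the name stratified_subset is illustrative, not from the run:
+import random
+from collections import defaultdict
+
+def stratified_subset(qa_data, num_samples):
+    # Bucket items by class so every class stays represented.
+    data_by_class = defaultdict(list)
+    for item in qa_data:
+        data_by_class[item['class_id']].append(item)
+    ratio = num_samples / len(qa_data)
+    subset = []
+    for items in data_by_class.values():
+        # Floor to the global ratio but never drop a class entirely; this
+        # max(1, ...) bump is why ~5000 requested can yield the 5633 kept.
+        k = max(1, int(len(items) * ratio))
+        subset.extend(random.sample(items, min(len(items), k)))
+    return subset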
+[2025-07-06 23:47:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:47:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:53:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:53:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:53:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:53:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:53:13] [Rank 0] Total Loss: 6.0840 +[2025-07-06 23:53:13] [Rank 0] Total Loss: 6.0840 +[2025-07-06 23:53:13] [Rank 0] Total FTA: 0.9336 +[2025-07-06 23:53:13] [Rank 0] Total FTA: 0.9336 +[2025-07-06 23:53:13] [Rank 0] Group 0 Loss: 6.2954 +[2025-07-06 23:53:13] [Rank 0] Group 0 Loss: 6.2954 +[2025-07-06 23:53:13] [Rank 0] Group 1 Loss: 6.1781 +[2025-07-06 23:53:13] [Rank 0] Group 1 Loss: 6.1781 +[2025-07-06 23:53:13] [Rank 0] Group 2 Loss: 5.9557 +[2025-07-06 23:53:13] [Rank 0] Group 2 Loss: 5.9557 +[2025-07-06 23:53:13] [Rank 0] Group 3 Loss: 5.9425 +[2025-07-06 23:53:13] [Rank 0] Group 3 Loss: 5.9425 +[2025-07-06 23:53:13] [Rank 0] Group 4 Loss: 6.0579 +[2025-07-06 23:53:13] [Rank 0] Group 4 Loss: 6.0579 +[2025-07-06 23:53:13] [Rank 0] Group 5 Loss: 5.8978 +[2025-07-06 23:53:13] [Rank 0] Group 5 Loss: 5.8978 +[2025-07-06 23:53:13] [Rank 0] Group 6 Loss: 5.9690 +[2025-07-06 23:53:13] [Rank 0] Group 6 Loss: 5.9690 +[2025-07-06 23:53:13] [Rank 0] Group 7 Loss: 6.1673 +[2025-07-06 23:53:13] [Rank 0] Group 7 Loss: 6.1673 +[2025-07-06 23:53:13] [Rank 0] Group 8 Loss: 5.9972 +[2025-07-06 23:53:13] [Rank 0] Group 8 Loss: 5.9972 +[2025-07-06 23:53:13] [Rank 0] Group 9 Loss: 6.0799 +[2025-07-06 23:53:13] [Rank 0] Group 9 Loss: 6.0799 +[2025-07-06 23:53:13] [Rank 0] Group 10 Loss: 6.1384 +[2025-07-06 23:53:13] [Rank 0] Group 10 Loss: 6.1384 +[2025-07-06 23:53:13] [Rank 0] Group 11 Loss: 6.0892 +[2025-07-06 23:53:13] [Rank 0] Group 11 Loss: 6.0892 +[2025-07-06 23:53:13] [Rank 0] Group 0 FTA: 0.8440 +[2025-07-06 23:53:13] [Rank 0] Group 0 FTA: 0.8440 +[2025-07-06 23:53:13] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:53:13] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:53:13] [Rank 0] Group 2 FTA: 0.8411 +[2025-07-06 23:53:13] [Rank 0] Group 2 FTA: 0.8411 +[2025-07-06 23:53:13] [Rank 0] Group 3 FTA: 0.9557 +[2025-07-06 23:53:13] [Rank 0] Group 3 FTA: 0.9557 +[2025-07-06 23:53:13] [Rank 0] Group 4 FTA: 0.9427 +[2025-07-06 23:53:13] [Rank 0] Group 4 FTA: 0.9427 +[2025-07-06 23:53:13] [Rank 0] Group 5 FTA: 0.9427 +[2025-07-06 23:53:13] [Rank 0] Group 5 FTA: 0.9427 +[2025-07-06 23:53:13] [Rank 0] Group 6 FTA: 0.9766 +[2025-07-06 23:53:13] [Rank 0] Group 6 FTA: 0.9766 +[2025-07-06 23:53:13] [Rank 0] Group 7 FTA: 0.9505 +[2025-07-06 23:53:13] [Rank 0] Group 7 FTA: 0.9505 +[2025-07-06 23:53:13] [Rank 0] Group 8 FTA: 0.9479 +[2025-07-06 23:53:13] [Rank 0] Group 8 FTA: 0.9479 +[2025-07-06 23:53:13] [Rank 0] Group 9 FTA: 0.9453 +[2025-07-06 23:53:13] [Rank 0] Group 9 FTA: 0.9453 +[2025-07-06 23:53:13] [Rank 0] Group 10 FTA: 0.9551 +[2025-07-06 23:53:13] [Rank 0] Group 10 FTA: 0.9551 +[2025-07-06 23:53:13] [Rank 0] Group 11 FTA: 0.9541 +[2025-07-06 23:53:13] [Rank 0] Group 11 FTA: 0.9541 +[2025-07-06 23:53:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-06 23:53:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-06 23:53:14] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-06 23:53:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-06 23:53:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-06 23:53:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-06 23:53:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-06 23:53:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-06 23:53:15] [Rank 0] step:10001/10000 train_time:811310ms step_avg:81.12ms +[2025-07-06 23:53:15] [Rank 0] step:10001/10000 train_time:811310ms step_avg:81.12ms +[2025-07-06 23:53:15] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 23:53:15 2025 --- +[2025-07-06 23:53:15] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 23:53:15 2025 --- +[2025-07-06 23:53:15] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9956 MiB +[2025-07-06 23:53:15] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9956 MiB diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/config.json b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/config.json new file mode 100644 index 0000000000000000000000000000000000000000..da7d31ff1cc7d82beaeb7acc4663de98bb7090b7 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 46, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "5127e800-3c5c-4044-85f9-9c4108d86e08", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..9403fb29132bb21a702ae2396891ab03cf59eec5 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:307bdd9af1d7df6072be5a80a7bea23ac2b00c055496a154e0feaf5c69cc9b31 +size 426371 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..6d31f4c8162f89f1392f5112f37f2c98ed5d21fe --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2ed36aae1f4b7168690dc16dda26b3d0dbcf8a018717820b7d6ef55fbdd1aad +size 387661 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png 
b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..ef90bc8025b1120d8b1545ab5c9ea90a16642456 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:237d97aabc5e4434fd6c3ecad62324a5e9770a65d8ce9cdbb5e87c6314f3a15a +size 115771 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..f6a429a0363542946a1a46c510db3b144c56260a --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1633a99b1ee27dea517b29ffcde741590dd630f33f69a65bc47807304cae81c4 +size 106686 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/training_log_5127e800-3c5c-4044-85f9-9c4108d86e08.txt b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/training_log_5127e800-3c5c-4044-85f9-9c4108d86e08.txt new file mode 100644 index 0000000000000000000000000000000000000000..77a8fbc8d6df604108f7dad99ffd099af41d18ab --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/training_log_5127e800-3c5c-4044-85f9-9c4108d86e08.txt @@ -0,0 +1,5144 @@ +[2025-07-08 04:58:45] [Rank 0] PRINT: --- Script Start: Tue Jul 8 04:58:45 2025 --- +[2025-07-08 04:58:45] [Rank 0] PRINT: --- Script Start: Tue Jul 8 04:58:45 2025 --- +[2025-07-08 04:58:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-08 04:58:45] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-08 04:58:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-08 04:58:45] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-08 04:58:45] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-08 04:58:45] [Rank 0] PRINT: Using fixed seed: 46 +[2025-07-08 04:58:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46 +[2025-07-08 04:58:45] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46 +[2025-07-08 04:58:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as 
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
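+# run_dir_path_str above hard-codes an absolute path; it is recomputed from
+# the relative base_log_dir just below, and both follow the same naming
+# scheme, e.g. for this run: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46
+# run_flag is derived from this path before any mkdir: the run only logs and
+# trains when the directory does not already exist, so re-launching an
+# identical config is a no-op.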
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
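+            # Smoke test for the return convention: with target_seq=None the
+            # model may return a bare logits tensor or a (loss, logits) tuple;
+            # the check below reports which form this parameterization uses
+            # before training starts, mirroring the isinstance(..., tuple)
+            # handling in compute_first_token_accuracy and run_detailed_evaluation.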
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 04:58:45] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle loops over the shards indefinitely, enabling multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
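+# For illustration (hypothetical CLI values, not the argparse defaults):
+# optimizer_mode=0, model_parameterization="qkvo", adam_lr=0.0001, seed=42
+# would make run_dir_path_str end in ".../qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42".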
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # run_folder_name / run_dir_path were already constructed above from the config and seed
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
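# What this second test checks (sketch): with target_seq=None the model is
+            # expected to return either bare logits -- e.g. torch.Size([1, 128, 50304])
+            # with the padded vocab -- or a (loss, logits) tuple; the branches
+            # below just report which form came back.
+            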
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        # Average the per-step training loss accumulated since the last validation.
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        # (Disabled) legacy first-token accuracy probe; the detailed evaluation
+        # below reports FTA instead.
+        # first_token_acc = 0.0
+        # ft_correct = 0
+        # ft_total = 0
+        # if master_process and ft_tokenizer is not None:
+        #     try:
+        #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #             model_for_inference, ft_tokenizer, device, num_samples=1000
+        #         )
+        #     except Exception as e:
+        #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+        # if world_size > 1:
+        #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #     ft_total_tensor = torch.tensor(ft_total, device=device)
+        #     dist.broadcast(ft_acc_tensor, 0)
+        #     dist.broadcast(ft_correct_tensor, 0)
+        #     dist.broadcast(ft_total_tensor, 0)
+        #     first_token_acc = ft_acc_tensor.item()
+        #     ft_correct = int(ft_correct_tensor.item())
+        #     ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            # Map each class id to its power-law frequency group (groups 0..M).
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            # Sync weights into the saved original (uncompiled) model used for inference.
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            # Record this step's metrics so the curves can be re-plotted in full each time.
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        # Reset the running train-loss accumulators and restart the step timer.
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # (Disabled) older end-of-run checkpoint path, superseded by the block above.
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    # Manual gradient averaging across ranks (no DDP wrapper is used).
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Scale every optimizer's LR by the shared schedule multiplier.
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    # Muon momentum warmup: linear ramp from 0.85 to 0.95 over the first 300 steps.
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Per-token loss (computed but currently unused in the printout below).
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-08 04:58:45] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-08 04:58:45] [Rank 0] PRINT: Constructing model...
+[2025-07-08 04:58:47] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-08 04:58:47] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-08 04:58:47] [Rank 0] PRINT: Testing model forward function:
+[2025-07-08 04:58:48] [Rank 0] PRINT: Model test - Result type:
+[2025-07-08 04:58:48] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-08 04:58:48] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-08 04:58:48] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-08 04:58:48] [Rank 0] PRINT: Model returns:
+[2025-07-08 04:58:48] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-08 04:58:48] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-08 04:58:48] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-08 04:58:48] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-08 04:58:48] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-08 04:58:48] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-08 04:58:48] [Rank 0] PRINT: Model compilation complete.
+[2025-07-08 04:58:48] [Rank 0] PRINT: Starting warmup...
+[2025-07-08 04:59:55] [Rank 0] PRINT: Warmup complete.
+[2025-07-08 04:59:55] [Rank 0] PRINT: Starting training...
+[2025-07-08 04:59:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
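For the record, the divisibility warning is benign and the shortfall is constant; the arithmetic below just spells out what the numbers in the message imply for `val_num_steps = args.val_tokens // val_batch_size` in the script above:

# 1966080 val tokens / 262144 tokens per distributed val batch = 7.5,
# so floor division yields 7 full validation steps and leaves
# 131072 tokens (half a batch) unscored on every validation pass.
assert 1966080 // 262144 == 7
assert 1966080 - 7 * 262144 == 131072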
+[2025-07-08 05:00:03] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-08 05:00:04] [Rank 0] step:21/10000 train_time:1545ms step_avg:73.59ms
+[2025-07-08 05:00:06] [Rank 0] step:41/10000 train_time:2998ms step_avg:73.13ms
+[2025-07-08 05:00:07] [Rank 0] step:61/10000 train_time:4448ms step_avg:72.92ms
+[2025-07-08 05:00:09] [Rank 0] step:81/10000 train_time:5901ms step_avg:72.86ms
+[2025-07-08 05:00:11] [Rank 0] step:101/10000 train_time:8003ms step_avg:79.23ms
+[2025-07-08 05:00:12] [Rank 0] step:121/10000 train_time:9457ms step_avg:78.16ms
+[2025-07-08 05:00:14] [Rank 0] step:141/10000 train_time:10912ms step_avg:77.39ms
+[2025-07-08 05:00:15] [Rank 0] step:161/10000 train_time:12369ms step_avg:76.83ms
+[2025-07-08 05:00:17] [Rank 0] step:181/10000 train_time:13880ms step_avg:76.69ms
+[2025-07-08 05:00:19] [Rank 0] step:201/10000 train_time:15933ms step_avg:79.27ms
+[2025-07-08 05:00:20] [Rank 0] step:221/10000 train_time:17392ms step_avg:78.70ms
+[2025-07-08 05:00:21] [Rank 0] step:241/10000 train_time:18855ms step_avg:78.24ms
+[2025-07-08 05:00:23] [Rank 0] step:261/10000 train_time:20320ms step_avg:77.85ms
+[2025-07-08 05:00:25] [Rank 0] step:281/10000 train_time:22436ms step_avg:79.84ms
+[2025-07-08 05:00:27] [Rank 0] step:301/10000 train_time:23898ms step_avg:79.40ms
+[2025-07-08 05:00:28] [Rank 0] step:321/10000 train_time:25360ms step_avg:79.00ms
+[2025-07-08 05:00:29] [Rank 0] step:341/10000 train_time:26827ms step_avg:78.67ms
+[2025-07-08 05:00:32] [Rank 0] step:361/10000 train_time:28957ms step_avg:80.21ms
+[2025-07-08 05:00:33] [Rank 0] step:381/10000 train_time:30400ms step_avg:79.79ms
+[2025-07-08 05:00:34] [Rank 0] step:401/10000 train_time:31866ms step_avg:79.47ms
+[2025-07-08 05:00:36] [Rank 0] step:421/10000 train_time:33332ms step_avg:79.17ms
+[2025-07-08 05:00:37] [Rank 0] step:441/10000 train_time:34797ms step_avg:78.90ms
+[2025-07-08 05:00:40] [Rank 0] step:461/10000 train_time:36932ms step_avg:80.11ms
+[2025-07-08 05:00:41] [Rank 0] step:481/10000 train_time:38396ms step_avg:79.83ms
+[2025-07-08 05:00:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:00:43] [Rank 0] PRINT: step:500/10000 train_loss:4.9403 val_loss:2.0526 train_time:39862ms step_avg:79.72ms
+[2025-07-08 05:00:43] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:00:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:00:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:06:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:06:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:06:09] [Rank 0] Total Loss: 4.1617
+[2025-07-08 05:06:09] [Rank 0] Total FTA: 0.0861
+[2025-07-08 05:06:09] [Rank 0] Group 0 Loss: 4.4743
+[2025-07-08 05:06:09] [Rank 0] Group 1 Loss: 4.1673
+[2025-07-08 05:06:09] [Rank 0] Group 2 Loss: 4.0684
+[2025-07-08 05:06:09] [Rank 0] Group 3 Loss: 4.1013
+[2025-07-08 05:06:09] [Rank 0] Group 4 Loss: 4.1017
+[2025-07-08 05:06:09] [Rank 0] Group 5 Loss: 4.0604
+[2025-07-08 05:06:09] [Rank 0] Group 6 Loss: 4.0842
+[2025-07-08 05:06:09] [Rank 0] Group 7 Loss: 4.1210
+[2025-07-08 05:06:09] [Rank 0] Group 8 Loss: 4.1162
+[2025-07-08 05:06:09] [Rank 0] Group 9 Loss: 4.1270
+[2025-07-08 05:06:09] [Rank 0] Group 10 Loss: 4.1227
+[2025-07-08 05:06:09] [Rank 0] Group 11 Loss: 4.1325
+[2025-07-08 05:06:09] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-08 05:06:09] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 05:06:09] [Rank 0] Group 2 FTA: 0.0651
+[2025-07-08 05:06:09] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-08 05:06:09] [Rank 0] Group 4 FTA: 0.0182
+[2025-07-08 05:06:09] [Rank 0] Group 5 FTA: 0.0807
+[2025-07-08 05:06:09] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-08 05:06:09] [Rank 0] Group 7 FTA: 0.1016
+[2025-07-08 05:06:09] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-08 05:06:09] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-08 05:06:09] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-08 05:06:09] [Rank 0] Group 11 FTA: 0.0918
+[2025-07-08 05:06:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:06:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:06:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:06:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
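The four PNGs named in these lines are rewritten from the full history dict on every evaluation pass rather than appended to. plot_curves itself is defined earlier in the script and is not part of this excerpt; the following is only a minimal matplotlib sketch matching the call signature used above (plot_curves(series, out_path, title, ylabel, y_lim=None)), not the actual implementation:

import matplotlib
matplotlib.use("Agg")  # headless rendering on the training node
import matplotlib.pyplot as plt

def plot_curves(series, out_path, title, ylabel, y_lim=None):
    # `series` is either {step: value} (total curves) or
    # {group_id: {step: value}} (per-class curves).
    fig, ax = plt.subplots()
    nested = bool(series) and isinstance(next(iter(series.values())), dict)
    for label, curve in (series.items() if nested else [("total", series)]):
        steps = sorted(curve, key=int)
        ax.plot([int(s) for s in steps], [curve[s] for s in steps], label=str(label))
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    ax.set_title(title)
    if y_lim is not None:
        ax.set_ylim(y_lim)
    if nested:
        ax.legend(fontsize="small")
    fig.savefig(out_path)
    plt.close(fig)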
+[2025-07-08 05:06:11] [Rank 0] step:501/10000 train_time:39882ms step_avg:79.61ms
+[2025-07-08 05:06:12] [Rank 0] step:521/10000 train_time:41338ms step_avg:79.34ms
+[2025-07-08 05:06:14] [Rank 0] step:541/10000 train_time:42797ms step_avg:79.11ms
+[2025-07-08 05:06:15] [Rank 0] step:561/10000 train_time:44620ms step_avg:79.54ms
+[2025-07-08 05:06:17] [Rank 0] step:581/10000 train_time:46081ms step_avg:79.31ms
+[2025-07-08 05:06:18] [Rank 0] step:601/10000 train_time:47536ms step_avg:79.10ms
+[2025-07-08 05:06:20] [Rank 0] step:621/10000 train_time:48993ms step_avg:78.89ms
+[2025-07-08 05:06:22] [Rank 0] step:641/10000 train_time:50691ms step_avg:79.08ms
+[2025-07-08 05:06:23] [Rank 0] step:661/10000 train_time:52150ms step_avg:78.90ms
+[2025-07-08 05:06:24] [Rank 0] step:681/10000 train_time:53610ms step_avg:78.72ms
+[2025-07-08 05:06:26] [Rank 0] step:701/10000 train_time:55071ms step_avg:78.56ms
+[2025-07-08 05:06:28] [Rank 0] step:721/10000 train_time:56791ms step_avg:78.77ms
+[2025-07-08 05:06:29] [Rank 0] step:741/10000 train_time:58651ms step_avg:79.15ms
+[2025-07-08 05:06:31] [Rank 0] step:761/10000 train_time:60118ms step_avg:79.00ms
+[2025-07-08 05:06:32] [Rank 0] step:781/10000 train_time:61588ms step_avg:78.86ms
+[2025-07-08 05:06:34] [Rank 0] step:801/10000 train_time:63058ms step_avg:78.72ms
+[2025-07-08 05:06:36] [Rank 0] step:821/10000 train_time:64764ms step_avg:78.88ms
+[2025-07-08 05:06:37] [Rank 0] step:841/10000 train_time:66237ms step_avg:78.76ms
+[2025-07-08 05:06:39] [Rank 0] step:861/10000 train_time:67709ms step_avg:78.64ms
+[2025-07-08 05:06:40] [Rank 0] step:881/10000 train_time:69183ms step_avg:78.53ms
+[2025-07-08 05:06:42] [Rank 0] step:901/10000 train_time:70912ms step_avg:78.70ms
+[2025-07-08 05:06:44] [Rank 0] step:921/10000 train_time:72772ms step_avg:79.01ms
+[2025-07-08 05:06:45] [Rank 0] step:941/10000 train_time:74241ms step_avg:78.90ms
+[2025-07-08 05:06:47] [Rank 0] step:961/10000 train_time:75713ms step_avg:78.79ms
+[2025-07-08 05:06:48] [Rank 0] step:981/10000 train_time:77188ms step_avg:78.68ms
+[2025-07-08 05:06:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
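The Group 0-11 breakdown in the evaluation blocks comes from generate_powerlaw_selection_counts(M_FOR_POWERLAW) with M_FOR_POWERLAW = 11, i.e. twelve frequency groups. That helper's body is defined earlier in the script and is not shown in this excerpt; the sketch below only illustrates the interface the training loop relies on (a dict of per-class selection counts plus a parallel group-id list). The base count and exponent are made up for illustration:

from collections import OrderedDict

# Hypothetical sketch -- not the real generate_powerlaw_selection_counts.
def generate_powerlaw_selection_counts_sketch(m, base_count=1024, alpha=1.0):
    selection_counts = OrderedDict()   # class_id -> how often the class is sampled
    class_groups_list = []             # parallel list: class_id -> group_id in 0..m
    for class_id in range(m + 1):
        # Power-law falloff: group g is sampled ~ (g+1)**-alpha as often as group 0.
        selection_counts[class_id] = max(1, int(base_count * (class_id + 1) ** -alpha))
        class_groups_list.append(class_id)
    return selection_counts, class_groups_list

Under any such assignment, Group 0 is the most frequently sampled population, which is consistent with it posting the highest FTA in the first evaluation above.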
+[2025-07-08 05:06:51] [Rank 0] PRINT: step:1000/10000 train_loss:1.7034 val_loss:1.5408 train_time:79328ms step_avg:79.33ms
+[2025-07-08 05:06:51] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:06:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:06:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:12:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:12:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:12:17] [Rank 0] Total Loss: 4.4276
+[2025-07-08 05:12:17] [Rank 0] Total FTA: 0.1262
+[2025-07-08 05:12:17] [Rank 0] Group 0 Loss: 4.8452
+[2025-07-08 05:12:17] [Rank 0] Group 1 Loss: 4.2254
+[2025-07-08 05:12:17] [Rank 0] Group 2 Loss: 4.2350
+[2025-07-08 05:12:17] [Rank 0] Group 3 Loss: 4.3176
+[2025-07-08 05:12:17] [Rank 0] Group 4 Loss: 4.4790
+[2025-07-08 05:12:17] [Rank 0] Group 5 Loss: 4.2980
+[2025-07-08 05:12:17] [Rank 0] Group 6 Loss: 4.3736
+[2025-07-08 05:12:17] [Rank 0] Group 7 Loss: 4.4423
+[2025-07-08 05:12:17] [Rank 0] Group 8 Loss: 4.4207
+[2025-07-08 05:12:17] [Rank 0] Group 9 Loss: 4.3576
+[2025-07-08 05:12:17] [Rank 0] Group 10 Loss: 4.4131
+[2025-07-08 05:12:17] [Rank 0] Group 11 Loss: 4.3746
+[2025-07-08 05:12:17] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-08 05:12:17] [Rank 0] Group 1 FTA: 0.1849
+[2025-07-08 05:12:17] [Rank 0] Group 2 FTA: 0.2682
+[2025-07-08 05:12:17] [Rank 0] Group 3 FTA: 0.0208
+[2025-07-08 05:12:17] [Rank 0] Group 4 FTA: 0.0781
+[2025-07-08 05:12:17] [Rank 0] Group 5 FTA: 0.1172
+[2025-07-08 05:12:17] [Rank 0] Group 6 FTA: 0.1380
+[2025-07-08 05:12:17] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-08 05:12:17] [Rank 0] Group 8 FTA: 0.1354
+[2025-07-08 05:12:17] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-08 05:12:17] [Rank 0] Group 10 FTA: 0.1055
+[2025-07-08 05:12:17] [Rank 0] Group 11 FTA: 0.0928
+[2025-07-08 05:12:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:12:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:12:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:12:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
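FTA throughout this log is first-token accuracy: the fraction of sampled QA pairs for which the model's greedy next-token prediction at the end of the question matches the gold first token of the answer. run_detailed_evaluation is defined earlier in the script; the sketch below shows just the metric, assuming a generic model that maps a token sequence to per-position logits (the helper name and signature are illustrative, not the script's API):

import torch

@torch.no_grad()
def first_token_accuracy(model, prompts, first_answer_tokens, device):
    # prompts: list of 1D LongTensors; first_answer_tokens: gold first-token ids.
    correct = 0
    for prompt, gold in zip(prompts, first_answer_tokens):
        logits = model(prompt.unsqueeze(0).to(device))   # (1, T, vocab)
        pred = logits[0, -1].argmax().item()             # greedy next-token choice
        correct += int(pred == gold)
    return correct / max(1, len(prompts))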
+[2025-07-08 05:12:19] [Rank 0] step:1001/10000 train_time:79349ms step_avg:79.27ms
+[2025-07-08 05:12:20] [Rank 0] step:1021/10000 train_time:80831ms step_avg:79.17ms
+[2025-07-08 05:12:22] [Rank 0] step:1041/10000 train_time:82294ms step_avg:79.05ms
+[2025-07-08 05:12:23] [Rank 0] step:1061/10000 train_time:83759ms step_avg:78.94ms
+[2025-07-08 05:12:25] [Rank 0] step:1081/10000 train_time:85485ms step_avg:79.08ms
+[2025-07-08 05:12:27] [Rank 0] step:1101/10000 train_time:87362ms step_avg:79.35ms
+[2025-07-08 05:12:28] [Rank 0] step:1121/10000 train_time:88824ms step_avg:79.24ms
+[2025-07-08 05:12:30] [Rank 0] step:1141/10000 train_time:90291ms step_avg:79.13ms
+[2025-07-08 05:12:31] [Rank 0] step:1161/10000 train_time:91758ms step_avg:79.03ms
+[2025-07-08 05:12:33] [Rank 0] step:1181/10000 train_time:93465ms step_avg:79.14ms
+[2025-07-08 05:12:34] [Rank 0] step:1201/10000 train_time:94932ms step_avg:79.04ms
+[2025-07-08 05:12:36] [Rank 0] step:1221/10000 train_time:96537ms step_avg:79.06ms
+[2025-07-08 05:12:37] [Rank 0] step:1241/10000 train_time:98005ms step_avg:78.97ms
+[2025-07-08 05:12:39] [Rank 0] step:1261/10000 train_time:99525ms step_avg:78.93ms
+[2025-07-08 05:12:40] [Rank 0] step:1281/10000 train_time:101173ms step_avg:78.98ms
+[2025-07-08 05:12:42] [Rank 0] step:1301/10000 train_time:102639ms step_avg:78.89ms
+[2025-07-08 05:12:43] [Rank 0] step:1321/10000 train_time:104108ms step_avg:78.81ms
+[2025-07-08 05:12:45] [Rank 0] step:1341/10000 train_time:105578ms step_avg:78.73ms
+[2025-07-08 05:12:47] [Rank 0] step:1361/10000 train_time:107719ms step_avg:79.15ms
+[2025-07-08 05:12:49] [Rank 0] step:1381/10000 train_time:109184ms step_avg:79.06ms
+[2025-07-08 05:12:50] [Rank 0] step:1401/10000 train_time:110650ms step_avg:78.98ms
+[2025-07-08 05:12:51] [Rank 0] step:1421/10000 train_time:112119ms step_avg:78.90ms
+[2025-07-08 05:12:53] [Rank 0] step:1441/10000 train_time:113585ms step_avg:78.82ms
+[2025-07-08 05:12:55] [Rank 0] step:1461/10000 train_time:115189ms step_avg:78.84ms
+[2025-07-08 05:12:56] [Rank 0] step:1481/10000 train_time:116653ms step_avg:78.77ms
+[2025-07-08 05:12:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:12:58] [Rank 0] PRINT: step:1500/10000 train_loss:1.4275 val_loss:1.3349 train_time:118120ms step_avg:78.75ms
+[2025-07-08 05:12:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:12:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:12:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:18:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:18:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:18:23] [Rank 0] Total Loss: 4.4927
+[2025-07-08 05:18:23] [Rank 0] Total FTA: 0.2256
+[2025-07-08 05:18:23] [Rank 0] Group 0 Loss: 4.6304
+[2025-07-08 05:18:23] [Rank 0] Group 1 Loss: 4.4411
+[2025-07-08 05:18:23] [Rank 0] Group 2 Loss: 4.2716
+[2025-07-08 05:18:23] [Rank 0] Group 3 Loss: 4.5776
+[2025-07-08 05:18:23] [Rank 0] Group 4 Loss: 4.4436
+[2025-07-08 05:18:23] [Rank 0] Group 5 Loss: 4.4267
+[2025-07-08 05:18:23] [Rank 0] Group 6 Loss: 4.4239
+[2025-07-08 05:18:23] [Rank 0] Group 7 Loss: 4.5445
+[2025-07-08 05:18:23] [Rank 0] Group 8 Loss: 4.5039
+[2025-07-08 05:18:23] [Rank 0] Group 9 Loss: 4.4441
+[2025-07-08 05:18:23] [Rank 0] Group 10 Loss: 4.5599
+[2025-07-08 05:18:23] [Rank 0] Group 11 Loss: 4.4838
+[2025-07-08 05:18:23] [Rank 0] Group 0 FTA: 0.1756
+[2025-07-08 05:18:23] [Rank 0] Group 1 FTA: 0.3464
+[2025-07-08 05:18:23] [Rank 0] Group 2 FTA: 0.2422
+[2025-07-08 05:18:23] [Rank 0] Group 3 FTA: 0.1615
+[2025-07-08 05:18:23] [Rank 0] Group 4 FTA: 0.1432
+[2025-07-08 05:18:23] [Rank 0] Group 5 FTA: 0.2812
+[2025-07-08 05:18:23] [Rank 0] Group 6 FTA: 0.1771
+[2025-07-08 05:18:23] [Rank 0] Group 7 FTA: 0.2344
+[2025-07-08 05:18:23] [Rank 0] Group 8 FTA: 0.2630
+[2025-07-08 05:18:23] [Rank 0] Group 9 FTA: 0.2461
+[2025-07-08 05:18:23] [Rank 0] Group 10 FTA: 0.2480
+[2025-07-08 05:18:23] [Rank 0] Group 11 FTA: 0.2305
+[2025-07-08 05:18:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:18:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:18:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:18:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 05:18:24] [Rank 0] step:1501/10000 train_time:118140ms step_avg:78.71ms
+[2025-07-08 05:18:26] [Rank 0] step:1521/10000 train_time:119626ms step_avg:78.65ms
+[2025-07-08 05:18:28] [Rank 0] step:1541/10000 train_time:121761ms step_avg:79.01ms
+[2025-07-08 05:18:29] [Rank 0] step:1561/10000 train_time:123223ms step_avg:78.94ms
+[2025-07-08 05:18:31] [Rank 0] step:1581/10000 train_time:124686ms step_avg:78.87ms
+[2025-07-08 05:18:32] [Rank 0] step:1601/10000 train_time:126150ms step_avg:78.79ms
+[2025-07-08 05:18:34] [Rank 0] step:1621/10000 train_time:127770ms step_avg:78.82ms
+[2025-07-08 05:18:35] [Rank 0] step:1641/10000 train_time:129215ms step_avg:78.74ms
+[2025-07-08 05:18:37] [Rank 0] step:1661/10000 train_time:130680ms step_avg:78.68ms
+[2025-07-08 05:18:38] [Rank 0] step:1681/10000 train_time:132145ms step_avg:78.61ms
+[2025-07-08 05:18:40] [Rank 0] step:1701/10000 train_time:133609ms step_avg:78.55ms
+[2025-07-08 05:18:42] [Rank 0] step:1721/10000 train_time:135745ms step_avg:78.88ms
+[2025-07-08 05:18:43] [Rank 0] step:1741/10000 train_time:137210ms step_avg:78.81ms
+[2025-07-08 05:18:45] [Rank 0] step:1761/10000 train_time:138677ms step_avg:78.75ms
+[2025-07-08 05:18:46] [Rank 0] step:1781/10000 train_time:140144ms step_avg:78.69ms
+[2025-07-08 05:18:48] [Rank 0] step:1801/10000 train_time:141662ms step_avg:78.66ms
+[2025-07-08 05:18:49] [Rank 0] step:1821/10000 train_time:143316ms step_avg:78.70ms
+[2025-07-08 05:18:51] [Rank 0] step:1841/10000 train_time:144782ms step_avg:78.64ms
+[2025-07-08 05:18:52] [Rank 0] step:1861/10000 train_time:146249ms step_avg:78.59ms
+[2025-07-08 05:18:54] [Rank 0] step:1881/10000 train_time:147716ms step_avg:78.53ms
+[2025-07-08 05:18:56] [Rank 0] step:1901/10000 train_time:149924ms step_avg:78.87ms
+[2025-07-08 05:18:57] [Rank 0] step:1921/10000 train_time:151392ms step_avg:78.81ms
+[2025-07-08 05:18:59] [Rank 0] step:1941/10000 train_time:152857ms step_avg:78.75ms
+[2025-07-08 05:19:00] [Rank 0] step:1961/10000 train_time:154324ms step_avg:78.70ms
+[2025-07-08 05:19:02] [Rank 0] step:1981/10000 train_time:156474ms step_avg:78.99ms
+[2025-07-08 05:19:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:19:05] [Rank 0] PRINT: step:2000/10000 train_loss:1.2423 val_loss:1.2111 train_time:157922ms step_avg:78.96ms
+[2025-07-08 05:19:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:19:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:19:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:24:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:24:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:24:29] [Rank 0] Total Loss: 4.7882
+[2025-07-08 05:24:29] [Rank 0] Total FTA: 0.3561
+[2025-07-08 05:24:29] [Rank 0] Group 0 Loss: 5.1223
+[2025-07-08 05:24:29] [Rank 0] Group 1 Loss: 4.4150
+[2025-07-08 05:24:29] [Rank 0] Group 2 Loss: 4.7078
+[2025-07-08 05:24:29] [Rank 0] Group 3 Loss: 4.8561
+[2025-07-08 05:24:29] [Rank 0] Group 4 Loss: 4.7426
+[2025-07-08 05:24:29] [Rank 0] Group 5 Loss: 4.7186
+[2025-07-08 05:24:29] [Rank 0] Group 6 Loss: 4.7133
+[2025-07-08 05:24:29] [Rank 0] Group 7 Loss: 4.8260
+[2025-07-08 05:24:29] [Rank 0] Group 8 Loss: 4.7245
+[2025-07-08 05:24:29] [Rank 0] Group 9 Loss: 4.7794
+[2025-07-08 05:24:29] [Rank 0] Group 10 Loss: 4.7588
+[2025-07-08 05:24:29] [Rank 0] Group 11 Loss: 4.7800
+[2025-07-08 05:24:29] [Rank 0] Group 0 FTA: 0.5475
+[2025-07-08 05:24:29] [Rank 0] Group 1 FTA: 0.3307
+[2025-07-08 05:24:29] [Rank 0] Group 2 FTA: 0.4688
+[2025-07-08 05:24:29] [Rank 0] Group 3 FTA: 0.2578
+[2025-07-08 05:24:29] [Rank 0] Group 4 FTA: 0.2786
+[2025-07-08 05:24:29] [Rank 0] Group 5 FTA: 0.3333
+[2025-07-08 05:24:29] [Rank 0] Group 6 FTA: 0.3151
+[2025-07-08 05:24:29] [Rank 0] Group 7 FTA: 0.3542
+[2025-07-08 05:24:29] [Rank 0] Group 8 FTA: 0.3255
+[2025-07-08 05:24:29] [Rank 0] Group 9 FTA: 0.3125
+[2025-07-08 05:24:29] [Rank 0] Group 10 FTA: 0.3223
+[2025-07-08 05:24:29] [Rank 0] Group 11 FTA: 0.3096
+[2025-07-08 05:24:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:24:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:24:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:24:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 05:24:31] [Rank 0] step:2001/10000 train_time:157943ms step_avg:78.93ms
+[2025-07-08 05:24:32] [Rank 0] step:2021/10000 train_time:159421ms step_avg:78.88ms
+[2025-07-08 05:24:34] [Rank 0] step:2041/10000 train_time:160884ms step_avg:78.83ms
+[2025-07-08 05:24:35] [Rank 0] step:2061/10000 train_time:162347ms step_avg:78.77ms
+[2025-07-08 05:24:37] [Rank 0] step:2081/10000 train_time:164452ms step_avg:79.03ms
+[2025-07-08 05:24:39] [Rank 0] step:2101/10000 train_time:165916ms step_avg:78.97ms
+[2025-07-08 05:24:40] [Rank 0] step:2121/10000 train_time:167380ms step_avg:78.92ms
+[2025-07-08 05:24:42] [Rank 0] step:2141/10000 train_time:168842ms step_avg:78.86ms
+[2025-07-08 05:24:44] [Rank 0] step:2161/10000 train_time:170562ms step_avg:78.93ms
+[2025-07-08 05:24:45] [Rank 0] step:2181/10000 train_time:172420ms step_avg:79.06ms
+[2025-07-08 05:24:47] [Rank 0] step:2201/10000 train_time:173883ms step_avg:79.00ms
+[2025-07-08 05:24:48] [Rank 0] step:2221/10000 train_time:175348ms step_avg:78.95ms
+[2025-07-08 05:24:50] [Rank 0] step:2241/10000 train_time:176837ms step_avg:78.91ms
+[2025-07-08 05:24:52] [Rank 0] step:2261/10000 train_time:178994ms step_avg:79.17ms
+[2025-07-08 05:24:53] [Rank 0] step:2281/10000 train_time:180486ms step_avg:79.13ms
+[2025-07-08 05:24:55] [Rank 0] step:2301/10000 train_time:181977ms step_avg:79.09ms
+[2025-07-08 05:24:56] [Rank 0] step:2321/10000 train_time:183469ms step_avg:79.05ms
+[2025-07-08 05:24:58] [Rank 0] step:2341/10000 train_time:184963ms step_avg:79.01ms
+[2025-07-08 05:25:00] [Rank 0] step:2361/10000 train_time:187106ms step_avg:79.25ms
+[2025-07-08 05:25:01] [Rank 0] step:2381/10000 train_time:188598ms step_avg:79.21ms
+[2025-07-08 05:25:03] [Rank 0] step:2401/10000 train_time:190092ms step_avg:79.17ms
+[2025-07-08 05:25:04] [Rank 0] step:2421/10000 train_time:191588ms step_avg:79.14ms
+[2025-07-08 05:25:06] [Rank 0] step:2441/10000 train_time:193321ms step_avg:79.20ms
+[2025-07-08 05:25:08] [Rank 0] step:2461/10000 train_time:194814ms step_avg:79.16ms
+[2025-07-08 05:25:09] [Rank 0] step:2481/10000 train_time:196310ms step_avg:79.13ms
+[2025-07-08 05:25:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:25:12] [Rank 0] PRINT: step:2500/10000 train_loss:1.1613 val_loss:1.1174 train_time:197807ms step_avg:79.12ms
+[2025-07-08 05:25:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:25:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:25:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:30:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:30:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:30:36] [Rank 0] Total Loss: 5.1077
+[2025-07-08 05:30:36] [Rank 0] Total FTA: 0.4358
+[2025-07-08 05:30:36] [Rank 0] Group 0 Loss: 5.4544
+[2025-07-08 05:30:36] [Rank 0] Group 1 Loss: 4.7847
+[2025-07-08 05:30:36] [Rank 0] Group 2 Loss: 4.8813
+[2025-07-08 05:30:36] [Rank 0] Group 3 Loss: 5.2188
+[2025-07-08 05:30:36] [Rank 0] Group 4 Loss: 5.0947
+[2025-07-08 05:30:36] [Rank 0] Group 5 Loss: 5.0783
+[2025-07-08 05:30:36] [Rank 0] Group 6 Loss: 4.9964
+[2025-07-08 05:30:36] [Rank 0] Group 7 Loss: 5.1120
+[2025-07-08 05:30:36] [Rank 0] Group 8 Loss: 5.1290
+[2025-07-08 05:30:36] [Rank 0] Group 9 Loss: 5.0638
+[2025-07-08 05:30:36] [Rank 0] Group 10 Loss: 5.1171
+[2025-07-08 05:30:36] [Rank 0] Group 11 Loss: 5.0659
+[2025-07-08 05:30:36] [Rank 0] Group 0 FTA: 0.4811
+[2025-07-08 05:30:36] [Rank 0] Group 1 FTA: 0.3177
+[2025-07-08 05:30:36] [Rank 0] Group 2 FTA: 0.4870
+[2025-07-08 05:30:36] [Rank 0] Group 3 FTA: 0.3073
+[2025-07-08 05:30:36] [Rank 0] Group 4 FTA: 0.3125
+[2025-07-08 05:30:36] [Rank 0] Group 5 FTA: 0.5339
+[2025-07-08 05:30:36] [Rank 0] Group 6 FTA: 0.3750
+[2025-07-08 05:30:36] [Rank 0] Group 7 FTA: 0.4635
+[2025-07-08 05:30:36] [Rank 0] Group 8 FTA: 0.4505
+[2025-07-08 05:30:36] [Rank 0] Group 9 FTA: 0.5000
+[2025-07-08 05:30:36] [Rank 0] Group 10 FTA: 0.4668
+[2025-07-08 05:30:36] [Rank 0] Group 11 FTA: 0.4600
+[2025-07-08 05:30:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:30:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:30:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:30:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 05:30:38] [Rank 0] step:2501/10000 train_time:197827ms step_avg:79.10ms
+[2025-07-08 05:30:40] [Rank 0] step:2521/10000 train_time:199365ms step_avg:79.08ms
+[2025-07-08 05:30:41] [Rank 0] step:2541/10000 train_time:201452ms step_avg:79.28ms
+[2025-07-08 05:30:43] [Rank 0] step:2561/10000 train_time:202942ms step_avg:79.24ms
+[2025-07-08 05:30:44] [Rank 0] step:2581/10000 train_time:204428ms step_avg:79.21ms
+[2025-07-08 05:30:46] [Rank 0] step:2601/10000 train_time:205917ms step_avg:79.17ms
+[2025-07-08 05:30:47] [Rank 0] step:2621/10000 train_time:207641ms step_avg:79.22ms
+[2025-07-08 05:30:49] [Rank 0] step:2641/10000 train_time:209132ms step_avg:79.19ms
+[2025-07-08 05:30:50] [Rank 0] step:2661/10000 train_time:210620ms step_avg:79.15ms
+[2025-07-08 05:30:52] [Rank 0] step:2681/10000 train_time:212112ms step_avg:79.12ms
+[2025-07-08 05:30:54] [Rank 0] step:2701/10000 train_time:213653ms step_avg:79.10ms
+[2025-07-08 05:30:56] [Rank 0] step:2721/10000 train_time:215762ms step_avg:79.30ms
+[2025-07-08 05:30:57] [Rank 0] step:2741/10000 train_time:217250ms step_avg:79.26ms
+[2025-07-08 05:30:59] [Rank 0] step:2761/10000 train_time:218740ms step_avg:79.23ms
+[2025-07-08 05:31:00] [Rank 0] step:2781/10000 train_time:220233ms step_avg:79.19ms
+[2025-07-08 05:31:02] [Rank 0] step:2801/10000 train_time:222375ms step_avg:79.39ms
+[2025-07-08 05:31:04] [Rank 0] step:2821/10000 train_time:223864ms step_avg:79.36ms
+[2025-07-08 05:31:05] [Rank 0] step:2841/10000 train_time:225357ms step_avg:79.32ms
+[2025-07-08 05:31:07] [Rank 0] step:2861/10000 train_time:226852ms step_avg:79.29ms
+[2025-07-08 05:31:09] [Rank 0] step:2881/10000 train_time:228394ms step_avg:79.28ms
+[2025-07-08 05:31:10] [Rank 0] step:2901/10000 train_time:230483ms step_avg:79.45ms
+[2025-07-08 05:31:12] [Rank 0] step:2921/10000 train_time:231976ms step_avg:79.42ms
+[2025-07-08 05:31:13] [Rank 0] step:2941/10000 train_time:233470ms step_avg:79.38ms
+[2025-07-08 05:31:15] [Rank 0] step:2961/10000 train_time:234962ms step_avg:79.35ms
+[2025-07-08 05:31:16] [Rank 0] step:2981/10000 train_time:236692ms step_avg:79.40ms
+[2025-07-08 05:31:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:31:19] [Rank 0] PRINT: step:3000/10000 train_loss:1.0920 val_loss:1.0492 train_time:238183ms step_avg:79.39ms
+[2025-07-08 05:31:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:31:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:31:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:36:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:36:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:36:44] [Rank 0] Total Loss: 5.1541
+[2025-07-08 05:36:44] [Rank 0] Total FTA: 0.5935
+[2025-07-08 05:36:44] [Rank 0] Group 0 Loss: 5.3392
+[2025-07-08 05:36:44] [Rank 0] Group 1 Loss: 4.8092
+[2025-07-08 05:36:44] [Rank 0] Group 2 Loss: 5.1925
+[2025-07-08 05:36:44] [Rank 0] Group 3 Loss: 5.2640
+[2025-07-08 05:36:44] [Rank 0] Group 4 Loss: 5.2721
+[2025-07-08 05:36:44] [Rank 0] Group 5 Loss: 5.0551
+[2025-07-08 05:36:44] [Rank 0] Group 6 Loss: 5.0764
+[2025-07-08 05:36:44] [Rank 0] Group 7 Loss: 5.1583
+[2025-07-08 05:36:44] [Rank 0] Group 8 Loss: 5.1231
+[2025-07-08 05:36:44] [Rank 0] Group 9 Loss: 5.1418
+[2025-07-08 05:36:44] [Rank 0] Group 10 Loss: 5.1223
+[2025-07-08 05:36:44] [Rank 0] Group 11 Loss: 5.1399
+[2025-07-08 05:36:44] [Rank 0] Group 0 FTA: 0.5293
+[2025-07-08 05:36:44] [Rank 0] Group 1 FTA: 0.6719
+[2025-07-08 05:36:44] [Rank 0] Group 2 FTA: 0.4740
+[2025-07-08 05:36:44] [Rank 0] Group 3 FTA: 0.5495
+[2025-07-08 05:36:44] [Rank 0] Group 4 FTA: 0.4661
+[2025-07-08 05:36:44] [Rank 0] Group 5 FTA: 0.6979
+[2025-07-08 05:36:44] [Rank 0] Group 6 FTA: 0.5807
+[2025-07-08 05:36:44] [Rank 0] Group 7 FTA: 0.6146
+[2025-07-08 05:36:44] [Rank 0] Group 8 FTA: 0.6250
+[2025-07-08 05:36:44] [Rank 0] Group 9 FTA: 0.6445
+[2025-07-08 05:36:44] [Rank 0] Group 10 FTA: 0.6504
+[2025-07-08 05:36:44] [Rank 0] Group 11 FTA: 0.6260
+[2025-07-08 05:36:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:36:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:36:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:36:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 05:36:45] [Rank 0] step:3001/10000 train_time:238202ms step_avg:79.37ms
+[2025-07-08 05:36:47] [Rank 0] step:3021/10000 train_time:239696ms step_avg:79.34ms
+[2025-07-08 05:36:48] [Rank 0] step:3041/10000 train_time:241183ms step_avg:79.31ms
+[2025-07-08 05:36:50] [Rank 0] step:3061/10000 train_time:242827ms step_avg:79.33ms
+[2025-07-08 05:36:52] [Rank 0] step:3081/10000 train_time:244959ms step_avg:79.51ms
+[2025-07-08 05:36:53] [Rank 0] step:3101/10000 train_time:246447ms step_avg:79.47ms
+[2025-07-08 05:36:55] [Rank 0] step:3121/10000 train_time:247937ms step_avg:79.44ms
+[2025-07-08 05:36:56] [Rank 0] step:3141/10000 train_time:249430ms step_avg:79.41ms
+[2025-07-08 05:36:59] [Rank 0] step:3161/10000 train_time:251578ms step_avg:79.59ms
+[2025-07-08 05:37:00] [Rank 0] step:3181/10000 train_time:253065ms step_avg:79.56ms
+[2025-07-08 05:37:02] [Rank 0] step:3201/10000 train_time:254555ms step_avg:79.52ms
+[2025-07-08 05:37:03] [Rank 0] step:3221/10000 train_time:256047ms step_avg:79.49ms
+[2025-07-08 05:37:05] [Rank 0] step:3241/10000 train_time:257592ms step_avg:79.48ms
+[2025-07-08 05:37:06] [Rank 0] step:3261/10000 train_time:259270ms step_avg:79.51ms
+[2025-07-08 05:37:08] [Rank 0] step:3281/10000 train_time:260764ms step_avg:79.48ms
+[2025-07-08 05:37:09] [Rank 0] step:3301/10000 train_time:262256ms step_avg:79.45ms
+[2025-07-08 05:37:11] [Rank 0] step:3321/10000 train_time:263752ms step_avg:79.42ms
+[2025-07-08 05:37:13] [Rank 0] step:3341/10000 train_time:265910ms step_avg:79.59ms
+[2025-07-08 05:37:14] [Rank 0] step:3361/10000 train_time:267403ms step_avg:79.56ms
+[2025-07-08 05:37:16] [Rank 0] step:3381/10000 train_time:268896ms step_avg:79.53ms
+[2025-07-08 05:37:17] [Rank 0] step:3401/10000 train_time:270393ms step_avg:79.50ms
+[2025-07-08 05:37:20] [Rank 0] step:3421/10000 train_time:271938ms step_avg:79.49ms
+[2025-07-08 05:37:21] [Rank 0] step:3441/10000 train_time:274018ms step_avg:79.63ms
+[2025-07-08 05:37:23] [Rank 0] step:3461/10000 train_time:275512ms step_avg:79.60ms
+[2025-07-08 05:37:24] [Rank 0] step:3481/10000 train_time:277009ms step_avg:79.58ms
+[2025-07-08 05:37:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:37:26] [Rank 0] PRINT: step:3500/10000 train_loss:1.0262 val_loss:0.9869 train_time:278502ms step_avg:79.57ms
+[2025-07-08 05:37:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:37:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:37:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:42:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:42:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:42:53] [Rank 0] Total Loss: 5.2024
+[2025-07-08 05:42:53] [Rank 0] Total FTA: 0.7696
+[2025-07-08 05:42:53] [Rank 0] Group 0 Loss: 5.5088
+[2025-07-08 05:42:53] [Rank 0] Group 1 Loss: 4.9304
+[2025-07-08 05:42:53] [Rank 0] Group 2 Loss: 4.9465
+[2025-07-08 05:42:53] [Rank 0] Group 3 Loss: 5.2493
+[2025-07-08 05:42:53] [Rank 0] Group 4 Loss: 5.2407
+[2025-07-08 05:42:53] [Rank 0] Group 5 Loss: 5.1075
+[2025-07-08 05:42:53] [Rank 0] Group 6 Loss: 5.1190
+[2025-07-08 05:42:53] [Rank 0] Group 7 Loss: 5.2101
+[2025-07-08 05:42:53] [Rank 0] Group 8 Loss: 5.2391
+[2025-07-08 05:42:53] [Rank 0] Group 9 Loss: 5.1655
+[2025-07-08 05:42:53] [Rank 0] Group 10 Loss: 5.1516
+[2025-07-08 05:42:53] [Rank 0] Group 11 Loss: 5.2231
+[2025-07-08 05:42:53] [Rank 0] Group 0 FTA: 0.8453
+[2025-07-08 05:42:53] [Rank 0] Group 1 FTA: 0.6615
+[2025-07-08 05:42:53] [Rank 0] Group 2 FTA: 0.8255
+[2025-07-08 05:42:53] [Rank 0] Group 3 FTA: 0.8125
+[2025-07-08 05:42:53] [Rank 0] Group 4 FTA: 0.6615
+[2025-07-08 05:42:53] [Rank 0] Group 5 FTA: 0.7604
+[2025-07-08 05:42:53] [Rank 0] Group 6 FTA: 0.7083
+[2025-07-08 05:42:53] [Rank 0] Group 7 FTA: 0.7812
+[2025-07-08 05:42:53] [Rank 0] Group 8 FTA: 0.7812
+[2025-07-08 05:42:53] [Rank 0] Group 9 FTA: 0.7539
+[2025-07-08 05:42:53] [Rank 0] Group 10 FTA: 0.7812
+[2025-07-08 05:42:53] [Rank 0] Group 11 FTA: 0.7725
+[2025-07-08 05:42:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:42:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:42:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:42:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 05:42:54] [Rank 0] step:3501/10000 train_time:278523ms step_avg:79.56ms
+[2025-07-08 05:42:56] [Rank 0] step:3521/10000 train_time:280659ms step_avg:79.71ms
+[2025-07-08 05:42:58] [Rank 0] step:3541/10000 train_time:282149ms step_avg:79.68ms
+[2025-07-08 05:42:59] [Rank 0] step:3561/10000 train_time:283636ms step_avg:79.65ms
+[2025-07-08 05:43:01] [Rank 0] step:3581/10000 train_time:285124ms step_avg:79.62ms
+[2025-07-08 05:43:03] [Rank 0] step:3601/10000 train_time:286667ms step_avg:79.61ms
+[2025-07-08 05:43:04] [Rank 0] step:3621/10000 train_time:288343ms step_avg:79.63ms
+[2025-07-08 05:43:06] [Rank 0] step:3641/10000 train_time:289831ms step_avg:79.60ms
+[2025-07-08 05:43:07] [Rank 0] step:3661/10000 train_time:291320ms step_avg:79.57ms
+[2025-07-08 05:43:09] [Rank 0] step:3681/10000 train_time:292961ms step_avg:79.59ms
+[2025-07-08 05:43:10] [Rank 0] step:3701/10000 train_time:294691ms step_avg:79.62ms
+[2025-07-08 05:43:12] [Rank 0] step:3721/10000 train_time:296179ms step_avg:79.60ms
+[2025-07-08 05:43:13] [Rank 0] step:3741/10000 train_time:297670ms step_avg:79.57ms
+[2025-07-08 05:43:15] [Rank 0] step:3761/10000 train_time:299164ms step_avg:79.54ms
+[2025-07-08 05:43:17] [Rank 0] step:3781/10000 train_time:301316ms step_avg:79.69ms
+[2025-07-08 05:43:19] [Rank 0] step:3801/10000 train_time:302788ms step_avg:79.66ms
+[2025-07-08 05:43:20] [Rank 0] step:3821/10000 train_time:304281ms step_avg:79.63ms
+[2025-07-08 05:43:22] [Rank 0] step:3841/10000 train_time:305775ms step_avg:79.61ms
+[2025-07-08 05:43:23] [Rank 0] step:3861/10000 train_time:307271ms step_avg:79.58ms
+[2025-07-08 05:43:25] [Rank 0] step:3881/10000 train_time:309006ms step_avg:79.62ms
+[2025-07-08 05:43:26] [Rank 0] step:3901/10000 train_time:310500ms step_avg:79.59ms
+[2025-07-08 05:43:28] [Rank 0] step:3921/10000 train_time:311996ms step_avg:79.57ms
+[2025-07-08 05:43:29] [Rank 0] step:3941/10000 train_time:313492ms step_avg:79.55ms
+[2025-07-08 05:43:31] [Rank 0] step:3961/10000 train_time:315038ms step_avg:79.54ms
+[2025-07-08 05:43:33] [Rank 0] step:3981/10000 train_time:317127ms step_avg:79.66ms
+[2025-07-08 05:43:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:43:35] [Rank 0] PRINT: step:4000/10000 train_loss:0.9701 val_loss:0.9427 train_time:318622ms step_avg:79.66ms
+[2025-07-08 05:43:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:43:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:43:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:49:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:49:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:49:01] [Rank 0] Total Loss: 5.3145
+[2025-07-08 05:49:01] [Rank 0] Total FTA: 0.8179
+[2025-07-08 05:49:01] [Rank 0] Group 0 Loss: 5.5095
+[2025-07-08 05:49:01] [Rank 0] Group 1 Loss: 4.8857
+[2025-07-08 05:49:01] [Rank 0] Group 2 Loss: 5.2072
+[2025-07-08 05:49:01] [Rank 0] Group 3 Loss: 5.5143
+[2025-07-08 05:49:01] [Rank 0] Group 4 Loss: 5.3282
+[2025-07-08 05:49:01] [Rank 0] Group 5 Loss: 5.2332
+[2025-07-08 05:49:01] [Rank 0] Group 6 Loss: 5.2669
+[2025-07-08 05:49:01] [Rank 0] Group 7 Loss: 5.3501
+[2025-07-08 05:49:01] [Rank 0] Group 8 Loss: 5.3301
+[2025-07-08 05:49:01] [Rank 0] Group 9 Loss: 5.3196
+[2025-07-08 05:49:01] [Rank 0] Group 10 Loss: 5.3115
+[2025-07-08 05:49:01] [Rank 0] Group 11 Loss: 5.3183
+[2025-07-08 05:49:01] [Rank 0] Group 0 FTA: 0.6736
+[2025-07-08 05:49:01] [Rank 0] Group 1 FTA: 0.8438
+[2025-07-08 05:49:01] [Rank 0] Group 2 FTA: 0.9167
+[2025-07-08 05:49:01] [Rank 0] Group 3 FTA: 0.7474
+[2025-07-08 05:49:01] [Rank 0] Group 4 FTA: 0.8698
+[2025-07-08 05:49:01] [Rank 0] Group 5 FTA: 0.8672
+[2025-07-08 05:49:01] [Rank 0] Group 6 FTA: 0.8359
+[2025-07-08 05:49:01] [Rank 0] Group 7 FTA: 0.8281
+[2025-07-08 05:49:01] [Rank 0] Group 8 FTA: 0.8255
+[2025-07-08 05:49:01] [Rank 0] Group 9 FTA: 0.8047
+[2025-07-08 05:49:01] [Rank 0] Group 10 FTA: 0.8398
+[2025-07-08 05:49:01] [Rank 0] Group 11 FTA: 0.8467
+[2025-07-08 05:49:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:49:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:49:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:49:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 05:49:03] [Rank 0] step:4001/10000 train_time:318642ms step_avg:79.64ms
+[2025-07-08 05:49:04] [Rank 0] step:4021/10000 train_time:320140ms step_avg:79.62ms
+[2025-07-08 05:49:06] [Rank 0] step:4041/10000 train_time:321629ms step_avg:79.59ms
+[2025-07-08 05:49:08] [Rank 0] step:4061/10000 train_time:323773ms step_avg:79.73ms
+[2025-07-08 05:49:10] [Rank 0] step:4081/10000 train_time:325259ms step_avg:79.70ms
+[2025-07-08 05:49:11] [Rank 0] step:4101/10000 train_time:326746ms step_avg:79.67ms
+[2025-07-08 05:49:13] [Rank 0] step:4121/10000 train_time:328238ms step_avg:79.65ms
+[2025-07-08 05:49:14] [Rank 0] step:4141/10000 train_time:329778ms step_avg:79.64ms
+[2025-07-08 05:49:16] [Rank 0] step:4161/10000 train_time:331450ms step_avg:79.66ms
+[2025-07-08 05:49:17] [Rank 0] step:4181/10000 train_time:332940ms step_avg:79.63ms
+[2025-07-08 05:49:19] [Rank 0] step:4201/10000 train_time:334432ms step_avg:79.61ms
+[2025-07-08 05:49:20] [Rank 0] step:4221/10000 train_time:335921ms step_avg:79.58ms
+[2025-07-08 05:49:22] [Rank 0] step:4241/10000 train_time:337651ms step_avg:79.62ms
+[2025-07-08 05:49:23] [Rank 0] step:4261/10000 train_time:339141ms step_avg:79.59ms
+[2025-07-08 05:49:25] [Rank 0] step:4281/10000 train_time:340633ms step_avg:79.57ms
+[2025-07-08 05:49:26] [Rank 0] step:4301/10000 train_time:342124ms step_avg:79.55ms
+[2025-07-08 05:49:29] [Rank 0] step:4321/10000 train_time:344459ms step_avg:79.72ms
+[2025-07-08 05:49:30] [Rank 0] step:4341/10000 train_time:345933ms step_avg:79.69ms
+[2025-07-08 05:49:32] [Rank 0] step:4361/10000 train_time:347427ms step_avg:79.67ms
+[2025-07-08 05:49:33] [Rank 0] step:4381/10000 train_time:348919ms step_avg:79.64ms
+[2025-07-08 05:49:35] [Rank 0] step:4401/10000 train_time:350413ms step_avg:79.62ms
+[2025-07-08 05:49:37] [Rank 0] step:4421/10000 train_time:352574ms step_avg:79.75ms
+[2025-07-08 05:49:38] [Rank 0] step:4441/10000 train_time:354068ms step_avg:79.73ms
+[2025-07-08 05:49:40] [Rank 0] step:4461/10000 train_time:355562ms step_avg:79.70ms
+[2025-07-08 05:49:41] [Rank 0] step:4481/10000 train_time:357059ms step_avg:79.68ms
+[2025-07-08 05:49:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:49:44] [Rank 0] PRINT: step:4500/10000 train_loss:0.9313 val_loss:0.9134 train_time:358554ms step_avg:79.68ms
+[2025-07-08 05:49:44] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:49:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:49:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 05:55:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 05:55:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 05:55:11] [Rank 0] Total Loss: 5.4465
+[2025-07-08 05:55:11] [Rank 0] Total FTA: 0.9100
+[2025-07-08 05:55:11] [Rank 0] Group 0 Loss: 5.8009
+[2025-07-08 05:55:11] [Rank 0] Group 1 Loss: 5.0948
+[2025-07-08 05:55:11] [Rank 0] Group 2 Loss: 5.2565
+[2025-07-08 05:55:11] [Rank 0] Group 3 Loss: 5.4965
+[2025-07-08 05:55:11] [Rank 0] Group 4 Loss: 5.4323
+[2025-07-08 05:55:11] [Rank 0] Group 5 Loss: 5.3545
+[2025-07-08 05:55:11] [Rank 0] Group 6 Loss: 5.2971
+[2025-07-08 05:55:11] [Rank 0] Group 7 Loss: 5.5047
+[2025-07-08 05:55:11] [Rank 0] Group 8 Loss: 5.4120
+[2025-07-08 05:55:11] [Rank 0] Group 9 Loss: 5.4605
+[2025-07-08 05:55:11] [Rank 0] Group 10 Loss: 5.4560
+[2025-07-08 05:55:11] [Rank 0] Group 11 Loss: 5.4436
+[2025-07-08 05:55:11] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 05:55:11] [Rank 0] Group 1 FTA: 0.8255
+[2025-07-08 05:55:11] [Rank 0] Group 2 FTA: 0.9141
+[2025-07-08 05:55:11] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-08 05:55:11] [Rank 0] Group 4 FTA: 0.8646
+[2025-07-08 05:55:11] [Rank 0] Group 5 FTA: 0.9062
+[2025-07-08 05:55:11] [Rank 0] Group 6 FTA: 0.8984
+[2025-07-08 05:55:11] [Rank 0] Group 7 FTA: 0.8932
+[2025-07-08 05:55:11] [Rank 0] Group 8 FTA: 0.8880
+[2025-07-08 05:55:11] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-08 05:55:11] [Rank 0] Group 10 FTA: 0.9062
+[2025-07-08 05:55:11] [Rank 0] Group 11 FTA: 0.8994
+[2025-07-08 05:55:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 05:55:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 05:55:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 05:55:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 05:55:14] [Rank 0] step:4501/10000 train_time:358684ms step_avg:79.69ms
+[2025-07-08 05:55:15] [Rank 0] step:4521/10000 train_time:360778ms step_avg:79.80ms
+[2025-07-08 05:55:17] [Rank 0] step:4541/10000 train_time:362264ms step_avg:79.78ms
+[2025-07-08 05:55:18] [Rank 0] step:4561/10000 train_time:363753ms step_avg:79.75ms
+[2025-07-08 05:55:20] [Rank 0] step:4581/10000 train_time:365242ms step_avg:79.73ms
+[2025-07-08 05:55:22] [Rank 0] step:4601/10000 train_time:367393ms step_avg:79.85ms
+[2025-07-08 05:55:23] [Rank 0] step:4621/10000 train_time:368881ms step_avg:79.83ms
+[2025-07-08 05:55:25] [Rank 0] step:4641/10000 train_time:370369ms step_avg:79.80ms
+[2025-07-08 05:55:26] [Rank 0] step:4661/10000 train_time:371860ms step_avg:79.78ms
+[2025-07-08 05:55:28] [Rank 0] step:4681/10000 train_time:373603ms step_avg:79.81ms
+[2025-07-08 05:55:30] [Rank 0] step:4701/10000 train_time:375512ms step_avg:79.88ms
+[2025-07-08 05:55:31] [Rank 0] step:4721/10000 train_time:377004ms step_avg:79.86ms
+[2025-07-08 05:55:33] [Rank 0] step:4741/10000 train_time:378494ms step_avg:79.83ms
+[2025-07-08 05:55:34] [Rank 0] step:4761/10000 train_time:379989ms step_avg:79.81ms
+[2025-07-08 05:55:37] [Rank 0] step:4781/10000 train_time:382148ms step_avg:79.93ms
+[2025-07-08 05:55:38] [Rank 0] step:4801/10000 train_time:383641ms step_avg:79.91ms
+[2025-07-08 05:55:40] [Rank 0] step:4821/10000 train_time:385133ms step_avg:79.89ms
+[2025-07-08 05:55:41] [Rank 0] step:4841/10000 train_time:386628ms step_avg:79.87ms
+[2025-07-08 05:55:43] [Rank 0] step:4861/10000 train_time:388174ms step_avg:79.85ms
+[2025-07-08 05:55:45] [Rank 0] step:4881/10000 train_time:390256ms step_avg:79.95ms
+[2025-07-08 05:55:46] [Rank 0] step:4901/10000 train_time:391751ms step_avg:79.93ms
+[2025-07-08 05:55:48] [Rank 0] step:4921/10000 train_time:393401ms step_avg:79.94ms
+[2025-07-08 05:55:49] [Rank 0] step:4941/10000 train_time:394898ms step_avg:79.92ms
+[2025-07-08 05:55:51] [Rank 0] step:4961/10000 train_time:397058ms step_avg:80.04ms
+[2025-07-08 05:55:53] [Rank 0] step:4981/10000 train_time:398553ms step_avg:80.01ms
+[2025-07-08 05:55:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 05:55:55] [Rank 0] PRINT: step:5000/10000 train_loss:0.9074 val_loss:0.8959 train_time:400048ms step_avg:80.01ms
+[2025-07-08 05:55:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 05:55:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 05:55:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:01:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:01:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:01:22] [Rank 0] Total Loss: 5.6705
+[2025-07-08 06:01:22] [Rank 0] Total FTA: 0.8885
+[2025-07-08 06:01:22] [Rank 0] Group 0 Loss: 6.1768
+[2025-07-08 06:01:22] [Rank 0] Group 1 Loss: 5.3517
+[2025-07-08 06:01:22] [Rank 0] Group 2 Loss: 5.5671
+[2025-07-08 06:01:22] [Rank 0] Group 3 Loss: 5.7793
+[2025-07-08 06:01:22] [Rank 0] Group 4 Loss: 5.6583
+[2025-07-08 06:01:22] [Rank 0] Group 5 Loss: 5.5060
+[2025-07-08 06:01:22] [Rank 0] Group 6 Loss: 5.5160
+[2025-07-08 06:01:22] [Rank 0] Group 7 Loss: 5.6083
+[2025-07-08 06:01:22] [Rank 0] Group 8 Loss: 5.5837
+[2025-07-08 06:01:22] [Rank 0] Group 9 Loss: 5.5809
+[2025-07-08 06:01:22] [Rank 0] Group 10 Loss: 5.5829
+[2025-07-08 06:01:22] [Rank 0] Group 11 Loss: 5.6543
+[2025-07-08 06:01:22] [Rank 0] Group 0 FTA: 0.8544
+[2025-07-08 06:01:22] [Rank 0] Group 1 FTA: 0.6562
+[2025-07-08 06:01:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 06:01:22] [Rank 0] Group 3 FTA: 0.9271
+[2025-07-08 06:01:22] [Rank 0] Group 4 FTA: 0.8828
+[2025-07-08 06:01:22] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-08 06:01:22] [Rank 0] Group 6 FTA: 0.9245
+[2025-07-08 06:01:22] [Rank 0] Group 7 FTA: 0.8828
+[2025-07-08 06:01:22] [Rank 0] Group 8 FTA: 0.9167
+[2025-07-08 06:01:22] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-08 06:01:22] [Rank 0] Group 10 FTA: 0.9004
+[2025-07-08 06:01:22] [Rank 0] Group 11 FTA: 0.8994
+[2025-07-08 06:01:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:01:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:01:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:01:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:01:24] [Rank 0] step:5001/10000 train_time:400068ms step_avg:80.00ms
+[2025-07-08 06:01:25] [Rank 0] step:5021/10000 train_time:401552ms step_avg:79.97ms
+[2025-07-08 06:01:28] [Rank 0] step:5041/10000 train_time:403717ms step_avg:80.09ms
+[2025-07-08 06:01:29] [Rank 0] step:5061/10000 train_time:405185ms step_avg:80.06ms
+[2025-07-08 06:01:31] [Rank 0] step:5081/10000 train_time:406671ms step_avg:80.04ms
+[2025-07-08 06:01:32] [Rank 0] step:5101/10000 train_time:408159ms step_avg:80.02ms
+[2025-07-08 06:01:34] [Rank 0] step:5121/10000 train_time:409649ms step_avg:79.99ms
+[2025-07-08 06:01:36] [Rank 0] step:5141/10000 train_time:411799ms step_avg:80.10ms
+[2025-07-08 06:01:37] [Rank 0] step:5161/10000 train_time:413286ms step_avg:80.08ms
+[2025-07-08 06:01:39] [Rank 0] step:5181/10000 train_time:414775ms step_avg:80.06ms
+[2025-07-08 06:01:40] [Rank 0] step:5201/10000 train_time:416268ms step_avg:80.04ms
+[2025-07-08 06:01:42] [Rank 0] step:5221/10000 train_time:417756ms step_avg:80.01ms
+[2025-07-08 06:01:43] [Rank 0] step:5241/10000 train_time:419481ms step_avg:80.04ms
+[2025-07-08 06:01:45] [Rank 0] step:5261/10000 train_time:420968ms step_avg:80.02ms
+[2025-07-08 06:01:46] [Rank 0] step:5281/10000 train_time:422460ms step_avg:80.00ms
+[2025-07-08 06:01:48] [Rank 0] step:5301/10000 train_time:423953ms step_avg:79.98ms
+[2025-07-08 06:01:50] [Rank 0] step:5321/10000 train_time:425677ms step_avg:80.00ms
+[2025-07-08 06:01:51] [Rank 0] step:5341/10000 train_time:427169ms step_avg:79.98ms
+[2025-07-08 06:01:53] [Rank 0] step:5361/10000 train_time:428660ms step_avg:79.96ms
+[2025-07-08 06:01:54] [Rank 0] step:5381/10000 train_time:430153ms step_avg:79.94ms
+[2025-07-08 06:01:56] [Rank 0] step:5401/10000 train_time:431648ms step_avg:79.92ms
+[2025-07-08 06:01:58] [Rank 0] step:5421/10000 train_time:433799ms step_avg:80.02ms
+[2025-07-08 06:01:59] [Rank 0] step:5441/10000 train_time:435291ms step_avg:80.00ms
+[2025-07-08 06:02:01] [Rank 0] step:5461/10000 train_time:436785ms step_avg:79.98ms
+[2025-07-08 06:02:02] [Rank 0] step:5481/10000 train_time:438280ms step_avg:79.96ms
+[2025-07-08 06:02:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:02:05] [Rank 0] PRINT: step:5500/10000 train_loss:0.8917 val_loss:0.8850 train_time:440011ms step_avg:80.00ms
+[2025-07-08 06:02:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:02:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:02:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:07:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:07:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:07:31] [Rank 0] Total Loss: 5.6136
+[2025-07-08 06:07:31] [Rank 0] Total FTA: 0.8992
+[2025-07-08 06:07:31] [Rank 0] Group 0 Loss: 5.8007
+[2025-07-08 06:07:31] [Rank 0] Group 1 Loss: 5.3537
+[2025-07-08 06:07:31] [Rank 0] Group 2 Loss: 5.4893
+[2025-07-08 06:07:31] [Rank 0] Group 3 Loss: 5.5987
+[2025-07-08 06:07:31] [Rank 0] Group 4 Loss: 5.6758
+[2025-07-08 06:07:31] [Rank 0] Group 5 Loss: 5.5210
+[2025-07-08 06:07:31] [Rank 0] Group 6 Loss: 5.5250
+[2025-07-08 06:07:31] [Rank 0] Group 7 Loss: 5.6142
+[2025-07-08 06:07:31] [Rank 0] Group 8 Loss: 5.6414
+[2025-07-08 06:07:31] [Rank 0] Group 9 Loss: 5.6254
+[2025-07-08 06:07:31] [Rank 0] Group 10 Loss: 5.6509
+[2025-07-08 06:07:31] [Rank 0] Group 11 Loss: 5.6351
+[2025-07-08 06:07:31] [Rank 0] Group 0 FTA: 0.8440
+[2025-07-08 06:07:31] [Rank 0] Group 1 FTA: 0.6641
+[2025-07-08 06:07:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 06:07:31] [Rank 0] Group 3 FTA: 0.9010
+[2025-07-08 06:07:31] [Rank 0] Group 4 FTA: 0.9375
+[2025-07-08 06:07:31] [Rank 0] Group 5 FTA: 0.9714
+[2025-07-08 06:07:31] [Rank 0] Group 6 FTA: 0.9479
+[2025-07-08 06:07:31] [Rank 0] Group 7 FTA: 0.9141
+[2025-07-08 06:07:31] [Rank 0] Group 8 FTA: 0.8984
+[2025-07-08 06:07:31] [Rank 0] Group 9 FTA: 0.9219
+[2025-07-08 06:07:31] [Rank 0] Group 10 FTA: 0.9102
+[2025-07-08 06:07:31] [Rank 0] Group 11 FTA: 0.9141
+[2025-07-08 06:07:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:07:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:07:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:07:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:07:32] [Rank 0] step:5501/10000 train_time:440030ms step_avg:79.99ms
+[2025-07-08 06:07:34] [Rank 0] step:5521/10000 train_time:441536ms step_avg:79.97ms
+[2025-07-08 06:07:35] [Rank 0] step:5541/10000 train_time:443024ms step_avg:79.95ms
+[2025-07-08 06:07:37] [Rank 0] step:5561/10000 train_time:444513ms step_avg:79.93ms
+[2025-07-08 06:07:39] [Rank 0] step:5581/10000 train_time:445999ms step_avg:79.91ms
+[2025-07-08 06:07:40] [Rank 0] step:5601/10000 train_time:448154ms step_avg:80.01ms
+[2025-07-08 06:07:42] [Rank 0] step:5621/10000 train_time:449642ms step_avg:79.99ms
+[2025-07-08 06:07:43] [Rank 0] step:5641/10000 train_time:451134ms step_avg:79.97ms
+[2025-07-08 06:07:45] [Rank 0] step:5661/10000 train_time:452621ms step_avg:79.95ms
+[2025-07-08 06:07:47] [Rank 0] step:5681/10000 train_time:454769ms step_avg:80.05ms
+[2025-07-08 06:07:49] [Rank 0] step:5701/10000 train_time:456256ms step_avg:80.03ms
+[2025-07-08 06:07:50] [Rank 0] step:5721/10000 train_time:457746ms step_avg:80.01ms
+[2025-07-08 06:07:51] [Rank 0] step:5741/10000 train_time:459239ms step_avg:79.99ms
+[2025-07-08 06:07:54] [Rank 0] step:5761/10000 train_time:460778ms step_avg:79.98ms
+[2025-07-08 06:07:55] [Rank 0] step:5781/10000 train_time:462875ms step_avg:80.07ms
+[2025-07-08 06:07:57] [Rank 0] step:5801/10000 train_time:464368ms step_avg:80.05ms
step:5801/10000 train_time:464368ms step_avg:80.05ms +[2025-07-08 06:07:58] [Rank 0] step:5821/10000 train_time:465859ms step_avg:80.03ms +[2025-07-08 06:07:58] [Rank 0] step:5821/10000 train_time:465859ms step_avg:80.03ms +[2025-07-08 06:08:00] [Rank 0] step:5841/10000 train_time:467351ms step_avg:80.01ms +[2025-07-08 06:08:00] [Rank 0] step:5841/10000 train_time:467351ms step_avg:80.01ms +[2025-07-08 06:08:02] [Rank 0] step:5861/10000 train_time:469491ms step_avg:80.10ms +[2025-07-08 06:08:02] [Rank 0] step:5861/10000 train_time:469491ms step_avg:80.10ms +[2025-07-08 06:08:03] [Rank 0] step:5881/10000 train_time:470981ms step_avg:80.09ms +[2025-07-08 06:08:03] [Rank 0] step:5881/10000 train_time:470981ms step_avg:80.09ms +[2025-07-08 06:08:05] [Rank 0] step:5901/10000 train_time:472474ms step_avg:80.07ms +[2025-07-08 06:08:05] [Rank 0] step:5901/10000 train_time:472474ms step_avg:80.07ms +[2025-07-08 06:08:06] [Rank 0] step:5921/10000 train_time:473968ms step_avg:80.05ms +[2025-07-08 06:08:06] [Rank 0] step:5921/10000 train_time:473968ms step_avg:80.05ms +[2025-07-08 06:08:08] [Rank 0] step:5941/10000 train_time:475715ms step_avg:80.07ms +[2025-07-08 06:08:08] [Rank 0] step:5941/10000 train_time:475715ms step_avg:80.07ms +[2025-07-08 06:08:10] [Rank 0] step:5961/10000 train_time:477596ms step_avg:80.12ms +[2025-07-08 06:08:10] [Rank 0] step:5961/10000 train_time:477596ms step_avg:80.12ms +[2025-07-08 06:08:11] [Rank 0] step:5981/10000 train_time:479090ms step_avg:80.10ms +[2025-07-08 06:08:11] [Rank 0] step:5981/10000 train_time:479090ms step_avg:80.10ms +[2025-07-08 06:08:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 06:08:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 06:08:14] [Rank 0] PRINT: step:6000/10000 train_loss:0.8822 val_loss:0.8783 train_time:480582ms step_avg:80.10ms +[2025-07-08 06:08:14] [Rank 0] PRINT: step:6000/10000 train_loss:0.8822 val_loss:0.8783 train_time:480582ms step_avg:80.10ms +[2025-07-08 06:08:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 06:08:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 06:08:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 06:08:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 06:08:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:13:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:13:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:13:39] [Rank 0] Total Loss: 5.7333
+[2025-07-08 06:13:39] [Rank 0] Total FTA: 0.9096
+[2025-07-08 06:13:39] [Rank 0] Group 0 Loss: 6.0208
+[2025-07-08 06:13:39] [Rank 0] Group 1 Loss: 5.5583
+[2025-07-08 06:13:39] [Rank 0] Group 2 Loss: 5.5994
+[2025-07-08 06:13:39] [Rank 0] Group 3 Loss: 5.8304
+[2025-07-08 06:13:39] [Rank 0] Group 4 Loss: 5.6964
+[2025-07-08 06:13:39] [Rank 0] Group 5 Loss: 5.7616
+[2025-07-08 06:13:39] [Rank 0] Group 6 Loss: 5.6076
+[2025-07-08 06:13:39] [Rank 0] Group 7 Loss: 5.6870
+[2025-07-08 06:13:39] [Rank 0] Group 8 Loss: 5.7279
+[2025-07-08 06:13:39] [Rank 0] Group 9 Loss: 5.6480
+[2025-07-08 06:13:39] [Rank 0] Group 10 Loss: 5.7162
+[2025-07-08 06:13:39] [Rank 0] Group 11 Loss: 5.6964
+[2025-07-08 06:13:39] [Rank 0] Group 0 FTA: 0.8244
+[2025-07-08 06:13:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 06:13:39] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 06:13:39] [Rank 0] Group 3 FTA: 0.9714
+[2025-07-08 06:13:39] [Rank 0] Group 4 FTA: 0.9036
+[2025-07-08 06:13:39] [Rank 0] Group 5 FTA: 0.9193
+[2025-07-08 06:13:39] [Rank 0] Group 6 FTA: 0.8750
+[2025-07-08 06:13:39] [Rank 0] Group 7 FTA: 0.8828
+[2025-07-08 06:13:39] [Rank 0] Group 8 FTA: 0.9062
+[2025-07-08 06:13:39] [Rank 0] Group 9 FTA: 0.9258
+[2025-07-08 06:13:39] [Rank 0] Group 10 FTA: 0.9121
+[2025-07-08 06:13:39] [Rank 0] Group 11 FTA: 0.9004
+[2025-07-08 06:13:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:13:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:13:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:13:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:13:41] [Rank 0] step:6001/10000 train_time:480602ms step_avg:80.09ms
+[2025-07-08 06:13:42] [Rank 0] step:6021/10000 train_time:482158ms step_avg:80.08ms
+[2025-07-08 06:13:44] [Rank 0] step:6041/10000 train_time:484292ms step_avg:80.17ms
+[2025-07-08 06:13:46] [Rank 0] step:6061/10000 train_time:485777ms step_avg:80.15ms
+[2025-07-08 06:13:47] [Rank 0] step:6081/10000 train_time:487264ms step_avg:80.13ms
+[2025-07-08 06:13:49] [Rank 0] step:6101/10000 train_time:488754ms step_avg:80.11ms
+[2025-07-08 06:13:51] [Rank 0] step:6121/10000 train_time:490899ms step_avg:80.20ms
+[2025-07-08 06:13:52] [Rank 0] step:6141/10000 train_time:492367ms step_avg:80.18ms
+[2025-07-08 06:13:54] [Rank 0] step:6161/10000 train_time:493855ms step_avg:80.16ms
+[2025-07-08 06:13:55] [Rank 0] step:6181/10000 train_time:495345ms step_avg:80.14ms
+[2025-07-08 06:13:57] [Rank 0] step:6201/10000 train_time:496834ms step_avg:80.12ms
+[2025-07-08 06:13:59] [Rank 0] step:6221/10000 train_time:498563ms step_avg:80.14ms
+[2025-07-08 06:14:00] [Rank 0] step:6241/10000 train_time:500051ms step_avg:80.12ms
+[2025-07-08 06:14:02] [Rank 0] step:6261/10000 train_time:501542ms step_avg:80.11ms
+[2025-07-08 06:14:03] [Rank 0] step:6281/10000 train_time:503037ms step_avg:80.09ms
+[2025-07-08 06:14:05] [Rank 0] step:6301/10000 train_time:504527ms step_avg:80.07ms
+[2025-07-08 06:14:07] [Rank 0] step:6321/10000 train_time:506675ms step_avg:80.16ms
+[2025-07-08 06:14:08] [Rank 0] step:6341/10000 train_time:508167ms step_avg:80.14ms
+[2025-07-08 06:14:10] [Rank 0] step:6361/10000 train_time:509662ms step_avg:80.12ms
+[2025-07-08 06:14:11] [Rank 0] step:6381/10000 train_time:511154ms step_avg:80.11ms
+[2025-07-08 06:14:13] [Rank 0] step:6401/10000 train_time:512888ms step_avg:80.13ms
+[2025-07-08 06:14:14] [Rank 0] step:6421/10000 train_time:514380ms step_avg:80.11ms
+[2025-07-08 06:14:16] [Rank 0] step:6441/10000 train_time:515874ms step_avg:80.09ms
+[2025-07-08 06:14:17] [Rank 0] step:6461/10000 train_time:517369ms step_avg:80.08ms
+[2025-07-08 06:14:19] [Rank 0] step:6481/10000 train_time:518915ms step_avg:80.07ms
+[2025-07-08 06:14:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:14:22] [Rank 0] PRINT: step:6500/10000 train_loss:0.8763 val_loss:0.8744 train_time:521010ms step_avg:80.16ms
+[2025-07-08 06:14:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:14:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:14:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:19:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:19:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:19:48] [Rank 0] Total Loss: 5.7371
+[2025-07-08 06:19:48] [Rank 0] Total FTA: 0.8780
+[2025-07-08 06:19:48] [Rank 0] Group 0 Loss: 6.0773
+[2025-07-08 06:19:48] [Rank 0] Group 1 Loss: 5.2294
+[2025-07-08 06:19:48] [Rank 0] Group 2 Loss: 5.6274
+[2025-07-08 06:19:48] [Rank 0] Group 3 Loss: 5.7690
+[2025-07-08 06:19:48] [Rank 0] Group 4 Loss: 5.7885
+[2025-07-08 06:19:48] [Rank 0] Group 5 Loss: 5.6587
+[2025-07-08 06:19:48] [Rank 0] Group 6 Loss: 5.6417
+[2025-07-08 06:19:48] [Rank 0] Group 7 Loss: 5.7600
+[2025-07-08 06:19:48] [Rank 0] Group 8 Loss: 5.7423
+[2025-07-08 06:19:48] [Rank 0] Group 9 Loss: 5.6387
+[2025-07-08 06:19:48] [Rank 0] Group 10 Loss: 5.7306
+[2025-07-08 06:19:48] [Rank 0] Group 11 Loss: 5.7642
+[2025-07-08 06:19:48] [Rank 0] Group 0 FTA: 0.6580
+[2025-07-08 06:19:49] [Rank 0] Group 1 FTA: 0.8620
+[2025-07-08 06:19:49] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-08 06:19:49] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 06:19:49] [Rank 0] Group 4 FTA: 0.9141
+[2025-07-08 06:19:49] [Rank 0] Group 5 FTA: 0.8932
+[2025-07-08 06:19:49] [Rank 0] Group 6 FTA: 0.9245
+[2025-07-08 06:19:49] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-08 06:19:49] [Rank 0] Group 8 FTA: 0.9271
+[2025-07-08 06:19:49] [Rank 0] Group 9 FTA: 0.8945
+[2025-07-08 06:19:49] [Rank 0] Group 10 FTA: 0.9121
+[2025-07-08 06:19:49] [Rank 0] Group 11 FTA: 0.8916
+[2025-07-08 06:19:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:19:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:19:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:19:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:19:50] [Rank 0] step:6501/10000 train_time:521030ms step_avg:80.15ms
+[2025-07-08 06:19:52] [Rank 0] step:6521/10000 train_time:522521ms step_avg:80.13ms
+[2025-07-08 06:19:53] [Rank 0] step:6541/10000 train_time:524005ms step_avg:80.11ms
+[2025-07-08 06:19:55] [Rank 0] step:6561/10000 train_time:525492ms step_avg:80.09ms
+[2025-07-08 06:19:57] [Rank 0] step:6581/10000 train_time:527626ms step_avg:80.17ms
+[2025-07-08 06:19:58] [Rank 0] step:6601/10000 train_time:529112ms step_avg:80.16ms
+[2025-07-08 06:20:00] [Rank 0] step:6621/10000 train_time:530600ms step_avg:80.14ms
+[2025-07-08 06:20:01] [Rank 0] step:6641/10000 train_time:532087ms step_avg:80.12ms
+[2025-07-08 06:20:03] [Rank 0] step:6661/10000 train_time:533835ms step_avg:80.14ms
+[2025-07-08 06:20:04] [Rank 0] step:6681/10000 train_time:535304ms step_avg:80.12ms
+[2025-07-08 06:20:06] [Rank 0] step:6701/10000 train_time:536794ms step_avg:80.11ms
+[2025-07-08 06:20:07] [Rank 0] step:6721/10000 train_time:538284ms step_avg:80.09ms
+[2025-07-08 06:20:09] [Rank 0] step:6741/10000 train_time:539774ms step_avg:80.07ms
+[2025-07-08 06:20:11] [Rank 0] step:6761/10000 train_time:541503ms step_avg:80.09ms
+[2025-07-08 06:20:12] [Rank 0] step:6781/10000 train_time:542994ms step_avg:80.08ms
+[2025-07-08 06:20:14] [Rank 0] step:6801/10000 train_time:544486ms step_avg:80.06ms
+[2025-07-08 06:20:15] [Rank 0] step:6821/10000 train_time:545979ms step_avg:80.04ms
+[2025-07-08 06:20:17] [Rank 0] step:6841/10000 train_time:547472ms step_avg:80.03ms
+[2025-07-08 06:20:18] [Rank 0] step:6861/10000 train_time:549199ms step_avg:80.05ms
+[2025-07-08 06:20:20] [Rank 0] step:6881/10000 train_time:550695ms step_avg:80.03ms
+[2025-07-08 06:20:21] [Rank 0] step:6901/10000 train_time:552190ms step_avg:80.02ms
+[2025-07-08 06:20:23] [Rank 0] step:6921/10000 train_time:553685ms step_avg:80.00ms
+[2025-07-08 06:20:25] [Rank 0] step:6941/10000 train_time:555820ms step_avg:80.08ms
+[2025-07-08 06:20:26] [Rank 0] step:6961/10000 train_time:557312ms step_avg:80.06ms
+[2025-07-08 06:20:28] [Rank 0] step:6981/10000 train_time:558806ms step_avg:80.05ms
+[2025-07-08 06:20:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:20:30] [Rank 0] PRINT: step:7000/10000 train_loss:0.8728 val_loss:0.8727 train_time:560300ms step_avg:80.04ms
+[2025-07-08 06:20:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:20:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:20:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:26:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:26:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:26:03] [Rank 0] Total Loss: 5.8937
+[2025-07-08 06:26:03] [Rank 0] Total FTA: 0.8692
+[2025-07-08 06:26:03] [Rank 0] Group 0 Loss: 6.1581
+[2025-07-08 06:26:03] [Rank 0] Group 1 Loss: 5.3388
+[2025-07-08 06:26:03] [Rank 0] Group 2 Loss: 5.7601
+[2025-07-08 06:26:03] [Rank 0] Group 3 Loss: 5.9631
+[2025-07-08 06:26:03] [Rank 0] Group 4 Loss: 5.8925
+[2025-07-08 06:26:03] [Rank 0] Group 5 Loss: 5.8928
+[2025-07-08 06:26:03] [Rank 0] Group 6 Loss: 5.8613
+[2025-07-08 06:26:03] [Rank 0] Group 7 Loss: 5.9230
+[2025-07-08 06:26:03] [Rank 0] Group 8 Loss: 5.8736
+[2025-07-08 06:26:03] [Rank 0] Group 9 Loss: 5.9583
+[2025-07-08 06:26:03] [Rank 0] Group 10 Loss: 5.9062
+[2025-07-08 06:26:03] [Rank 0] Group 11 Loss: 5.9145
+[2025-07-08 06:26:03] [Rank 0] Group 0 FTA: 0.8127
+[2025-07-08 06:26:03] [Rank 0] Group 1 FTA: 0.8411
+[2025-07-08 06:26:03] [Rank 0] Group 2 FTA: 0.9193
+[2025-07-08 06:26:03] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-08 06:26:03] [Rank 0] Group 4 FTA: 0.8750
+[2025-07-08 06:26:03] [Rank 0] Group 5 FTA: 0.8411
+[2025-07-08 06:26:03] [Rank 0] Group 6 FTA: 0.8906
+[2025-07-08 06:26:03] [Rank 0] Group 7 FTA: 0.8490
+[2025-07-08 06:26:03] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-08 06:26:03] [Rank 0] Group 9 FTA: 0.8750
+[2025-07-08 06:26:03] [Rank 0] Group 10 FTA: 0.8809
+[2025-07-08 06:26:03] [Rank 0] Group 11 FTA: 0.8955
+[2025-07-08 06:26:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:26:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:26:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:26:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:26:04] [Rank 0] step:7001/10000 train_time:560322ms step_avg:80.03ms
+[2025-07-08 06:26:06] [Rank 0] step:7021/10000 train_time:562501ms step_avg:80.12ms
+[2025-07-08 06:26:08] [Rank 0] step:7041/10000 train_time:563968ms step_avg:80.10ms
+[2025-07-08 06:26:09] [Rank 0] step:7061/10000 train_time:565456ms step_avg:80.08ms
+[2025-07-08 06:26:11] [Rank 0] step:7081/10000 train_time:566941ms step_avg:80.07ms
+[2025-07-08 06:26:12] [Rank 0] step:7101/10000 train_time:568429ms step_avg:80.05ms
+[2025-07-08 06:26:14] [Rank 0] step:7121/10000 train_time:570582ms step_avg:80.13ms
+[2025-07-08 06:26:16] [Rank 0] step:7141/10000 train_time:572072ms step_avg:80.11ms
+[2025-07-08 06:26:17] [Rank 0] step:7161/10000 train_time:573559ms step_avg:80.09ms
+[2025-07-08 06:26:19] [Rank 0] step:7181/10000 train_time:575049ms step_avg:80.08ms
+[2025-07-08 06:26:21] [Rank 0] step:7201/10000 train_time:576590ms step_avg:80.07ms
+[2025-07-08 06:26:23] [Rank 0] step:7221/10000 train_time:578693ms step_avg:80.14ms
+[2025-07-08 06:26:24] [Rank 0] step:7241/10000 train_time:580179ms step_avg:80.12ms
+[2025-07-08 06:26:25] [Rank 0] step:7261/10000 train_time:581669ms step_avg:80.11ms
+[2025-07-08 06:26:27] [Rank 0] step:7281/10000 train_time:583163ms step_avg:80.09ms
+[2025-07-08 06:26:29] [Rank 0] step:7301/10000 train_time:584890ms step_avg:80.11ms
+[2025-07-08 06:26:30] [Rank 0] step:7321/10000 train_time:586382ms step_avg:80.10ms
+[2025-07-08 06:26:32] [Rank 0] step:7341/10000 train_time:587873ms step_avg:80.08ms
+[2025-07-08 06:26:33] [Rank 0] step:7361/10000 train_time:589368ms step_avg:80.07ms
+[2025-07-08 06:26:35] [Rank 0] step:7381/10000 train_time:590862ms step_avg:80.05ms
+[2025-07-08 06:26:37] [Rank 0] step:7401/10000 train_time:593002ms step_avg:80.12ms
+[2025-07-08 06:26:38] [Rank 0] step:7421/10000 train_time:594493ms step_avg:80.11ms
+[2025-07-08 06:26:40] [Rank 0] step:7441/10000 train_time:595985ms step_avg:80.09ms
+[2025-07-08 06:26:41] [Rank 0] step:7461/10000 train_time:597480ms step_avg:80.08ms
+[2025-07-08 06:26:43] [Rank 0] step:7481/10000 train_time:599643ms step_avg:80.16ms
+[2025-07-08 06:26:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:26:46] [Rank 0] PRINT: step:7500/10000 train_loss:0.8708 val_loss:0.8709 train_time:601134ms step_avg:80.15ms
+[2025-07-08 06:26:46] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:26:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:26:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:32:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:32:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:32:17] [Rank 0] Total Loss: 5.8204
+[2025-07-08 06:32:17] [Rank 0] Total FTA: 0.9198
+[2025-07-08 06:32:17] [Rank 0] Group 0 Loss: 6.0995
+[2025-07-08 06:32:17] [Rank 0] Group 1 Loss: 5.5631
+[2025-07-08 06:32:17] [Rank 0] Group 2 Loss: 5.5942
+[2025-07-08 06:32:17] [Rank 0] Group 3 Loss: 5.8822
+[2025-07-08 06:32:17] [Rank 0] Group 4 Loss: 5.7857
+[2025-07-08 06:32:17] [Rank 0] Group 5 Loss: 5.7480
+[2025-07-08 06:32:17] [Rank 0] Group 6 Loss: 5.7342
+[2025-07-08 06:32:17] [Rank 0] Group 7 Loss: 5.8495
+[2025-07-08 06:32:17] [Rank 0] Group 8 Loss: 5.8178
+[2025-07-08 06:32:17] [Rank 0] Group 9 Loss: 5.8614
+[2025-07-08 06:32:17] [Rank 0] Group 10 Loss: 5.8213
+[2025-07-08 06:32:17] [Rank 0] Group 11 Loss: 5.8205
+[2025-07-08 06:32:17] [Rank 0] Group 0 FTA: 0.8362
+[2025-07-08 06:32:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 06:32:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 06:32:17] [Rank 0] Group 3 FTA: 0.9193
+[2025-07-08 06:32:17] [Rank 0] Group 4 FTA: 0.9427
+[2025-07-08 06:32:17] [Rank 0] Group 5 FTA: 0.9453
+[2025-07-08 06:32:17] [Rank 0] Group 6 FTA: 0.9349
+[2025-07-08 06:32:17] [Rank 0] Group 7 FTA: 0.8880
+[2025-07-08 06:32:17] [Rank 0] Group 8 FTA: 0.8932
+[2025-07-08 06:32:17] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-08 06:32:17] [Rank 0] Group 10 FTA: 0.9023
+[2025-07-08 06:32:17] [Rank 0] Group 11 FTA: 0.9238
+[2025-07-08 06:32:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:32:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:32:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:32:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:32:18] [Rank 0] step:7501/10000 train_time:601156ms step_avg:80.14ms
+[2025-07-08 06:32:20] [Rank 0] step:7521/10000 train_time:602656ms step_avg:80.13ms
+[2025-07-08 06:32:21] [Rank 0] step:7541/10000 train_time:604142ms step_avg:80.11ms
+[2025-07-08 06:32:23] [Rank 0] step:7561/10000 train_time:605888ms step_avg:80.13ms
+[2025-07-08 06:32:25] [Rank 0] step:7581/10000 train_time:607783ms step_avg:80.17ms
+[2025-07-08 06:32:26] [Rank 0] step:7601/10000 train_time:609268ms step_avg:80.16ms
+[2025-07-08 06:32:28] [Rank 0] step:7621/10000 train_time:610757ms step_avg:80.14ms
+[2025-07-08 06:32:29] [Rank 0] step:7641/10000 train_time:612247ms step_avg:80.13ms
+[2025-07-08 06:32:31] [Rank 0] step:7661/10000 train_time:614399ms step_avg:80.20ms
+[2025-07-08 06:32:33] [Rank 0] step:7681/10000 train_time:615886ms step_avg:80.18ms
+[2025-07-08 06:32:34] [Rank 0] step:7701/10000 train_time:617374ms step_avg:80.17ms
+[2025-07-08 06:32:36] [Rank 0] step:7721/10000 train_time:618865ms step_avg:80.15ms
+[2025-07-08 06:32:38] [Rank 0] step:7741/10000 train_time:620612ms step_avg:80.17ms
+[2025-07-08 06:32:40] [Rank 0] step:7761/10000 train_time:622505ms step_avg:80.21ms
+[2025-07-08 06:32:41] [Rank 0] step:7781/10000 train_time:624126ms step_avg:80.21ms
+[2025-07-08 06:32:43] [Rank 0] step:7801/10000 train_time:625619ms step_avg:80.20ms
+[2025-07-08 06:32:44] [Rank 0] step:7821/10000 train_time:627109ms step_avg:80.18ms
+[2025-07-08 06:32:46] [Rank 0] step:7841/10000 train_time:629249ms step_avg:80.25ms
+[2025-07-08 06:32:48] [Rank 0] step:7861/10000 train_time:630739ms step_avg:80.24ms
+[2025-07-08 06:32:49] [Rank 0] step:7881/10000 train_time:632231ms step_avg:80.22ms
+[2025-07-08 06:32:51] [Rank 0] step:7901/10000 train_time:633725ms step_avg:80.21ms
+[2025-07-08 06:32:52] [Rank 0] step:7921/10000 train_time:635270ms step_avg:80.20ms
+[2025-07-08 06:32:54] [Rank 0] step:7941/10000 train_time:636944ms step_avg:80.21ms
+[2025-07-08 06:32:55] [Rank 0] step:7961/10000 train_time:638437ms step_avg:80.20ms
+[2025-07-08 06:32:57] [Rank 0] step:7981/10000 train_time:639930ms step_avg:80.18ms
+[2025-07-08 06:32:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:32:59] [Rank 0] PRINT: step:8000/10000 train_loss:0.8690 val_loss:0.8695 train_time:641423ms step_avg:80.18ms
+[2025-07-08 06:32:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:32:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:33:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:38:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:38:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:38:31] [Rank 0] Total Loss: 5.8684
+[2025-07-08 06:38:31] [Rank 0] Total FTA: 0.8917
+[2025-07-08 06:38:31] [Rank 0] Group 0 Loss: 5.9837
+[2025-07-08 06:38:31] [Rank 0] Group 1 Loss: 6.4253
+[2025-07-08 06:38:31] [Rank 0] Group 2 Loss: 5.6226
+[2025-07-08 06:38:31] [Rank 0] Group 3 Loss: 5.8694
+[2025-07-08 06:38:31] [Rank 0] Group 4 Loss: 5.8590
+[2025-07-08 06:38:31] [Rank 0] Group 5 Loss: 5.7535
+[2025-07-08 06:38:31] [Rank 0] Group 6 Loss: 5.7426
+[2025-07-08 06:38:31] [Rank 0] Group 7 Loss: 5.8336
+[2025-07-08 06:38:31] [Rank 0] Group 8 Loss: 5.8212
+[2025-07-08 06:38:31] [Rank 0] Group 9 Loss: 5.7279
+[2025-07-08 06:38:31] [Rank 0] Group 10 Loss: 5.8364
+[2025-07-08 06:38:31] [Rank 0] Group 11 Loss: 5.8404
+[2025-07-08 06:38:31] [Rank 0] Group 0 FTA: 0.8309
+[2025-07-08 06:38:31] [Rank 0] Group 1 FTA: 0.8828
+[2025-07-08 06:38:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 06:38:31] [Rank 0] Group 3 FTA: 0.8411
+[2025-07-08 06:38:31] [Rank 0] Group 4 FTA: 0.9036
+[2025-07-08 06:38:31] [Rank 0] Group 5 FTA: 0.8906
+[2025-07-08 06:38:31] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-08 06:38:31] [Rank 0] Group 7 FTA: 0.8828
+[2025-07-08 06:38:31] [Rank 0] Group 8 FTA: 0.8854
+[2025-07-08 06:38:31] [Rank 0] Group 9 FTA: 0.8750
+[2025-07-08 06:38:32] [Rank 0] Group 10 FTA: 0.9004
+[2025-07-08 06:38:32] [Rank 0] Group 11 FTA: 0.9092
+[2025-07-08 06:38:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:38:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:38:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:38:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:38:33] [Rank 0] step:8001/10000 train_time:641446ms step_avg:80.17ms
+[2025-07-08 06:38:35] [Rank 0] step:8021/10000 train_time:643614ms step_avg:80.24ms
+[2025-07-08 06:38:37] [Rank 0] step:8041/10000 train_time:645099ms step_avg:80.23ms
+[2025-07-08 06:38:38] [Rank 0] step:8061/10000 train_time:646586ms step_avg:80.21ms
+[2025-07-08 06:38:40] [Rank 0] step:8081/10000 train_time:648075ms step_avg:80.20ms
+[2025-07-08 06:38:42] [Rank 0] step:8101/10000 train_time:649612ms step_avg:80.19ms
+[2025-07-08 06:38:43] [Rank 0] step:8121/10000 train_time:651688ms step_avg:80.25ms
+[2025-07-08 06:38:45] [Rank 0] step:8141/10000 train_time:653177ms step_avg:80.23ms
+[2025-07-08 06:38:46] [Rank 0] step:8161/10000 train_time:654666ms step_avg:80.22ms
+[2025-07-08 06:38:48] [Rank 0] step:8181/10000 train_time:656153ms step_avg:80.20ms
+[2025-07-08 06:38:49] [Rank 0] step:8201/10000 train_time:657880ms step_avg:80.22ms
+[2025-07-08 06:38:51] [Rank 0] step:8221/10000 train_time:659370ms step_avg:80.21ms
+[2025-07-08 06:38:52] [Rank 0] step:8241/10000 train_time:660861ms step_avg:80.19ms
+[2025-07-08 06:38:54] [Rank 0] step:8261/10000 train_time:662352ms step_avg:80.18ms
+[2025-07-08 06:38:56] [Rank 0] step:8281/10000 train_time:664096ms step_avg:80.20ms
+[2025-07-08 06:38:57] [Rank 0] step:8301/10000 train_time:665572ms step_avg:80.18ms
+[2025-07-08 06:38:59] [Rank 0] step:8321/10000 train_time:667064ms step_avg:80.17ms
+[2025-07-08 06:39:00] [Rank 0] step:8341/10000 train_time:668715ms step_avg:80.17ms
+[2025-07-08 06:39:02] [Rank 0] step:8361/10000 train_time:670207ms step_avg:80.16ms
+[2025-07-08 06:39:04] [Rank 0] step:8381/10000 train_time:672354ms step_avg:80.22ms
+[2025-07-08 06:39:05] [Rank 0] step:8401/10000 train_time:673843ms step_avg:80.21ms
+[2025-07-08 06:39:07] [Rank 0] step:8421/10000 train_time:675338ms step_avg:80.20ms
+[2025-07-08 06:39:08] [Rank 0] step:8441/10000 train_time:676831ms step_avg:80.18ms
+[2025-07-08 06:39:10] [Rank 0] step:8461/10000 train_time:678579ms step_avg:80.20ms
+[2025-07-08 06:39:12] [Rank 0] step:8481/10000 train_time:680054ms step_avg:80.19ms
+[2025-07-08 06:39:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:39:14] [Rank 0] PRINT: step:8500/10000 train_loss:0.8675 val_loss:0.8686 train_time:681548ms step_avg:80.18ms
+[2025-07-08 06:39:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:39:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:39:14] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 06:39:14] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 06:44:47] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 06:44:47] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 06:44:47] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 06:44:47] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 06:44:47] [Rank 0] Total Loss: 5.8901 +[2025-07-08 06:44:47] [Rank 0] Total Loss: 5.8901 +[2025-07-08 06:44:47] [Rank 0] Total FTA: 0.9027 +[2025-07-08 06:44:47] [Rank 0] Total FTA: 0.9027 +[2025-07-08 06:44:47] [Rank 0] Group 0 Loss: 6.0979 +[2025-07-08 06:44:47] [Rank 0] Group 0 Loss: 6.0979 +[2025-07-08 06:44:47] [Rank 0] Group 1 Loss: 5.6633 +[2025-07-08 06:44:47] [Rank 0] Group 1 Loss: 5.6633 +[2025-07-08 06:44:47] [Rank 0] Group 2 Loss: 5.7261 +[2025-07-08 06:44:47] [Rank 0] Group 2 Loss: 5.7261 +[2025-07-08 06:44:47] [Rank 0] Group 3 Loss: 5.9283 +[2025-07-08 06:44:47] [Rank 0] Group 3 Loss: 5.9283 +[2025-07-08 06:44:47] [Rank 0] Group 4 Loss: 6.0014 +[2025-07-08 06:44:47] [Rank 0] Group 4 Loss: 6.0014 +[2025-07-08 06:44:47] [Rank 0] Group 5 Loss: 5.8761 +[2025-07-08 06:44:47] [Rank 0] Group 5 Loss: 5.8761 +[2025-07-08 06:44:47] [Rank 0] Group 6 Loss: 5.7811 +[2025-07-08 06:44:47] [Rank 0] Group 6 Loss: 5.7811 +[2025-07-08 06:44:47] [Rank 0] Group 7 Loss: 5.9176 +[2025-07-08 06:44:47] [Rank 0] Group 7 Loss: 5.9176 +[2025-07-08 06:44:47] [Rank 0] Group 8 Loss: 5.9174 +[2025-07-08 06:44:47] [Rank 0] Group 8 Loss: 5.9174 +[2025-07-08 06:44:47] [Rank 0] Group 9 Loss: 5.8639 +[2025-07-08 06:44:47] [Rank 0] Group 9 Loss: 5.8639 +[2025-07-08 06:44:47] [Rank 0] Group 10 Loss: 5.8239 +[2025-07-08 06:44:47] [Rank 0] Group 10 Loss: 5.8239 +[2025-07-08 06:44:47] [Rank 0] Group 11 Loss: 5.8899 +[2025-07-08 06:44:47] [Rank 0] Group 11 Loss: 5.8899 +[2025-07-08 06:44:47] [Rank 0] Group 0 FTA: 0.8349 +[2025-07-08 06:44:47] [Rank 0] Group 0 FTA: 0.8349 +[2025-07-08 06:44:47] [Rank 0] Group 1 FTA: 0.8490 +[2025-07-08 06:44:47] [Rank 0] Group 1 FTA: 0.8490 +[2025-07-08 06:44:47] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 06:44:47] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 06:44:47] [Rank 0] Group 3 FTA: 0.9089 +[2025-07-08 06:44:47] [Rank 0] Group 3 FTA: 0.9089 +[2025-07-08 06:44:47] [Rank 0] Group 4 FTA: 0.8828 +[2025-07-08 06:44:47] [Rank 0] Group 4 FTA: 0.8828 +[2025-07-08 06:44:47] [Rank 0] Group 5 FTA: 0.9271 +[2025-07-08 06:44:47] [Rank 0] Group 5 FTA: 0.9271 +[2025-07-08 06:44:47] [Rank 0] Group 6 FTA: 0.9375 +[2025-07-08 06:44:47] [Rank 0] Group 6 FTA: 0.9375 +[2025-07-08 06:44:47] [Rank 0] Group 7 FTA: 0.9167 +[2025-07-08 06:44:47] [Rank 0] Group 7 FTA: 0.9167 +[2025-07-08 06:44:47] [Rank 0] Group 8 FTA: 0.9219 +[2025-07-08 06:44:47] [Rank 0] Group 8 FTA: 0.9219 +[2025-07-08 06:44:47] [Rank 0] Group 9 FTA: 0.8945 +[2025-07-08 06:44:47] [Rank 0] Group 9 FTA: 0.8945 +[2025-07-08 06:44:47] [Rank 0] Group 10 FTA: 0.9160 +[2025-07-08 06:44:47] [Rank 0] Group 10 FTA: 0.9160 +[2025-07-08 06:44:47] [Rank 0] Group 11 FTA: 0.9033 +[2025-07-08 06:44:47] [Rank 0] Group 11 FTA: 0.9033 +[2025-07-08 06:44:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png +[2025-07-08 06:44:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png +[2025-07-08 06:44:48] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png +[2025-07-08 06:44:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png +[2025-07-08 06:44:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png +[2025-07-08 06:44:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png +[2025-07-08 06:44:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png +[2025-07-08 06:44:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png +[2025-07-08 06:44:49] [Rank 0] step:8501/10000 train_time:681569ms step_avg:80.18ms +[2025-07-08 06:44:49] [Rank 0] step:8501/10000 train_time:681569ms step_avg:80.18ms +[2025-07-08 06:44:50] [Rank 0] step:8521/10000 train_time:683077ms step_avg:80.16ms +[2025-07-08 06:44:50] [Rank 0] step:8521/10000 train_time:683077ms step_avg:80.16ms +[2025-07-08 06:44:52] [Rank 0] step:8541/10000 train_time:684563ms step_avg:80.15ms +[2025-07-08 06:44:52] [Rank 0] step:8541/10000 train_time:684563ms step_avg:80.15ms +[2025-07-08 06:44:54] [Rank 0] step:8561/10000 train_time:686291ms step_avg:80.16ms +[2025-07-08 06:44:54] [Rank 0] step:8561/10000 train_time:686291ms step_avg:80.16ms +[2025-07-08 06:44:55] [Rank 0] step:8581/10000 train_time:687775ms step_avg:80.15ms +[2025-07-08 06:44:55] [Rank 0] step:8581/10000 train_time:687775ms step_avg:80.15ms +[2025-07-08 06:44:57] [Rank 0] step:8601/10000 train_time:689262ms step_avg:80.14ms +[2025-07-08 06:44:57] [Rank 0] step:8601/10000 train_time:689262ms step_avg:80.14ms +[2025-07-08 06:44:58] [Rank 0] step:8621/10000 train_time:690752ms step_avg:80.12ms +[2025-07-08 06:44:58] [Rank 0] step:8621/10000 train_time:690752ms step_avg:80.12ms +[2025-07-08 06:45:00] [Rank 0] step:8641/10000 train_time:692911ms step_avg:80.19ms +[2025-07-08 06:45:00] [Rank 0] step:8641/10000 train_time:692911ms step_avg:80.19ms +[2025-07-08 06:45:02] [Rank 0] step:8661/10000 train_time:694378ms step_avg:80.17ms +[2025-07-08 06:45:02] [Rank 0] step:8661/10000 train_time:694378ms step_avg:80.17ms +[2025-07-08 06:45:03] [Rank 0] step:8681/10000 train_time:695869ms step_avg:80.16ms +[2025-07-08 06:45:03] [Rank 0] step:8681/10000 train_time:695869ms step_avg:80.16ms +[2025-07-08 06:45:05] [Rank 0] step:8701/10000 train_time:697360ms step_avg:80.15ms +[2025-07-08 06:45:05] [Rank 0] step:8701/10000 train_time:697360ms step_avg:80.15ms +[2025-07-08 06:45:06] [Rank 0] step:8721/10000 train_time:698847ms step_avg:80.13ms +[2025-07-08 06:45:06] [Rank 0] step:8721/10000 train_time:698847ms step_avg:80.13ms +[2025-07-08 06:45:08] [Rank 0] step:8741/10000 train_time:700989ms step_avg:80.20ms +[2025-07-08 06:45:08] [Rank 0] step:8741/10000 train_time:700989ms step_avg:80.20ms +[2025-07-08 06:45:10] [Rank 0] step:8761/10000 train_time:702479ms step_avg:80.18ms +[2025-07-08 06:45:10] [Rank 0] step:8761/10000 train_time:702479ms step_avg:80.18ms +[2025-07-08 06:45:11] [Rank 0] step:8781/10000 train_time:703972ms step_avg:80.17ms +[2025-07-08 06:45:11] [Rank 0] step:8781/10000 train_time:703972ms step_avg:80.17ms +[2025-07-08 06:45:13] [Rank 0] step:8801/10000 train_time:705463ms step_avg:80.16ms +[2025-07-08 06:45:13] [Rank 0] 
+[2025-07-08 06:45:15] [Rank 0] step:8821/10000 train_time:706954ms step_avg:80.14ms
+[2025-07-08 06:45:16] [Rank 0] step:8841/10000 train_time:708682ms step_avg:80.16ms
+[2025-07-08 06:45:18] [Rank 0] step:8861/10000 train_time:710177ms step_avg:80.15ms
+[2025-07-08 06:45:19] [Rank 0] step:8881/10000 train_time:711827ms step_avg:80.15ms
+[2025-07-08 06:45:21] [Rank 0] step:8901/10000 train_time:713322ms step_avg:80.14ms
+[2025-07-08 06:45:22] [Rank 0] step:8921/10000 train_time:715050ms step_avg:80.15ms
+[2025-07-08 06:45:24] [Rank 0] step:8941/10000 train_time:716546ms step_avg:80.14ms
+[2025-07-08 06:45:25] [Rank 0] step:8961/10000 train_time:718038ms step_avg:80.13ms
+[2025-07-08 06:45:27] [Rank 0] step:8981/10000 train_time:719533ms step_avg:80.12ms
+[2025-07-08 06:45:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:45:29] [Rank 0] PRINT: step:9000/10000 train_loss:0.8663 val_loss:0.8680 train_time:721027ms step_avg:80.11ms
+[2025-07-08 06:45:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:45:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:45:29] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:50:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:50:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:50:59] [Rank 0] Total Loss: 5.9369
+[2025-07-08 06:50:59] [Rank 0] Total FTA: 0.9398
+[2025-07-08 06:50:59] [Rank 0] Group 0 Loss: 6.0705
+[2025-07-08 06:50:59] [Rank 0] Group 1 Loss: 5.7261
+[2025-07-08 06:50:59] [Rank 0] Group 2 Loss: 5.7678
+[2025-07-08 06:50:59] [Rank 0] Group 3 Loss: 5.9766
+[2025-07-08 06:50:59] [Rank 0] Group 4 Loss: 5.9904
+[2025-07-08 06:50:59] [Rank 0] Group 5 Loss: 5.8433
+[2025-07-08 06:50:59] [Rank 0] Group 6 Loss: 5.8399
+[2025-07-08 06:50:59] [Rank 0] Group 7 Loss: 5.9891
+[2025-07-08 06:50:59] [Rank 0] Group 8 Loss: 5.9262
+[2025-07-08 06:50:59] [Rank 0] Group 9 Loss: 5.8714
+[2025-07-08 06:50:59] [Rank 0] Group 10 Loss: 5.9682
+[2025-07-08 06:50:59] [Rank 0] Group 11 Loss: 6.0008
+[2025-07-08 06:50:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 06:50:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 06:50:59] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-08 06:50:59] [Rank 0] Group 3 FTA: 0.9036
+[2025-07-08 06:50:59] [Rank 0] Group 4 FTA: 0.9531
+[2025-07-08 06:50:59] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-08 06:50:59] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-08 06:50:59] [Rank 0] Group 7 FTA: 0.9245
+[2025-07-08 06:50:59] [Rank 0] Group 8 FTA: 0.9089
+[2025-07-08 06:50:59] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-08 06:50:59] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-08 06:50:59] [Rank 0] Group 11 FTA: 0.9150
+[2025-07-08 06:50:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:51:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:51:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:51:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:51:04] [Rank 0] step:9001/10000 train_time:721776ms step_avg:80.19ms
+[2025-07-08 06:51:05] [Rank 0] step:9021/10000 train_time:723259ms step_avg:80.17ms
+[2025-07-08 06:51:06] [Rank 0] step:9041/10000 train_time:724744ms step_avg:80.16ms
+[2025-07-08 06:51:08] [Rank 0] step:9061/10000 train_time:726233ms step_avg:80.15ms
+[2025-07-08 06:51:09] [Rank 0] step:9081/10000 train_time:727720ms step_avg:80.14ms
+[2025-07-08 06:51:11] [Rank 0] step:9101/10000 train_time:729445ms step_avg:80.15ms
+[2025-07-08 06:51:13] [Rank 0] step:9121/10000 train_time:730934ms step_avg:80.14ms
+[2025-07-08 06:51:14] [Rank 0] step:9141/10000 train_time:732420ms step_avg:80.12ms
+[2025-07-08 06:51:16] [Rank 0] step:9161/10000 train_time:733909ms step_avg:80.11ms
+[2025-07-08 06:51:18] [Rank 0] step:9181/10000 train_time:736076ms step_avg:80.17ms
+[2025-07-08 06:51:19] [Rank 0] step:9201/10000 train_time:737546ms step_avg:80.16ms
+[2025-07-08 06:51:21] [Rank 0] step:9221/10000 train_time:739035ms step_avg:80.15ms
+[2025-07-08 06:51:22] [Rank 0] step:9241/10000 train_time:740524ms step_avg:80.13ms
+[2025-07-08 06:51:24] [Rank 0] step:9261/10000 train_time:742018ms step_avg:80.12ms
+[2025-07-08 06:51:25] [Rank 0] step:9281/10000 train_time:743744ms step_avg:80.14ms
+[2025-07-08 06:51:27] [Rank 0] step:9301/10000 train_time:745238ms step_avg:80.12ms
+[2025-07-08 06:51:28] [Rank 0] step:9321/10000 train_time:746727ms step_avg:80.11ms
+[2025-07-08 06:51:30] [Rank 0] step:9341/10000 train_time:748220ms step_avg:80.10ms
+[2025-07-08 06:51:32] [Rank 0] step:9361/10000 train_time:750381ms step_avg:80.16ms
+[2025-07-08 06:51:34] [Rank 0] step:9381/10000 train_time:751856ms step_avg:80.15ms
+[2025-07-08 06:51:35] [Rank 0] step:9401/10000 train_time:753347ms step_avg:80.13ms
+[2025-07-08 06:51:37] [Rank 0] step:9421/10000 train_time:754840ms step_avg:80.12ms
+[2025-07-08 06:51:38] [Rank 0] step:9441/10000 train_time:756419ms step_avg:80.12ms
+[2025-07-08 06:51:40] [Rank 0] step:9461/10000 train_time:758147ms step_avg:80.13ms
+[2025-07-08 06:51:41] [Rank 0] step:9481/10000 train_time:759644ms step_avg:80.12ms
+[2025-07-08 06:51:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:51:44] [Rank 0] PRINT: step:9500/10000 train_loss:0.8652 val_loss:0.8674 train_time:761137ms step_avg:80.12ms
+[2025-07-08 06:51:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:51:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:51:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 06:57:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 06:57:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 06:57:14] [Rank 0] Total Loss: 5.9673
+[2025-07-08 06:57:14] [Rank 0] Total FTA: 0.9329
+[2025-07-08 06:57:14] [Rank 0] Group 0 Loss: 6.2080
+[2025-07-08 06:57:14] [Rank 0] Group 1 Loss: 5.8129
+[2025-07-08 06:57:14] [Rank 0] Group 2 Loss: 5.7228
+[2025-07-08 06:57:14] [Rank 0] Group 3 Loss: 5.9372
+[2025-07-08 06:57:14] [Rank 0] Group 4 Loss: 5.9789
+[2025-07-08 06:57:14] [Rank 0] Group 5 Loss: 5.7971
+[2025-07-08 06:57:15] [Rank 0] Group 6 Loss: 5.8852
+[2025-07-08 06:57:15] [Rank 0] Group 7 Loss: 6.0445
+[2025-07-08 06:57:15] [Rank 0] Group 8 Loss: 6.0055
+[2025-07-08 06:57:15] [Rank 0] Group 9 Loss: 5.9442
+[2025-07-08 06:57:15] [Rank 0] Group 10 Loss: 5.9633
+[2025-07-08 06:57:15] [Rank 0] Group 11 Loss: 6.0021
+[2025-07-08 06:57:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 06:57:15] [Rank 0] Group 1 FTA: 0.8516
+[2025-07-08 06:57:15] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-08 06:57:15] [Rank 0] Group 3 FTA: 0.9036
+[2025-07-08 06:57:15] [Rank 0] Group 4 FTA: 0.9635
+[2025-07-08 06:57:15] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-08 06:57:15] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-08 06:57:15] [Rank 0] Group 7 FTA: 0.9349
+[2025-07-08 06:57:15] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-08 06:57:15] [Rank 0] Group 9 FTA: 0.9062
+[2025-07-08 06:57:15] [Rank 0] Group 10 FTA: 0.9414
+[2025-07-08 06:57:15] [Rank 0] Group 11 FTA: 0.9248
+[2025-07-08 06:57:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 06:57:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 06:57:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 06:57:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 06:57:16] [Rank 0] step:9501/10000 train_time:761159ms step_avg:80.11ms
+[2025-07-08 06:57:18] [Rank 0] step:9521/10000 train_time:762661ms step_avg:80.10ms
+[2025-07-08 06:57:20] [Rank 0] step:9541/10000 train_time:764204ms step_avg:80.10ms
+[2025-07-08 06:57:21] [Rank 0] step:9561/10000 train_time:766294ms step_avg:80.15ms
+[2025-07-08 06:57:23] [Rank 0] step:9581/10000 train_time:767781ms step_avg:80.14ms
+[2025-07-08 06:57:24] [Rank 0] step:9601/10000 train_time:769266ms step_avg:80.12ms
+[2025-07-08 06:57:26] [Rank 0] step:9621/10000 train_time:770766ms step_avg:80.11ms
+[2025-07-08 06:57:27] [Rank 0] step:9641/10000 train_time:772496ms step_avg:80.13ms
+[2025-07-08 06:57:29] [Rank 0] step:9661/10000 train_time:773984ms step_avg:80.11ms
+[2025-07-08 06:57:30] [Rank 0] step:9681/10000 train_time:775473ms step_avg:80.10ms
+[2025-07-08 06:57:32] [Rank 0] step:9701/10000 train_time:776965ms step_avg:80.09ms
+[2025-07-08 06:57:34] [Rank 0] step:9721/10000 train_time:778506ms step_avg:80.09ms
+[2025-07-08 06:57:35] [Rank 0] step:9741/10000 train_time:780180ms step_avg:80.09ms
+[2025-07-08 06:57:37] [Rank 0] step:9761/10000 train_time:781669ms step_avg:80.08ms
+[2025-07-08 06:57:38] [Rank 0] step:9781/10000 train_time:783161ms step_avg:80.07ms
+[2025-07-08 06:57:39] [Rank 0] step:9801/10000 train_time:784650ms step_avg:80.06ms
+[2025-07-08 06:57:41] [Rank 0] step:9821/10000 train_time:786376ms step_avg:80.07ms
+[2025-07-08 06:57:43] [Rank 0] step:9841/10000 train_time:787866ms step_avg:80.06ms
+[2025-07-08 06:57:44] [Rank 0] step:9861/10000 train_time:789359ms step_avg:80.05ms
+[2025-07-08 06:57:46] [Rank 0] step:9881/10000 train_time:790853ms step_avg:80.04ms
+[2025-07-08 06:57:47] [Rank 0] step:9901/10000 train_time:792600ms step_avg:80.05ms
+[2025-07-08 06:57:49] [Rank 0] step:9921/10000 train_time:794072ms step_avg:80.04ms
+[2025-07-08 06:57:50] [Rank 0] step:9941/10000 train_time:795566ms step_avg:80.03ms
+[2025-07-08 06:57:52] [Rank 0] step:9961/10000 train_time:797063ms step_avg:80.02ms
+[2025-07-08 06:57:53] [Rank 0] step:9981/10000 train_time:798553ms step_avg:80.01ms
+[2025-07-08 06:57:55] [Rank 0] step:10000/10000 train_time:800211ms step_avg:80.02ms
+[2025-07-08 06:57:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 06:57:56] [Rank 0] PRINT: step:10000/10000 train_loss:0.8642 val_loss:0.8671 train_time:800289ms step_avg:80.03ms
+[2025-07-08 06:57:56] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 06:57:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 06:57:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 07:03:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 07:03:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 07:03:28] [Rank 0] Total Loss: 6.0099
+[2025-07-08 07:03:28] [Rank 0] Total FTA: 0.9407
+[2025-07-08 07:03:28] [Rank 0] Group 0 Loss: 6.2454
+[2025-07-08 07:03:29] [Rank 0] Group 1 Loss: 5.9125
+[2025-07-08 07:03:29] [Rank 0] Group 2 Loss: 5.8030
+[2025-07-08 07:03:29] [Rank 0] Group 3 Loss: 6.0770
+[2025-07-08 07:03:29] [Rank 0] Group 4 Loss: 5.9945
+[2025-07-08 07:03:29] [Rank 0] Group 5 Loss: 5.9528
+[2025-07-08 07:03:29] [Rank 0] Group 6 Loss: 5.9094
+[2025-07-08 07:03:29] [Rank 0] Group 7 Loss: 6.0299
+[2025-07-08 07:03:29] [Rank 0] Group 8 Loss: 5.9686
+[2025-07-08 07:03:29] [Rank 0] Group 9 Loss: 5.9959
+[2025-07-08 07:03:29] [Rank 0] Group 10 Loss: 5.9745
+[2025-07-08 07:03:29] [Rank 0] Group 11 Loss: 6.0159
+[2025-07-08 07:03:29] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 07:03:29] [Rank 0] Group 1 FTA: 0.8281
+[2025-07-08 07:03:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 07:03:29] [Rank 0] Group 3 FTA: 0.9036
+[2025-07-08 07:03:29] [Rank 0] Group 4 FTA: 0.9401
+[2025-07-08 07:03:29] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-08 07:03:29] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-08 07:03:29] [Rank 0] Group 7 FTA: 0.9349
+[2025-07-08 07:03:29] [Rank 0] Group 8 FTA: 0.9427
+[2025-07-08 07:03:29] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-08 07:03:29] [Rank 0] Group 10 FTA: 0.9473
+[2025-07-08 07:03:29] [Rank 0] Group 11 FTA: 0.9355
+[2025-07-08 07:03:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_loss_curves.png
+[2025-07-08 07:03:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/per_class_acc_curves.png
+[2025-07-08 07:03:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_loss_curve.png
+[2025-07-08 07:03:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_46/total_acc_curve.png
+[2025-07-08 07:03:30] [Rank 0] step:10001/10000 train_time:800312ms step_avg:80.02ms
+[2025-07-08 07:03:30] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 07:03:30 2025 ---
+[2025-07-08 07:03:30] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9956 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/config.json b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..38baba4627f1249f366b06e9f37f81d36ef2540a
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 48,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "173532cc-6960-452f-8c2f-6e5b16f01481",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..c36d752af76754c508b1154704b09cc340b08110
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7a5a0876103675c0b1255113858f3ca3ddc1ce24837cfb12d75f512ea6476ab9
+size 420917
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..0176b8a98ff91a2ec9c0b475ee6e5e31be06b822
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f650b6e79566260abd1edc1a320b35224a247e206147496028c058a9be8b1493
+size 381727
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..d6d7a3586cb3d3f1a563008dc77c61564d410837
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bc43f0645f7d156a75ff6cf2d08bf71914977ae8ebd969a27e95f4bc0e2eb14
+size 109470
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..d3a97bf0bd3a8144c9764f984e53e96c5ae862cf
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ecfc4ba9d23bf56517379bc9af0fefe919639ba517b03115cb0f2abdd51ca90
+size 113628
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/training_log_173532cc-6960-452f-8c2f-6e5b16f01481.txt b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/training_log_173532cc-6960-452f-8c2f-6e5b16f01481.txt
new file mode 100644
index 0000000000000000000000000000000000000000..01242e43db9fd81754abe99356c6dbaaadfa67ab
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/training_log_173532cc-6960-452f-8c2f-6e5b16f01481.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 21:56:14] [Rank 0] PRINT: --- Script Start: Sun Jul 6 21:56:14 2025 ---
+[2025-07-06 21:56:14] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001)
+[2025-07-06 21:56:14] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 21:56:14] [Rank 0] PRINT: Using fixed seed: 48
+[2025-07-06 21:56:14] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48
+[2025-07-06 21:56:14] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
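+# Illustrative note (editor-added assumption, not recorded in this log): the
+# RANK / LOCAL_RANK / WORLD_SIZE environment variables read above are the ones
+# a torchrun launcher sets, so a single-node launch of this script would
+# presumably look something like:
+#
+#   torchrun --standalone --nproc_per_node=<num_gpus> <this_script>.py \
+#       --optimizer_mode 0 --model_parameterization qkvo \
+#       --adam_lr 0.001 --seed 48
+#
+# (<this_script>.py and <num_gpus> are placeholders; the actual launch command
+# is not part of the logged output.)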
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Single guarded write; writing the message a second time here would
+        # duplicate every entry in the log file.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info:
+        status = "Correct" if result['is_correct'] else "Incorrect"
+        print(f"\nSample {result['idx']}: {status}")
+        print(f"  Prompt : '{result['prompt']}'")
+        print(f"  Expected answer: '{result['answer']}'")
+        print(
+            f"  Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+        )
+        print(
+            f"  Expected  : {result['expected_token']} -> '{result['exp_text']}'"
+        )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f"  First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f"  Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
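+# Illustrative check (editor-added, not in the original script): for m = 3 the
+# helper above returns
+#   generate_powerlaw_selection_counts(3)
+#   -> ({0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1},
+#       [0, 1, 2, 2, 3, 3, 3, 3])
+# i.e. group g holds 2**(g-1) classes (a single class for g = 0), each selected
+# 2**(m-g) times, so per-class frequency halves from one group to the next.
+# Under this scheme the Group 0..11 curves reported in the log above would
+# correspond to m = 11 (12 groups), though the run's actual m is not recorded
+# here.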
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
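+            # Illustrative note (editor-added, not in the original script):
+            # GPT-2's BPE folds a leading space into the token, so the same
+            # word has different first-token ids with and without it, e.g.
+            #   tok = GPT2Tokenizer.from_pretrained('gpt2')
+            #   tok.encode(' Paris') != tok.encode('Paris')  # True
+            # Encoding ' ' + answer above therefore reproduces the answer's
+            # first token exactly as it follows "Answer:" in the training text.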
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
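+# A quick reading of the two schedules defined above (num_iterations=10000,
+# cooldown_frac=0.8): get_lr(step) returns the LR multiplier 1.0 through step
+# 2000 (x = 0.2), then decays linearly toward 0.1, e.g.
+# get_lr(6000) = 0.5*1.0 + 0.5*0.1 = 0.55 and get_lr(10000) = 0.1.
+# get_window_size_blocks(step) grows the attention window from 128 tokens at
+# step 0 to 1792 tokens at the end (1728*x rounded up to a multiple of 128).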
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 21:56:14] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle over the shards so multi-epoch training wraps around automatically
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
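+                         "Modes only take effect with --model_parameterization qkvo; "
+                         "'whole' always uses the original Muon/Adam split."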
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
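+# Example launch (illustrative only; the launcher and script names below are
+# placeholders -- this script just reads RANK/LOCAL_RANK/WORLD_SIZE from the
+# environment, e.g. as set by torchrun):
+#   torchrun --nproc_per_node=<NUM_GPUS> <this_script>.py \
+#       --optimizer_mode 2 --model_parameterization qkvo --adam_lr 0.001 --seed 42
+# With those flags, run_dir_path_str above resolves to
+#   /home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_2_param_qkvo_lr_0.001_seed_42
+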
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append every message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?' 
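+        # e.g. (illustrative sample) for the cleaned text
+        #   "What is the birth date of John Smith? 15 March 1962"
+        # the prompt becomes "What is the birth date of John Smith?" and the
+        # answer "15 March 1962"; below, the answer is re-encoded with a
+        # leading space so its first token matches the training-time context.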
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[-1] # model may return (loss, logits); keep the logits
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    # The history dicts are keyed by training step, so label the x-axis accordingly
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
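+    Groups classes via generate_powerlaw_selection_counts(m_val) and returns
+    {group_id (str): mean loss over that group's sampled QA items}.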
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
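+
+    # Editor's quick reference for the dispatch above (derived from the code;
+    # Adam always owns head, embedding, and scalar params; in addition):
+    #   mode | Muon gets                | Adam additionally gets
+    #   -----+--------------------------+------------------------------
+    #    0   | QKVO attn + all MLP      | (nothing extra)
+    #    1   | QK attn                  | VO attn + all MLP
+    #    2   | VO attn                  | QK attn + all MLP
+    #    3   | QKVO attn                | all MLP
+    #    4   | all MLP                  | QKVO attn
+    #    5   | (nothing: all-Adam)      | QKVO attn + all MLP
+    #    6   | MLP W_2                  | QKVO attn + MLP W_1
+    #    7   | VO attn + all MLP        | QK attn
+    #    8   | VO attn + MLP W_2        | QK attn + MLP W_1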
+
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        # dict(params=head_params, lr=0.22),
+        # dict(params=embed_params, lr=0.6),
+        # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices:  # Only add group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # TODO: add weight_decay=0.01 to Adam
+    optimizers = [optimizer1]  # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params:  # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None  # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
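+
+# --- Editor's worked example (values derived from the schedules above, with
+# num_iterations=10000 and cooldown_frac=0.8; not part of the original run):
+#   get_lr(0)     -> 1.00   (stable phase)
+#   get_lr(2000)  -> 1.00   (cooldown boundary: x = 1 - cooldown_frac)
+#   get_lr(6000)  -> 0.55   (halfway through cooldown, w = 0.5)
+#   get_lr(10000) -> 0.10   (floor at the final step)
+# Likewise, the Muon momentum warmup applied in the training loop,
+# (1 - frac) * 0.85 + frac * 0.95 with frac = min(step / 300, 1), ramps
+# 0.85 -> 0.90 -> 0.95 at steps 0, 150, and >= 300.
+# A hypothetical rank-0 sanity check:
+# if master_process:
+#     for _s in (0, 2000, 6000, 10000):
+#         print0(f"PRINT: get_lr({_s}) = {get_lr(_s):.2f}", console=False)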
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 21:56:15] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 21:56:15] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-06 21:56:15] [Rank 0] PRINT: Constructing model... +[2025-07-06 21:56:15] [Rank 0] PRINT: Constructing model... +[2025-07-06 21:56:17] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 21:56:17] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 21:56:17] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 21:56:17] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 21:56:17] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 21:56:17] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 21:56:18] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 21:56:18] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 21:56:18] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 21:56:18] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 21:56:18] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 21:56:18] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 21:56:18] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 21:56:18] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 21:56:18] [Rank 0] PRINT: Model returns: +[2025-07-06 21:56:18] [Rank 0] PRINT: Model returns: +[2025-07-06 21:56:18] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 21:56:18] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 21:56:18] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 21:56:18] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 21:56:18] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 21:56:18] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 21:56:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 21:56:18] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 21:56:18] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 21:56:18] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 21:56:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 21:56:18] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 21:56:18] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 21:56:18] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 21:56:18] [Rank 0] PRINT: Starting warmup... +[2025-07-06 21:56:18] [Rank 0] PRINT: Starting warmup... +[2025-07-06 21:57:31] [Rank 0] PRINT: Warmup complete. +[2025-07-06 21:57:31] [Rank 0] PRINT: Warmup complete. +[2025-07-06 21:57:31] [Rank 0] PRINT: Starting training... +[2025-07-06 21:57:31] [Rank 0] PRINT: Starting training... +[2025-07-06 21:57:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:57:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 21:57:39] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 21:57:39] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 21:57:41] [Rank 0] step:21/10000 train_time:1556ms step_avg:74.08ms +[2025-07-06 21:57:41] [Rank 0] step:21/10000 train_time:1556ms step_avg:74.08ms +[2025-07-06 21:57:42] [Rank 0] step:41/10000 train_time:3005ms step_avg:73.30ms +[2025-07-06 21:57:42] [Rank 0] step:41/10000 train_time:3005ms step_avg:73.30ms +[2025-07-06 21:57:44] [Rank 0] step:61/10000 train_time:4459ms step_avg:73.09ms +[2025-07-06 21:57:44] [Rank 0] step:61/10000 train_time:4459ms step_avg:73.09ms +[2025-07-06 21:57:45] [Rank 0] step:81/10000 train_time:5916ms step_avg:73.04ms +[2025-07-06 21:57:45] [Rank 0] step:81/10000 train_time:5916ms step_avg:73.04ms +[2025-07-06 21:57:47] [Rank 0] step:101/10000 train_time:8031ms step_avg:79.52ms +[2025-07-06 21:57:47] [Rank 0] step:101/10000 train_time:8031ms step_avg:79.52ms +[2025-07-06 21:57:49] [Rank 0] step:121/10000 train_time:9488ms step_avg:78.41ms +[2025-07-06 21:57:49] [Rank 0] step:121/10000 train_time:9488ms step_avg:78.41ms +[2025-07-06 21:57:50] [Rank 0] step:141/10000 train_time:10947ms step_avg:77.64ms +[2025-07-06 21:57:50] [Rank 0] step:141/10000 train_time:10947ms step_avg:77.64ms +[2025-07-06 21:57:52] [Rank 0] step:161/10000 train_time:12407ms step_avg:77.06ms +[2025-07-06 21:57:52] [Rank 0] step:161/10000 train_time:12407ms step_avg:77.06ms +[2025-07-06 21:57:54] [Rank 0] step:181/10000 train_time:13869ms step_avg:76.63ms +[2025-07-06 21:57:54] [Rank 0] step:181/10000 train_time:13869ms step_avg:76.63ms +[2025-07-06 21:57:55] [Rank 0] step:201/10000 train_time:15998ms step_avg:79.59ms +[2025-07-06 21:57:55] [Rank 0] step:201/10000 train_time:15998ms step_avg:79.59ms +[2025-07-06 21:57:57] [Rank 0] step:221/10000 train_time:17465ms step_avg:79.03ms +[2025-07-06 21:57:57] [Rank 0] step:221/10000 train_time:17465ms step_avg:79.03ms +[2025-07-06 21:57:58] [Rank 0] step:241/10000 train_time:18933ms step_avg:78.56ms +[2025-07-06 21:57:58] [Rank 0] step:241/10000 train_time:18933ms step_avg:78.56ms +[2025-07-06 21:58:00] [Rank 0] step:261/10000 train_time:20398ms step_avg:78.15ms +[2025-07-06 21:58:00] [Rank 0] step:261/10000 train_time:20398ms step_avg:78.15ms +[2025-07-06 21:58:02] [Rank 0] step:281/10000 train_time:22667ms step_avg:80.66ms +[2025-07-06 21:58:02] [Rank 0] step:281/10000 train_time:22667ms step_avg:80.66ms +[2025-07-06 21:58:03] [Rank 0] step:301/10000 train_time:24179ms step_avg:80.33ms +[2025-07-06 21:58:03] [Rank 0] step:301/10000 train_time:24179ms step_avg:80.33ms +[2025-07-06 21:58:05] [Rank 0] step:321/10000 train_time:25804ms step_avg:80.39ms +[2025-07-06 21:58:05] [Rank 0] step:321/10000 train_time:25804ms step_avg:80.39ms +[2025-07-06 21:58:06] [Rank 0] step:341/10000 train_time:27273ms step_avg:79.98ms +[2025-07-06 21:58:06] [Rank 0] step:341/10000 train_time:27273ms step_avg:79.98ms +[2025-07-06 21:58:09] [Rank 0] step:361/10000 train_time:28744ms step_avg:79.62ms +[2025-07-06 21:58:09] [Rank 0] step:361/10000 train_time:28744ms step_avg:79.62ms +[2025-07-06 21:58:10] [Rank 0] step:381/10000 train_time:30882ms step_avg:81.05ms +[2025-07-06 21:58:10] [Rank 0] step:381/10000 train_time:30882ms step_avg:81.05ms +[2025-07-06 21:58:12] [Rank 0] step:401/10000 train_time:32350ms step_avg:80.67ms +[2025-07-06 21:58:12] [Rank 0] step:401/10000 train_time:32350ms step_avg:80.67ms +[2025-07-06 21:58:13] [Rank 0] step:421/10000 train_time:33819ms step_avg:80.33ms 
+[2025-07-06 21:58:13] [Rank 0] step:421/10000 train_time:33819ms step_avg:80.33ms +[2025-07-06 21:58:14] [Rank 0] step:441/10000 train_time:35288ms step_avg:80.02ms +[2025-07-06 21:58:14] [Rank 0] step:441/10000 train_time:35288ms step_avg:80.02ms +[2025-07-06 21:58:17] [Rank 0] step:461/10000 train_time:37409ms step_avg:81.15ms +[2025-07-06 21:58:17] [Rank 0] step:461/10000 train_time:37409ms step_avg:81.15ms +[2025-07-06 21:58:18] [Rank 0] step:481/10000 train_time:38878ms step_avg:80.83ms +[2025-07-06 21:58:18] [Rank 0] step:481/10000 train_time:38878ms step_avg:80.83ms +[2025-07-06 21:58:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:58:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:58:20] [Rank 0] PRINT: step:500/10000 train_loss:4.9443 val_loss:2.0542 train_time:40345ms step_avg:80.69ms +[2025-07-06 21:58:20] [Rank 0] PRINT: step:500/10000 train_loss:4.9443 val_loss:2.0542 train_time:40345ms step_avg:80.69ms +[2025-07-06 21:58:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:58:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:58:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 21:58:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 21:58:21] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 21:58:21] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 22:03:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 22:03:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 22:03:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 22:03:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 22:03:40] [Rank 0] Total Loss: 4.1084 +[2025-07-06 22:03:40] [Rank 0] Total Loss: 4.1084 +[2025-07-06 22:03:40] [Rank 0] Total FTA: 0.0795 +[2025-07-06 22:03:40] [Rank 0] Total FTA: 0.0795 +[2025-07-06 22:03:40] [Rank 0] Group 0 Loss: 4.3623 +[2025-07-06 22:03:40] [Rank 0] Group 0 Loss: 4.3623 +[2025-07-06 22:03:40] [Rank 0] Group 1 Loss: 4.0631 +[2025-07-06 22:03:40] [Rank 0] Group 1 Loss: 4.0631 +[2025-07-06 22:03:40] [Rank 0] Group 2 Loss: 4.0245 +[2025-07-06 22:03:40] [Rank 0] Group 2 Loss: 4.0245 +[2025-07-06 22:03:40] [Rank 0] Group 3 Loss: 4.0974 +[2025-07-06 22:03:40] [Rank 0] Group 3 Loss: 4.0974 +[2025-07-06 22:03:40] [Rank 0] Group 4 Loss: 4.0388 +[2025-07-06 22:03:40] [Rank 0] Group 4 Loss: 4.0388 +[2025-07-06 22:03:40] [Rank 0] Group 5 Loss: 4.0584 +[2025-07-06 22:03:40] [Rank 0] Group 5 Loss: 4.0584 +[2025-07-06 22:03:40] [Rank 0] Group 6 Loss: 4.0163 +[2025-07-06 22:03:40] [Rank 0] Group 6 Loss: 4.0163 +[2025-07-06 22:03:40] [Rank 0] Group 7 Loss: 4.0824 +[2025-07-06 22:03:40] [Rank 0] Group 7 Loss: 4.0824 +[2025-07-06 22:03:40] [Rank 0] Group 8 Loss: 4.0807 +[2025-07-06 22:03:40] [Rank 0] Group 8 Loss: 4.0807 +[2025-07-06 22:03:40] [Rank 0] Group 9 Loss: 4.1054 +[2025-07-06 22:03:40] [Rank 0] Group 9 Loss: 4.1054 +[2025-07-06 22:03:40] [Rank 0] Group 10 Loss: 4.0702 +[2025-07-06 22:03:40] [Rank 0] Group 10 Loss: 4.0702 +[2025-07-06 22:03:40] [Rank 0] Group 11 Loss: 4.0898 +[2025-07-06 22:03:40] [Rank 0] Group 11 Loss: 4.0898 +[2025-07-06 22:03:40] [Rank 0] Group 0 FTA: 0.1404 +[2025-07-06 22:03:40] [Rank 0] Group 0 FTA: 0.1404 +[2025-07-06 
22:03:40] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 22:03:40] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 22:03:40] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-06 22:03:40] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-06 22:03:40] [Rank 0] Group 3 FTA: 0.0469 +[2025-07-06 22:03:40] [Rank 0] Group 3 FTA: 0.0469 +[2025-07-06 22:03:40] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-06 22:03:40] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-06 22:03:40] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-06 22:03:40] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-06 22:03:40] [Rank 0] Group 6 FTA: 0.0677 +[2025-07-06 22:03:40] [Rank 0] Group 6 FTA: 0.0677 +[2025-07-06 22:03:40] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-06 22:03:40] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-06 22:03:40] [Rank 0] Group 8 FTA: 0.1120 +[2025-07-06 22:03:40] [Rank 0] Group 8 FTA: 0.1120 +[2025-07-06 22:03:40] [Rank 0] Group 9 FTA: 0.0859 +[2025-07-06 22:03:40] [Rank 0] Group 9 FTA: 0.0859 +[2025-07-06 22:03:40] [Rank 0] Group 10 FTA: 0.0977 +[2025-07-06 22:03:40] [Rank 0] Group 10 FTA: 0.0977 +[2025-07-06 22:03:40] [Rank 0] Group 11 FTA: 0.0830 +[2025-07-06 22:03:40] [Rank 0] Group 11 FTA: 0.0830 +[2025-07-06 22:03:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 22:03:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 22:03:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 22:03:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 22:03:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 22:03:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 22:03:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 22:03:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 22:03:42] [Rank 0] step:501/10000 train_time:40366ms step_avg:80.57ms +[2025-07-06 22:03:42] [Rank 0] step:501/10000 train_time:40366ms step_avg:80.57ms +[2025-07-06 22:03:43] [Rank 0] step:521/10000 train_time:41822ms step_avg:80.27ms +[2025-07-06 22:03:43] [Rank 0] step:521/10000 train_time:41822ms step_avg:80.27ms +[2025-07-06 22:03:45] [Rank 0] step:541/10000 train_time:43541ms step_avg:80.48ms +[2025-07-06 22:03:45] [Rank 0] step:541/10000 train_time:43541ms step_avg:80.48ms +[2025-07-06 22:03:47] [Rank 0] step:561/10000 train_time:45386ms step_avg:80.90ms +[2025-07-06 22:03:47] [Rank 0] step:561/10000 train_time:45386ms step_avg:80.90ms +[2025-07-06 22:03:48] [Rank 0] step:581/10000 train_time:46846ms step_avg:80.63ms +[2025-07-06 22:03:48] [Rank 0] step:581/10000 train_time:46846ms step_avg:80.63ms +[2025-07-06 22:03:50] [Rank 0] step:601/10000 train_time:48304ms step_avg:80.37ms +[2025-07-06 22:03:50] [Rank 0] step:601/10000 train_time:48304ms step_avg:80.37ms +[2025-07-06 22:03:51] [Rank 0] step:621/10000 train_time:49765ms step_avg:80.14ms +[2025-07-06 22:03:51] [Rank 0] step:621/10000 train_time:49765ms step_avg:80.14ms 
+[2025-07-06 22:03:53] [Rank 0] step:641/10000 train_time:51890ms step_avg:80.95ms +[2025-07-06 22:03:53] [Rank 0] step:641/10000 train_time:51890ms step_avg:80.95ms +[2025-07-06 22:03:55] [Rank 0] step:661/10000 train_time:53352ms step_avg:80.71ms +[2025-07-06 22:03:55] [Rank 0] step:661/10000 train_time:53352ms step_avg:80.71ms +[2025-07-06 22:03:56] [Rank 0] step:681/10000 train_time:54813ms step_avg:80.49ms +[2025-07-06 22:03:56] [Rank 0] step:681/10000 train_time:54813ms step_avg:80.49ms +[2025-07-06 22:03:58] [Rank 0] step:701/10000 train_time:56275ms step_avg:80.28ms +[2025-07-06 22:03:58] [Rank 0] step:701/10000 train_time:56275ms step_avg:80.28ms +[2025-07-06 22:04:00] [Rank 0] step:721/10000 train_time:57994ms step_avg:80.44ms +[2025-07-06 22:04:00] [Rank 0] step:721/10000 train_time:57994ms step_avg:80.44ms +[2025-07-06 22:04:01] [Rank 0] step:741/10000 train_time:59859ms step_avg:80.78ms +[2025-07-06 22:04:01] [Rank 0] step:741/10000 train_time:59859ms step_avg:80.78ms +[2025-07-06 22:04:03] [Rank 0] step:761/10000 train_time:61328ms step_avg:80.59ms +[2025-07-06 22:04:03] [Rank 0] step:761/10000 train_time:61328ms step_avg:80.59ms +[2025-07-06 22:04:04] [Rank 0] step:781/10000 train_time:62798ms step_avg:80.41ms +[2025-07-06 22:04:04] [Rank 0] step:781/10000 train_time:62798ms step_avg:80.41ms +[2025-07-06 22:04:06] [Rank 0] step:801/10000 train_time:64272ms step_avg:80.24ms +[2025-07-06 22:04:06] [Rank 0] step:801/10000 train_time:64272ms step_avg:80.24ms +[2025-07-06 22:04:07] [Rank 0] step:821/10000 train_time:65984ms step_avg:80.37ms +[2025-07-06 22:04:07] [Rank 0] step:821/10000 train_time:65984ms step_avg:80.37ms +[2025-07-06 22:04:09] [Rank 0] step:841/10000 train_time:67458ms step_avg:80.21ms +[2025-07-06 22:04:09] [Rank 0] step:841/10000 train_time:67458ms step_avg:80.21ms +[2025-07-06 22:04:10] [Rank 0] step:861/10000 train_time:68930ms step_avg:80.06ms +[2025-07-06 22:04:10] [Rank 0] step:861/10000 train_time:68930ms step_avg:80.06ms +[2025-07-06 22:04:12] [Rank 0] step:881/10000 train_time:70405ms step_avg:79.91ms +[2025-07-06 22:04:12] [Rank 0] step:881/10000 train_time:70405ms step_avg:79.91ms +[2025-07-06 22:04:14] [Rank 0] step:901/10000 train_time:72137ms step_avg:80.06ms +[2025-07-06 22:04:14] [Rank 0] step:901/10000 train_time:72137ms step_avg:80.06ms +[2025-07-06 22:04:15] [Rank 0] step:921/10000 train_time:74013ms step_avg:80.36ms +[2025-07-06 22:04:15] [Rank 0] step:921/10000 train_time:74013ms step_avg:80.36ms +[2025-07-06 22:04:17] [Rank 0] step:941/10000 train_time:75489ms step_avg:80.22ms +[2025-07-06 22:04:17] [Rank 0] step:941/10000 train_time:75489ms step_avg:80.22ms +[2025-07-06 22:04:18] [Rank 0] step:961/10000 train_time:76967ms step_avg:80.09ms +[2025-07-06 22:04:18] [Rank 0] step:961/10000 train_time:76967ms step_avg:80.09ms +[2025-07-06 22:04:20] [Rank 0] step:981/10000 train_time:78441ms step_avg:79.96ms +[2025-07-06 22:04:20] [Rank 0] step:981/10000 train_time:78441ms step_avg:79.96ms +[2025-07-06 22:04:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:04:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 22:04:22] [Rank 0] PRINT: step:1000/10000 train_loss:1.7030 val_loss:1.5420 train_time:80200ms step_avg:80.20ms +[2025-07-06 22:04:22] [Rank 0] PRINT: step:1000/10000 train_loss:1.7030 val_loss:1.5420 train_time:80200ms step_avg:80.20ms +[2025-07-06 22:04:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:04:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:04:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 22:04:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 22:04:22] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 22:04:22] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 22:09:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 22:09:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 22:09:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 22:09:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 22:09:44] [Rank 0] Total Loss: 4.3378 +[2025-07-06 22:09:44] [Rank 0] Total Loss: 4.3378 +[2025-07-06 22:09:44] [Rank 0] Total FTA: 0.1134 +[2025-07-06 22:09:44] [Rank 0] Total FTA: 0.1134 +[2025-07-06 22:09:44] [Rank 0] Group 0 Loss: 4.5633 +[2025-07-06 22:09:44] [Rank 0] Group 0 Loss: 4.5633 +[2025-07-06 22:09:44] [Rank 0] Group 1 Loss: 4.3148 +[2025-07-06 22:09:44] [Rank 0] Group 1 Loss: 4.3148 +[2025-07-06 22:09:44] [Rank 0] Group 2 Loss: 4.1403 +[2025-07-06 22:09:44] [Rank 0] Group 2 Loss: 4.1403 +[2025-07-06 22:09:44] [Rank 0] Group 3 Loss: 4.3294 +[2025-07-06 22:09:44] [Rank 0] Group 3 Loss: 4.3294 +[2025-07-06 22:09:44] [Rank 0] Group 4 Loss: 4.3928 +[2025-07-06 22:09:44] [Rank 0] Group 4 Loss: 4.3928 +[2025-07-06 22:09:44] [Rank 0] Group 5 Loss: 4.1933 +[2025-07-06 22:09:44] [Rank 0] Group 5 Loss: 4.1933 +[2025-07-06 22:09:44] [Rank 0] Group 6 Loss: 4.2489 +[2025-07-06 22:09:44] [Rank 0] Group 6 Loss: 4.2489 +[2025-07-06 22:09:44] [Rank 0] Group 7 Loss: 4.3045 +[2025-07-06 22:09:44] [Rank 0] Group 7 Loss: 4.3045 +[2025-07-06 22:09:44] [Rank 0] Group 8 Loss: 4.3133 +[2025-07-06 22:09:44] [Rank 0] Group 8 Loss: 4.3133 +[2025-07-06 22:09:44] [Rank 0] Group 9 Loss: 4.3175 +[2025-07-06 22:09:44] [Rank 0] Group 9 Loss: 4.3175 +[2025-07-06 22:09:44] [Rank 0] Group 10 Loss: 4.3506 +[2025-07-06 22:09:44] [Rank 0] Group 10 Loss: 4.3506 +[2025-07-06 22:09:44] [Rank 0] Group 11 Loss: 4.3417 +[2025-07-06 22:09:44] [Rank 0] Group 11 Loss: 4.3417 +[2025-07-06 22:09:44] [Rank 0] Group 0 FTA: 0.1534 +[2025-07-06 22:09:44] [Rank 0] Group 0 FTA: 0.1534 +[2025-07-06 22:09:44] [Rank 0] Group 1 FTA: 0.1380 +[2025-07-06 22:09:44] [Rank 0] Group 1 FTA: 0.1380 +[2025-07-06 22:09:44] [Rank 0] Group 2 FTA: 0.2448 +[2025-07-06 22:09:44] [Rank 0] Group 2 FTA: 0.2448 +[2025-07-06 22:09:44] [Rank 0] Group 3 FTA: 0.0625 +[2025-07-06 22:09:44] [Rank 0] Group 3 FTA: 0.0625 +[2025-07-06 22:09:44] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-06 22:09:44] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-06 22:09:44] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-06 22:09:44] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-06 22:09:44] [Rank 0] Group 6 FTA: 0.1198 +[2025-07-06 22:09:44] [Rank 0] Group 6 FTA: 0.1198 +[2025-07-06 22:09:44] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-06 22:09:44] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-06 22:09:44] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-06 22:09:44] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-06 22:09:44] [Rank 0] Group 9 FTA: 0.1133 
+[2025-07-06 22:09:44] [Rank 0] Group 9 FTA: 0.1133 +[2025-07-06 22:09:44] [Rank 0] Group 10 FTA: 0.0996 +[2025-07-06 22:09:44] [Rank 0] Group 10 FTA: 0.0996 +[2025-07-06 22:09:44] [Rank 0] Group 11 FTA: 0.0977 +[2025-07-06 22:09:44] [Rank 0] Group 11 FTA: 0.0977 +[2025-07-06 22:09:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 22:09:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 22:09:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 22:09:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 22:09:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 22:09:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 22:09:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 22:09:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 22:09:46] [Rank 0] step:1001/10000 train_time:80221ms step_avg:80.14ms +[2025-07-06 22:09:46] [Rank 0] step:1001/10000 train_time:80221ms step_avg:80.14ms +[2025-07-06 22:09:47] [Rank 0] step:1021/10000 train_time:81684ms step_avg:80.00ms +[2025-07-06 22:09:47] [Rank 0] step:1021/10000 train_time:81684ms step_avg:80.00ms +[2025-07-06 22:09:49] [Rank 0] step:1041/10000 train_time:83153ms step_avg:79.88ms +[2025-07-06 22:09:49] [Rank 0] step:1041/10000 train_time:83153ms step_avg:79.88ms +[2025-07-06 22:09:50] [Rank 0] step:1061/10000 train_time:84618ms step_avg:79.75ms +[2025-07-06 22:09:50] [Rank 0] step:1061/10000 train_time:84618ms step_avg:79.75ms +[2025-07-06 22:09:52] [Rank 0] step:1081/10000 train_time:86346ms step_avg:79.88ms +[2025-07-06 22:09:52] [Rank 0] step:1081/10000 train_time:86346ms step_avg:79.88ms +[2025-07-06 22:09:54] [Rank 0] step:1101/10000 train_time:88224ms step_avg:80.13ms +[2025-07-06 22:09:54] [Rank 0] step:1101/10000 train_time:88224ms step_avg:80.13ms +[2025-07-06 22:09:55] [Rank 0] step:1121/10000 train_time:89693ms step_avg:80.01ms +[2025-07-06 22:09:55] [Rank 0] step:1121/10000 train_time:89693ms step_avg:80.01ms +[2025-07-06 22:09:57] [Rank 0] step:1141/10000 train_time:91164ms step_avg:79.90ms +[2025-07-06 22:09:57] [Rank 0] step:1141/10000 train_time:91164ms step_avg:79.90ms +[2025-07-06 22:09:58] [Rank 0] step:1161/10000 train_time:92633ms step_avg:79.79ms +[2025-07-06 22:09:58] [Rank 0] step:1161/10000 train_time:92633ms step_avg:79.79ms +[2025-07-06 22:10:00] [Rank 0] step:1181/10000 train_time:94750ms step_avg:80.23ms +[2025-07-06 22:10:00] [Rank 0] step:1181/10000 train_time:94750ms step_avg:80.23ms +[2025-07-06 22:10:02] [Rank 0] step:1201/10000 train_time:96222ms step_avg:80.12ms +[2025-07-06 22:10:02] [Rank 0] step:1201/10000 train_time:96222ms step_avg:80.12ms +[2025-07-06 22:10:03] [Rank 0] step:1221/10000 train_time:97693ms step_avg:80.01ms +[2025-07-06 22:10:03] [Rank 0] step:1221/10000 train_time:97693ms step_avg:80.01ms +[2025-07-06 
22:10:05] [Rank 0] step:1241/10000 train_time:99165ms step_avg:79.91ms +[2025-07-06 22:10:05] [Rank 0] step:1241/10000 train_time:99165ms step_avg:79.91ms +[2025-07-06 22:10:07] [Rank 0] step:1261/10000 train_time:100636ms step_avg:79.81ms +[2025-07-06 22:10:07] [Rank 0] step:1261/10000 train_time:100636ms step_avg:79.81ms +[2025-07-06 22:10:08] [Rank 0] step:1281/10000 train_time:102753ms step_avg:80.21ms +[2025-07-06 22:10:08] [Rank 0] step:1281/10000 train_time:102753ms step_avg:80.21ms +[2025-07-06 22:10:10] [Rank 0] step:1301/10000 train_time:104221ms step_avg:80.11ms +[2025-07-06 22:10:10] [Rank 0] step:1301/10000 train_time:104221ms step_avg:80.11ms +[2025-07-06 22:10:11] [Rank 0] step:1321/10000 train_time:105694ms step_avg:80.01ms +[2025-07-06 22:10:11] [Rank 0] step:1321/10000 train_time:105694ms step_avg:80.01ms +[2025-07-06 22:10:13] [Rank 0] step:1341/10000 train_time:107166ms step_avg:79.92ms +[2025-07-06 22:10:13] [Rank 0] step:1341/10000 train_time:107166ms step_avg:79.92ms +[2025-07-06 22:10:15] [Rank 0] step:1361/10000 train_time:109280ms step_avg:80.29ms +[2025-07-06 22:10:15] [Rank 0] step:1361/10000 train_time:109280ms step_avg:80.29ms +[2025-07-06 22:10:16] [Rank 0] step:1381/10000 train_time:110755ms step_avg:80.20ms +[2025-07-06 22:10:16] [Rank 0] step:1381/10000 train_time:110755ms step_avg:80.20ms +[2025-07-06 22:10:18] [Rank 0] step:1401/10000 train_time:112227ms step_avg:80.10ms +[2025-07-06 22:10:18] [Rank 0] step:1401/10000 train_time:112227ms step_avg:80.10ms +[2025-07-06 22:10:19] [Rank 0] step:1421/10000 train_time:113700ms step_avg:80.01ms +[2025-07-06 22:10:19] [Rank 0] step:1421/10000 train_time:113700ms step_avg:80.01ms +[2025-07-06 22:10:21] [Rank 0] step:1441/10000 train_time:115429ms step_avg:80.10ms +[2025-07-06 22:10:21] [Rank 0] step:1441/10000 train_time:115429ms step_avg:80.10ms +[2025-07-06 22:10:23] [Rank 0] step:1461/10000 train_time:117317ms step_avg:80.30ms +[2025-07-06 22:10:23] [Rank 0] step:1461/10000 train_time:117317ms step_avg:80.30ms +[2025-07-06 22:10:24] [Rank 0] step:1481/10000 train_time:118786ms step_avg:80.21ms +[2025-07-06 22:10:24] [Rank 0] step:1481/10000 train_time:118786ms step_avg:80.21ms +[2025-07-06 22:10:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:10:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:10:27] [Rank 0] PRINT: step:1500/10000 train_loss:1.4356 val_loss:1.3429 train_time:120259ms step_avg:80.17ms +[2025-07-06 22:10:27] [Rank 0] PRINT: step:1500/10000 train_loss:1.4356 val_loss:1.3429 train_time:120259ms step_avg:80.17ms +[2025-07-06 22:10:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:10:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:10:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 22:10:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 22:10:27] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 22:10:27] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 22:15:46] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 22:15:46] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 22:15:46] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 22:15:46] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 22:15:46] [Rank 0] Total Loss: 4.5803 +[2025-07-06 22:15:46] [Rank 0] Total Loss: 4.5803 +[2025-07-06 22:15:46] [Rank 0] Total FTA: 0.1908 +[2025-07-06 22:15:46] [Rank 0] Total FTA: 0.1908 +[2025-07-06 22:15:46] [Rank 0] Group 0 Loss: 4.7946 +[2025-07-06 22:15:46] [Rank 0] Group 0 Loss: 4.7946 +[2025-07-06 22:15:46] [Rank 0] Group 1 Loss: 4.3715 +[2025-07-06 22:15:46] [Rank 0] Group 1 Loss: 4.3715 +[2025-07-06 22:15:46] [Rank 0] Group 2 Loss: 4.3192 +[2025-07-06 22:15:46] [Rank 0] Group 2 Loss: 4.3192 +[2025-07-06 22:15:46] [Rank 0] Group 3 Loss: 4.6189 +[2025-07-06 22:15:46] [Rank 0] Group 3 Loss: 4.6189 +[2025-07-06 22:15:46] [Rank 0] Group 4 Loss: 4.6479 +[2025-07-06 22:15:46] [Rank 0] Group 4 Loss: 4.6479 +[2025-07-06 22:15:46] [Rank 0] Group 5 Loss: 4.4917 +[2025-07-06 22:15:46] [Rank 0] Group 5 Loss: 4.4917 +[2025-07-06 22:15:46] [Rank 0] Group 6 Loss: 4.5047 +[2025-07-06 22:15:46] [Rank 0] Group 6 Loss: 4.5047 +[2025-07-06 22:15:46] [Rank 0] Group 7 Loss: 4.6246 +[2025-07-06 22:15:46] [Rank 0] Group 7 Loss: 4.6246 +[2025-07-06 22:15:46] [Rank 0] Group 8 Loss: 4.6125 +[2025-07-06 22:15:46] [Rank 0] Group 8 Loss: 4.6125 +[2025-07-06 22:15:46] [Rank 0] Group 9 Loss: 4.5904 +[2025-07-06 22:15:46] [Rank 0] Group 9 Loss: 4.5904 +[2025-07-06 22:15:46] [Rank 0] Group 10 Loss: 4.5767 +[2025-07-06 22:15:46] [Rank 0] Group 10 Loss: 4.5767 +[2025-07-06 22:15:46] [Rank 0] Group 11 Loss: 4.5877 +[2025-07-06 22:15:46] [Rank 0] Group 11 Loss: 4.5877 +[2025-07-06 22:15:46] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-06 22:15:46] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-06 22:15:46] [Rank 0] Group 1 FTA: 0.4740 +[2025-07-06 22:15:46] [Rank 0] Group 1 FTA: 0.4740 +[2025-07-06 22:15:46] [Rank 0] Group 2 FTA: 0.1380 +[2025-07-06 22:15:46] [Rank 0] Group 2 FTA: 0.1380 +[2025-07-06 22:15:46] [Rank 0] Group 3 FTA: 0.0833 +[2025-07-06 22:15:46] [Rank 0] Group 3 FTA: 0.0833 +[2025-07-06 22:15:46] [Rank 0] Group 4 FTA: 0.1198 +[2025-07-06 22:15:46] [Rank 0] Group 4 FTA: 0.1198 +[2025-07-06 22:15:47] [Rank 0] Group 5 FTA: 0.2083 +[2025-07-06 22:15:47] [Rank 0] Group 5 FTA: 0.2083 +[2025-07-06 22:15:47] [Rank 0] Group 6 FTA: 0.1745 +[2025-07-06 22:15:47] [Rank 0] Group 6 FTA: 0.1745 +[2025-07-06 22:15:47] [Rank 0] Group 7 FTA: 0.2240 +[2025-07-06 22:15:47] [Rank 0] Group 7 FTA: 0.2240 +[2025-07-06 22:15:47] [Rank 0] Group 8 FTA: 0.1797 +[2025-07-06 22:15:47] [Rank 0] Group 8 FTA: 0.1797 +[2025-07-06 22:15:47] [Rank 0] Group 9 FTA: 0.1953 +[2025-07-06 22:15:47] [Rank 0] Group 9 FTA: 0.1953 +[2025-07-06 22:15:47] [Rank 0] Group 10 FTA: 0.1777 +[2025-07-06 22:15:47] [Rank 0] Group 10 FTA: 0.1777 +[2025-07-06 22:15:47] [Rank 0] Group 11 FTA: 0.1875 +[2025-07-06 22:15:47] [Rank 0] Group 11 FTA: 0.1875 +[2025-07-06 22:15:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 22:15:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 22:15:47] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 22:15:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 22:15:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 22:15:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 22:15:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 22:15:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 22:15:48] [Rank 0] step:1501/10000 train_time:120281ms step_avg:80.13ms +[2025-07-06 22:15:48] [Rank 0] step:1501/10000 train_time:120281ms step_avg:80.13ms +[2025-07-06 22:15:49] [Rank 0] step:1521/10000 train_time:121755ms step_avg:80.05ms +[2025-07-06 22:15:49] [Rank 0] step:1521/10000 train_time:121755ms step_avg:80.05ms +[2025-07-06 22:15:51] [Rank 0] step:1541/10000 train_time:123458ms step_avg:80.12ms +[2025-07-06 22:15:51] [Rank 0] step:1541/10000 train_time:123458ms step_avg:80.12ms +[2025-07-06 22:15:53] [Rank 0] step:1561/10000 train_time:124923ms step_avg:80.03ms +[2025-07-06 22:15:53] [Rank 0] step:1561/10000 train_time:124923ms step_avg:80.03ms +[2025-07-06 22:15:54] [Rank 0] step:1581/10000 train_time:126394ms step_avg:79.95ms +[2025-07-06 22:15:54] [Rank 0] step:1581/10000 train_time:126394ms step_avg:79.95ms +[2025-07-06 22:15:55] [Rank 0] step:1601/10000 train_time:127861ms step_avg:79.86ms +[2025-07-06 22:15:55] [Rank 0] step:1601/10000 train_time:127861ms step_avg:79.86ms +[2025-07-06 22:15:58] [Rank 0] step:1621/10000 train_time:129567ms step_avg:79.93ms +[2025-07-06 22:15:58] [Rank 0] step:1621/10000 train_time:129567ms step_avg:79.93ms +[2025-07-06 22:15:59] [Rank 0] step:1641/10000 train_time:131810ms step_avg:80.32ms +[2025-07-06 22:15:59] [Rank 0] step:1641/10000 train_time:131810ms step_avg:80.32ms +[2025-07-06 22:16:01] [Rank 0] step:1661/10000 train_time:133277ms step_avg:80.24ms +[2025-07-06 22:16:01] [Rank 0] step:1661/10000 train_time:133277ms step_avg:80.24ms +[2025-07-06 22:16:02] [Rank 0] step:1681/10000 train_time:134746ms step_avg:80.16ms +[2025-07-06 22:16:02] [Rank 0] step:1681/10000 train_time:134746ms step_avg:80.16ms +[2025-07-06 22:16:04] [Rank 0] step:1701/10000 train_time:136218ms step_avg:80.08ms +[2025-07-06 22:16:04] [Rank 0] step:1701/10000 train_time:136218ms step_avg:80.08ms +[2025-07-06 22:16:06] [Rank 0] step:1721/10000 train_time:138034ms step_avg:80.21ms +[2025-07-06 22:16:06] [Rank 0] step:1721/10000 train_time:138034ms step_avg:80.21ms +[2025-07-06 22:16:07] [Rank 0] step:1741/10000 train_time:139501ms step_avg:80.13ms +[2025-07-06 22:16:07] [Rank 0] step:1741/10000 train_time:139501ms step_avg:80.13ms +[2025-07-06 22:16:09] [Rank 0] step:1761/10000 train_time:140971ms step_avg:80.05ms +[2025-07-06 22:16:09] [Rank 0] step:1761/10000 train_time:140971ms step_avg:80.05ms +[2025-07-06 22:16:10] [Rank 0] step:1781/10000 train_time:142445ms step_avg:79.98ms +[2025-07-06 22:16:10] [Rank 0] step:1781/10000 train_time:142445ms step_avg:79.98ms +[2025-07-06 22:16:12] [Rank 0] step:1801/10000 train_time:143916ms step_avg:79.91ms +[2025-07-06 22:16:12] [Rank 0] 
step:1801/10000 train_time:143916ms step_avg:79.91ms +[2025-07-06 22:16:14] [Rank 0] step:1821/10000 train_time:146032ms step_avg:80.19ms +[2025-07-06 22:16:14] [Rank 0] step:1821/10000 train_time:146032ms step_avg:80.19ms +[2025-07-06 22:16:15] [Rank 0] step:1841/10000 train_time:147503ms step_avg:80.12ms +[2025-07-06 22:16:15] [Rank 0] step:1841/10000 train_time:147503ms step_avg:80.12ms +[2025-07-06 22:16:17] [Rank 0] step:1861/10000 train_time:148978ms step_avg:80.05ms +[2025-07-06 22:16:17] [Rank 0] step:1861/10000 train_time:148978ms step_avg:80.05ms +[2025-07-06 22:16:18] [Rank 0] step:1881/10000 train_time:150456ms step_avg:79.99ms +[2025-07-06 22:16:18] [Rank 0] step:1881/10000 train_time:150456ms step_avg:79.99ms +[2025-07-06 22:16:20] [Rank 0] step:1901/10000 train_time:152165ms step_avg:80.04ms +[2025-07-06 22:16:20] [Rank 0] step:1901/10000 train_time:152165ms step_avg:80.04ms +[2025-07-06 22:16:21] [Rank 0] step:1921/10000 train_time:153639ms step_avg:79.98ms +[2025-07-06 22:16:21] [Rank 0] step:1921/10000 train_time:153639ms step_avg:79.98ms +[2025-07-06 22:16:23] [Rank 0] step:1941/10000 train_time:155111ms step_avg:79.91ms +[2025-07-06 22:16:23] [Rank 0] step:1941/10000 train_time:155111ms step_avg:79.91ms +[2025-07-06 22:16:24] [Rank 0] step:1961/10000 train_time:156585ms step_avg:79.85ms +[2025-07-06 22:16:24] [Rank 0] step:1961/10000 train_time:156585ms step_avg:79.85ms +[2025-07-06 22:16:26] [Rank 0] step:1981/10000 train_time:158061ms step_avg:79.79ms +[2025-07-06 22:16:26] [Rank 0] step:1981/10000 train_time:158061ms step_avg:79.79ms +[2025-07-06 22:16:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:16:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:16:28] [Rank 0] PRINT: step:2000/10000 train_loss:1.2489 val_loss:1.2228 train_time:160176ms step_avg:80.09ms +[2025-07-06 22:16:28] [Rank 0] PRINT: step:2000/10000 train_loss:1.2489 val_loss:1.2228 train_time:160176ms step_avg:80.09ms +[2025-07-06 22:16:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:16:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:16:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 22:16:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 22:16:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 22:16:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 22:21:52] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 22:21:52] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 22:21:52] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 22:21:52] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 22:21:52] [Rank 0] Total Loss: 4.6212 +[2025-07-06 22:21:52] [Rank 0] Total Loss: 4.6212 +[2025-07-06 22:21:52] [Rank 0] Total FTA: 0.3261 +[2025-07-06 22:21:52] [Rank 0] Total FTA: 0.3261 +[2025-07-06 22:21:52] [Rank 0] Group 0 Loss: 4.9063 +[2025-07-06 22:21:52] [Rank 0] Group 0 Loss: 4.9063 +[2025-07-06 22:21:52] [Rank 0] Group 1 Loss: 4.2326 +[2025-07-06 22:21:52] [Rank 0] Group 1 Loss: 4.2326 +[2025-07-06 22:21:52] [Rank 0] Group 2 Loss: 4.3541 +[2025-07-06 22:21:52] [Rank 0] Group 2 Loss: 4.3541 +[2025-07-06 22:21:52] [Rank 0] Group 3 Loss: 4.6951 +[2025-07-06 22:21:52] [Rank 0] Group 3 Loss: 4.6951 +[2025-07-06 22:21:52] [Rank 0] Group 4 Loss: 4.6373 +[2025-07-06 22:21:52] [Rank 0] Group 4 Loss: 4.6373 +[2025-07-06 22:21:52] [Rank 0] Group 5 Loss: 4.5894 +[2025-07-06 22:21:52] [Rank 0] Group 5 Loss: 4.5894 +[2025-07-06 22:21:52] [Rank 0] Group 6 Loss: 4.5572 +[2025-07-06 22:21:52] [Rank 0] Group 6 Loss: 4.5572 +[2025-07-06 22:21:52] [Rank 0] Group 7 Loss: 4.6898 +[2025-07-06 22:21:52] [Rank 0] Group 7 Loss: 4.6898 +[2025-07-06 22:21:52] [Rank 0] Group 8 Loss: 4.6264 +[2025-07-06 22:21:52] [Rank 0] Group 8 Loss: 4.6264 +[2025-07-06 22:21:52] [Rank 0] Group 9 Loss: 4.5539 +[2025-07-06 22:21:52] [Rank 0] Group 9 Loss: 4.5539 +[2025-07-06 22:21:52] [Rank 0] Group 10 Loss: 4.6340 +[2025-07-06 22:21:52] [Rank 0] Group 10 Loss: 4.6340 +[2025-07-06 22:21:52] [Rank 0] Group 11 Loss: 4.6382 +[2025-07-06 22:21:52] [Rank 0] Group 11 Loss: 4.6382 +[2025-07-06 22:21:52] [Rank 0] Group 0 FTA: 0.4915 +[2025-07-06 22:21:52] [Rank 0] Group 0 FTA: 0.4915 +[2025-07-06 22:21:52] [Rank 0] Group 1 FTA: 0.4349 +[2025-07-06 22:21:52] [Rank 0] Group 1 FTA: 0.4349 +[2025-07-06 22:21:52] [Rank 0] Group 2 FTA: 0.4219 +[2025-07-06 22:21:52] [Rank 0] Group 2 FTA: 0.4219 +[2025-07-06 22:21:52] [Rank 0] Group 3 FTA: 0.2214 +[2025-07-06 22:21:52] [Rank 0] Group 3 FTA: 0.2214 +[2025-07-06 22:21:52] [Rank 0] Group 4 FTA: 0.2396 +[2025-07-06 22:21:52] [Rank 0] Group 4 FTA: 0.2396 +[2025-07-06 22:21:52] [Rank 0] Group 5 FTA: 0.2396 +[2025-07-06 22:21:52] [Rank 0] Group 5 FTA: 0.2396 +[2025-07-06 22:21:52] [Rank 0] Group 6 FTA: 0.2734 +[2025-07-06 22:21:52] [Rank 0] Group 6 FTA: 0.2734 +[2025-07-06 22:21:52] [Rank 0] Group 7 FTA: 0.3255 +[2025-07-06 22:21:52] [Rank 0] Group 7 FTA: 0.3255 +[2025-07-06 22:21:52] [Rank 0] Group 8 FTA: 0.2839 +[2025-07-06 22:21:52] [Rank 0] Group 8 FTA: 0.2839 +[2025-07-06 22:21:52] [Rank 0] Group 9 FTA: 0.3008 +[2025-07-06 22:21:52] [Rank 0] Group 9 FTA: 0.3008 +[2025-07-06 22:21:52] [Rank 0] Group 10 FTA: 0.3086 +[2025-07-06 22:21:52] [Rank 0] Group 10 FTA: 0.3086 +[2025-07-06 22:21:52] [Rank 0] Group 11 FTA: 0.2803 +[2025-07-06 22:21:52] [Rank 0] Group 11 FTA: 0.2803 +[2025-07-06 22:21:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 22:21:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 22:21:53] [Rank 0] [✓] Per-Class FTA curve updated 
+[2025-07-06 22:21:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 22:21:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 22:21:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 22:21:54] [Rank 0] step:2001/10000 train_time:160197ms step_avg:80.06ms
+[2025-07-06 22:21:55] [Rank 0] step:2021/10000 train_time:161674ms step_avg:80.00ms
+[2025-07-06 22:21:57] [Rank 0] step:2041/10000 train_time:163140ms step_avg:79.93ms
+[2025-07-06 22:21:58] [Rank 0] step:2061/10000 train_time:164604ms step_avg:79.87ms
+[2025-07-06 22:22:00] [Rank 0] step:2081/10000 train_time:166737ms step_avg:80.12ms
+[2025-07-06 22:22:02] [Rank 0] step:2101/10000 train_time:168202ms step_avg:80.06ms
+[2025-07-06 22:22:03] [Rank 0] step:2121/10000 train_time:169669ms step_avg:79.99ms
+[2025-07-06 22:22:05] [Rank 0] step:2141/10000 train_time:171138ms step_avg:79.93ms
+[2025-07-06 22:22:07] [Rank 0] step:2161/10000 train_time:172604ms step_avg:79.87ms
+[2025-07-06 22:22:08] [Rank 0] step:2181/10000 train_time:174736ms step_avg:80.12ms
+[2025-07-06 22:22:10] [Rank 0] step:2201/10000 train_time:176204ms step_avg:80.06ms
+[2025-07-06 22:22:11] [Rank 0] step:2221/10000 train_time:177674ms step_avg:80.00ms
+[2025-07-06 22:22:13] [Rank 0] step:2241/10000 train_time:179162ms step_avg:79.95ms
+[2025-07-06 22:22:15] [Rank 0] step:2261/10000 train_time:181311ms step_avg:80.19ms
+[2025-07-06 22:22:17] [Rank 0] step:2281/10000 train_time:183040ms step_avg:80.25ms
+[2025-07-06 22:22:18] [Rank 0] step:2301/10000 train_time:184533ms step_avg:80.20ms
+[2025-07-06 22:22:20] [Rank 0] step:2321/10000 train_time:186070ms step_avg:80.17ms
+[2025-07-06 22:22:21] [Rank 0] step:2341/10000 train_time:187564ms step_avg:80.12ms
+[2025-07-06 22:22:23] [Rank 0] step:2361/10000 train_time:189301ms step_avg:80.18ms
+[2025-07-06 22:22:24] [Rank 0] step:2381/10000 train_time:190793ms step_avg:80.13ms
+[2025-07-06 22:22:26] [Rank 0] step:2401/10000 train_time:192289ms step_avg:80.09ms
+[2025-07-06 22:22:27] [Rank 0] step:2421/10000 train_time:193783ms step_avg:80.04ms
+[2025-07-06 22:22:29] [Rank 0] step:2441/10000 train_time:195936ms step_avg:80.27ms
+[2025-07-06 22:22:31] [Rank 0] step:2461/10000 train_time:197429ms step_avg:80.22ms
+[2025-07-06 22:22:32] [Rank 0] step:2481/10000 train_time:198925ms step_avg:80.18ms
+[2025-07-06 22:22:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:22:35] [Rank 0] PRINT: step:2500/10000 train_loss:1.1681 val_loss:1.1246 train_time:200420ms step_avg:80.17ms
+[2025-07-06 22:22:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:22:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:22:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:27:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:27:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:27:57] [Rank 0] Total Loss: 4.7367
+[2025-07-06 22:27:57] [Rank 0] Total FTA: 0.4103
+[2025-07-06 22:27:57] [Rank 0] Group 0 Loss: 4.8766
+[2025-07-06 22:27:57] [Rank 0] Group 1 Loss: 4.3652
+[2025-07-06 22:27:57] [Rank 0] Group 2 Loss: 4.4145
+[2025-07-06 22:27:57] [Rank 0] Group 3 Loss: 4.8523
+[2025-07-06 22:27:57] [Rank 0] Group 4 Loss: 4.8804
+[2025-07-06 22:27:57] [Rank 0] Group 5 Loss: 4.6966
+[2025-07-06 22:27:57] [Rank 0] Group 6 Loss: 4.6473
+[2025-07-06 22:27:57] [Rank 0] Group 7 Loss: 4.7220
+[2025-07-06 22:27:57] [Rank 0] Group 8 Loss: 4.7694
+[2025-07-06 22:27:57] [Rank 0] Group 9 Loss: 4.7306
+[2025-07-06 22:27:57] [Rank 0] Group 10 Loss: 4.8016
+[2025-07-06 22:27:57] [Rank 0] Group 11 Loss: 4.8057
+[2025-07-06 22:27:57] [Rank 0] Group 0 FTA: 0.4733
+[2025-07-06 22:27:57] [Rank 0] Group 1 FTA: 0.3255
+[2025-07-06 22:27:57] [Rank 0] Group 2 FTA: 0.4896
+[2025-07-06 22:27:57] [Rank 0] Group 3 FTA: 0.3594
+[2025-07-06 22:27:57] [Rank 0] Group 4 FTA: 0.3542
+[2025-07-06 22:27:57] [Rank 0] Group 5 FTA: 0.4323
+[2025-07-06 22:27:57] [Rank 0] Group 6 FTA: 0.3099
+[2025-07-06 22:27:57] [Rank 0] Group 7 FTA: 0.4193
+[2025-07-06 22:27:57] [Rank 0] Group 8 FTA: 0.3880
+[2025-07-06 22:27:57] [Rank 0] Group 9 FTA: 0.4258
+[2025-07-06 22:27:57] [Rank 0] Group 10 FTA: 0.4355
+[2025-07-06 22:27:57] [Rank 0] Group 11 FTA: 0.4229
+[2025-07-06 22:27:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 22:27:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 22:27:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 22:27:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 22:27:58] [Rank 0] step:2501/10000 train_time:200441ms step_avg:80.14ms
+[2025-07-06 22:28:00] [Rank 0] step:2521/10000 train_time:201939ms step_avg:80.10ms
+[2025-07-06 22:28:02] [Rank 0] step:2541/10000 train_time:204084ms step_avg:80.32ms
+[2025-07-06 22:28:03] [Rank 0] step:2561/10000 train_time:205572ms step_avg:80.27ms
+[2025-07-06 22:28:05] [Rank 0] step:2581/10000 train_time:207064ms step_avg:80.23ms
+[2025-07-06 22:28:06] [Rank 0] step:2601/10000 train_time:208556ms step_avg:80.18ms
+[2025-07-06 22:28:08] [Rank 0] step:2621/10000 train_time:210285ms step_avg:80.23ms
+[2025-07-06 22:28:10] [Rank 0] step:2641/10000 train_time:211776ms step_avg:80.19ms
+[2025-07-06 22:28:11] [Rank 0] step:2661/10000 train_time:213268ms step_avg:80.15ms
+[2025-07-06 22:28:13] [Rank 0] step:2681/10000 train_time:214762ms step_avg:80.11ms
+[2025-07-06 22:28:15] [Rank 0] step:2701/10000 train_time:216256ms step_avg:80.07ms
+[2025-07-06 22:28:16] [Rank 0] step:2721/10000 train_time:218390ms step_avg:80.26ms
+[2025-07-06 22:28:18] [Rank 0] step:2741/10000 train_time:219883ms step_avg:80.22ms
+[2025-07-06 22:28:19] [Rank 0] step:2761/10000 train_time:221378ms step_avg:80.18ms
+[2025-07-06 22:28:21] [Rank 0] step:2781/10000 train_time:222874ms step_avg:80.14ms
+[2025-07-06 22:28:23] [Rank 0] step:2801/10000 train_time:224708ms step_avg:80.22ms
+[2025-07-06 22:28:24] [Rank 0] step:2821/10000 train_time:226202ms step_avg:80.19ms
+[2025-07-06 22:28:26] [Rank 0] step:2841/10000 train_time:227700ms step_avg:80.15ms
+[2025-07-06 22:28:27] [Rank 0] step:2861/10000 train_time:229197ms step_avg:80.11ms
+[2025-07-06 22:28:29] [Rank 0] step:2881/10000 train_time:230694ms step_avg:80.07ms
+[2025-07-06 22:28:30] [Rank 0] step:2901/10000 train_time:232430ms step_avg:80.12ms
+[2025-07-06 22:28:32] [Rank 0] step:2921/10000 train_time:233927ms step_avg:80.08ms
+[2025-07-06 22:28:33] [Rank 0] step:2941/10000 train_time:235426ms step_avg:80.05ms
+[2025-07-06 22:28:35] [Rank 0] step:2961/10000 train_time:236928ms step_avg:80.02ms
+[2025-07-06 22:28:37] [Rank 0] step:2981/10000 train_time:238705ms step_avg:80.08ms
+[2025-07-06 22:28:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:28:39] [Rank 0] PRINT: step:3000/10000 train_loss:1.0973 val_loss:1.0524 train_time:240264ms step_avg:80.09ms
+[2025-07-06 22:28:39] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:28:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:28:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:34:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:34:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:34:03] [Rank 0] Total Loss: 4.8847
+[2025-07-06 22:34:03] [Rank 0] Total FTA: 0.5475
+[2025-07-06 22:34:03] [Rank 0] Group 0 Loss: 5.2346
+[2025-07-06 22:34:03] [Rank 0] Group 1 Loss: 4.6091
+[2025-07-06 22:34:03] [Rank 0] Group 2 Loss: 4.5532
+[2025-07-06 22:34:03] [Rank 0] Group 3 Loss: 4.9494
+[2025-07-06 22:34:03] [Rank 0] Group 4 Loss: 4.8833
+[2025-07-06 22:34:03] [Rank 0] Group 5 Loss: 4.8802
+[2025-07-06 22:34:03] [Rank 0] Group 6 Loss: 4.7557
+[2025-07-06 22:34:03] [Rank 0] Group 7 Loss: 4.8606
+[2025-07-06 22:34:03] [Rank 0] Group 8 Loss: 4.8490
+[2025-07-06 22:34:03] [Rank 0] Group 9 Loss: 4.8696
+[2025-07-06 22:34:03] [Rank 0] Group 10 Loss: 4.8598
+[2025-07-06 22:34:03] [Rank 0] Group 11 Loss: 4.9146
+[2025-07-06 22:34:03] [Rank 0] Group 0 FTA: 0.5046
+[2025-07-06 22:34:03] [Rank 0] Group 1 FTA: 0.6484
+[2025-07-06 22:34:03] [Rank 0] Group 2 FTA: 0.4844
+[2025-07-06 22:34:03] [Rank 0] Group 3 FTA: 0.4167
+[2025-07-06 22:34:03] [Rank 0] Group 4 FTA: 0.5807
+[2025-07-06 22:34:03] [Rank 0] Group 5 FTA: 0.5573
+[2025-07-06 22:34:03] [Rank 0] Group 6 FTA: 0.4844
+[2025-07-06 22:34:03] [Rank 0] Group 7 FTA: 0.6120
+[2025-07-06 22:34:03] [Rank 0] Group 8 FTA: 0.5443
+[2025-07-06 22:34:03] [Rank 0] Group 9 FTA: 0.5820
+[2025-07-06 22:34:03] [Rank 0] Group 10 FTA: 0.5508
+[2025-07-06 22:34:03] [Rank 0] Group 11 FTA: 0.5889
+[2025-07-06 22:34:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 22:34:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 22:34:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 22:34:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 22:34:04] [Rank 0] step:3001/10000 train_time:240283ms step_avg:80.07ms
+[2025-07-06 22:34:06] [Rank 0] step:3021/10000 train_time:241787ms step_avg:80.04ms
+[2025-07-06 22:34:07] [Rank 0] step:3041/10000 train_time:243276ms step_avg:80.00ms
+[2025-07-06 22:34:09] [Rank 0] step:3061/10000 train_time:244766ms step_avg:79.96ms
+[2025-07-06 22:34:11] [Rank 0] step:3081/10000 train_time:246910ms step_avg:80.14ms
+[2025-07-06 22:34:12] [Rank 0] step:3101/10000 train_time:248400ms step_avg:80.10ms
+[2025-07-06 22:34:14] [Rank 0] step:3121/10000 train_time:249891ms step_avg:80.07ms
+[2025-07-06 22:34:15] [Rank 0] step:3141/10000 train_time:251384ms step_avg:80.03ms
+[2025-07-06 22:34:17] [Rank 0] step:3161/10000 train_time:253112ms step_avg:80.07ms
+[2025-07-06 22:34:19] [Rank 0] step:3181/10000 train_time:254605ms step_avg:80.04ms
+[2025-07-06 22:34:20] [Rank 0] step:3201/10000 train_time:256099ms step_avg:80.01ms
+[2025-07-06 22:34:22] [Rank 0] step:3221/10000 train_time:257594ms step_avg:79.97ms
+[2025-07-06 22:34:24] [Rank 0] step:3241/10000 train_time:259089ms step_avg:79.94ms
+[2025-07-06 22:34:25] [Rank 0] step:3261/10000 train_time:261249ms step_avg:80.11ms
+[2025-07-06 22:34:27] [Rank 0] step:3281/10000 train_time:262744ms step_avg:80.08ms
+[2025-07-06 22:34:28] [Rank 0] step:3301/10000 train_time:264240ms step_avg:80.05ms
+[2025-07-06 22:34:30] [Rank 0] step:3321/10000 train_time:265737ms step_avg:80.02ms
+[2025-07-06 22:34:32] [Rank 0] step:3341/10000 train_time:267890ms step_avg:80.18ms
+[2025-07-06 22:34:33] [Rank 0] step:3361/10000 train_time:269385ms step_avg:80.15ms
+[2025-07-06 22:34:35] [Rank 0] step:3381/10000 train_time:270881ms step_avg:80.12ms
+[2025-07-06 22:34:36] [Rank 0] step:3401/10000 train_time:272380ms step_avg:80.09ms
+[2025-07-06 22:34:38] [Rank 0] step:3421/10000 train_time:273879ms step_avg:80.06ms
+[2025-07-06 22:34:40] [Rank 0] step:3441/10000 train_time:275614ms step_avg:80.10ms
+[2025-07-06 22:34:41] [Rank 0] step:3461/10000 train_time:277111ms step_avg:80.07ms
+[2025-07-06 22:34:43] [Rank 0] step:3481/10000 train_time:278610ms step_avg:80.04ms
+[2025-07-06 22:34:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:34:45] [Rank 0] PRINT: step:3500/10000 train_loss:1.0266 val_loss:0.9863 train_time:280108ms step_avg:80.03ms
+[2025-07-06 22:34:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:34:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:34:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:40:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:40:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:40:07] [Rank 0] Total Loss: 5.1355
+[2025-07-06 22:40:07] [Rank 0] Total FTA: 0.6908
+[2025-07-06 22:40:07] [Rank 0] Group 0 Loss: 5.2485
+[2025-07-06 22:40:07] [Rank 0] Group 1 Loss: 5.0138
+[2025-07-06 22:40:07] [Rank 0] Group 2 Loss: 4.7836
+[2025-07-06 22:40:07] [Rank 0] Group 3 Loss: 5.2758
+[2025-07-06 22:40:07] [Rank 0] Group 4 Loss: 5.1290
+[2025-07-06 22:40:07] [Rank 0] Group 5 Loss: 5.1894
+[2025-07-06 22:40:07] [Rank 0] Group 6 Loss: 5.0567
+[2025-07-06 22:40:07] [Rank 0] Group 7 Loss: 5.1588
+[2025-07-06 22:40:07] [Rank 0] Group 8 Loss: 5.1883
+[2025-07-06 22:40:07] [Rank 0] Group 9 Loss: 5.1291
+[2025-07-06 22:40:07] [Rank 0] Group 10 Loss: 5.1775
+[2025-07-06 22:40:07] [Rank 0] Group 11 Loss: 5.1396
+[2025-07-06 22:40:07] [Rank 0] Group 0 FTA: 0.5072
+[2025-07-06 22:40:07] [Rank 0] Group 1 FTA: 0.6979
+[2025-07-06 22:40:07] [Rank 0] Group 2 FTA: 0.7682
+[2025-07-06 22:40:07] [Rank 0] Group 3 FTA: 0.6901
+[2025-07-06 22:40:07] [Rank 0] Group 4 FTA: 0.6562
+[2025-07-06 22:40:07] [Rank 0] Group 5 FTA: 0.7839
+[2025-07-06 22:40:07] [Rank 0] Group 6 FTA: 0.6380
+[2025-07-06 22:40:07] [Rank 0] Group 7 FTA: 0.7109
+[2025-07-06 22:40:07] [Rank 0] Group 8 FTA: 0.7422
+[2025-07-06 22:40:07] [Rank 0] Group 9 FTA: 0.7773
+[2025-07-06 22:40:07] [Rank 0] Group 10 FTA: 0.7188
+[2025-07-06 22:40:07] [Rank 0] Group 11 FTA: 0.7324
+[2025-07-06 22:40:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 22:40:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 22:40:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 22:40:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 22:40:08] [Rank 0] step:3501/10000 train_time:280130ms step_avg:80.01ms
+[2025-07-06 22:40:11] [Rank 0] step:3521/10000 train_time:282424ms step_avg:80.21ms
+[2025-07-06 22:40:12] [Rank 0] step:3541/10000 train_time:284002ms step_avg:80.20ms
+[2025-07-06 22:40:14] [Rank 0] step:3561/10000 train_time:285613ms step_avg:80.21ms
+[2025-07-06 22:40:15] [Rank 0] step:3581/10000 train_time:287104ms step_avg:80.17ms
+[2025-07-06 22:40:18] [Rank 0] step:3601/10000 train_time:289283ms step_avg:80.33ms
+[2025-07-06 22:40:19] [Rank 0] step:3621/10000 train_time:290753ms step_avg:80.30ms
+[2025-07-06 22:40:21] [Rank 0] step:3641/10000 train_time:292241ms step_avg:80.26ms
+[2025-07-06 22:40:22] [Rank 0] step:3661/10000 train_time:293735ms step_avg:80.23ms
+[2025-07-06 22:40:24] [Rank 0] step:3681/10000 train_time:295230ms step_avg:80.20ms
+[2025-07-06 22:40:26] [Rank 0] step:3701/10000 train_time:297363ms step_avg:80.35ms
+[2025-07-06 22:40:27] [Rank 0] step:3721/10000 train_time:298856ms step_avg:80.32ms
+[2025-07-06 22:40:29] [Rank 0] step:3741/10000 train_time:300350ms step_avg:80.29ms
+[2025-07-06 22:40:30] [Rank 0] step:3761/10000 train_time:301842ms step_avg:80.26ms
+[2025-07-06 22:40:32] [Rank 0] step:3781/10000 train_time:303591ms step_avg:80.29ms
+[2025-07-06 22:40:33] [Rank 0] step:3801/10000 train_time:305071ms step_avg:80.26ms
+[2025-07-06 22:40:35] [Rank 0] step:3821/10000 train_time:306567ms step_avg:80.23ms
+[2025-07-06 22:40:36] [Rank 0] step:3841/10000 train_time:308061ms step_avg:80.20ms
+[2025-07-06 22:40:38] [Rank 0] step:3861/10000 train_time:309556ms step_avg:80.18ms
+[2025-07-06 22:40:40] [Rank 0] step:3881/10000 train_time:311285ms step_avg:80.21ms
+[2025-07-06 22:40:41] [Rank 0] step:3901/10000 train_time:312781ms step_avg:80.18ms
+[2025-07-06 22:40:43] [Rank 0] step:3921/10000 train_time:314278ms step_avg:80.15ms
+[2025-07-06 22:40:44] [Rank 0] step:3941/10000 train_time:315774ms step_avg:80.13ms
+[2025-07-06 22:40:46] [Rank 0] step:3961/10000 train_time:317952ms step_avg:80.27ms
+[2025-07-06 22:40:48] [Rank 0] step:3981/10000 train_time:319430ms step_avg:80.24ms
+[2025-07-06 22:40:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:40:50] [Rank 0] PRINT: step:4000/10000 train_loss:0.9684 val_loss:0.9413 train_time:320925ms step_avg:80.23ms
+[2025-07-06 22:40:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:40:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:40:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:46:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:46:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:46:12] [Rank 0] Total Loss: 5.1537
+[2025-07-06 22:46:12] [Rank 0] Total FTA: 0.8294
+[2025-07-06 22:46:12] [Rank 0] Group 0 Loss: 5.3157
+[2025-07-06 22:46:12] [Rank 0] Group 1 Loss: 4.9264
+[2025-07-06 22:46:12] [Rank 0] Group 2 Loss: 4.7380
+[2025-07-06 22:46:12] [Rank 0] Group 3 Loss: 5.3200
+[2025-07-06 22:46:12] [Rank 0] Group 4 Loss: 5.1839
+[2025-07-06 22:46:12] [Rank 0] Group 5 Loss: 5.1833
+[2025-07-06 22:46:12] [Rank 0] Group 6 Loss: 5.0888
+[2025-07-06 22:46:12] [Rank 0] Group 7 Loss: 5.1692
+[2025-07-06 22:46:12] [Rank 0] Group 8 Loss: 5.1591
+[2025-07-06 22:46:12] [Rank 0] Group 9 Loss: 5.1513
+[2025-07-06 22:46:12] [Rank 0] Group 10 Loss: 5.2072
+[2025-07-06 22:46:12] [Rank 0] Group 11 Loss: 5.1786
+[2025-07-06 22:46:12] [Rank 0] Group 0 FTA: 0.8309
+[2025-07-06 22:46:12] [Rank 0] Group 1 FTA: 0.8516
+[2025-07-06 22:46:12] [Rank 0] Group 2 FTA: 0.9245
+[2025-07-06 22:46:12] [Rank 0] Group 3 FTA: 0.7865
+[2025-07-06 22:46:12] [Rank 0] Group 4 FTA: 0.8438
+[2025-07-06 22:46:12] [Rank 0] Group 5 FTA: 0.8646
+[2025-07-06 22:46:12] [Rank 0] Group 6 FTA: 0.8490
+[2025-07-06 22:46:12] [Rank 0] Group 7 FTA: 0.7995
+[2025-07-06 22:46:12] [Rank 0] Group 8 FTA: 0.7943
+[2025-07-06 22:46:12] [Rank 0] Group 9 FTA: 0.7500
+[2025-07-06 22:46:12] [Rank 0] Group 10 FTA: 0.8203
+[2025-07-06 22:46:12] [Rank 0] Group 11 FTA: 0.8232
+[2025-07-06 22:46:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 22:46:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 22:46:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 22:46:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 22:46:14] [Rank 0] step:4001/10000 train_time:320947ms step_avg:80.22ms
+[2025-07-06 22:46:15] [Rank 0] step:4021/10000 train_time:322464ms step_avg:80.19ms
+[2025-07-06 22:46:17] [Rank 0] step:4041/10000 train_time:323953ms step_avg:80.17ms
+[2025-07-06 22:46:19] [Rank 0] step:4061/10000 train_time:326117ms step_avg:80.30ms
+[2025-07-06 22:46:20] [Rank 0] step:4081/10000 train_time:327607ms step_avg:80.28ms
+[2025-07-06 22:46:22] [Rank 0] step:4101/10000 train_time:329098ms step_avg:80.25ms
+[2025-07-06 22:46:23] [Rank 0] step:4121/10000 train_time:330591ms step_avg:80.22ms
+[2025-07-06 22:46:26] [Rank 0] step:4141/10000 train_time:332083ms step_avg:80.19ms
+[2025-07-06 22:46:27] [Rank 0] step:4161/10000 train_time:334225ms step_avg:80.32ms
+[2025-07-06 22:46:29] [Rank 0] step:4181/10000 train_time:335716ms step_avg:80.30ms
+[2025-07-06 22:46:30] [Rank 0] step:4201/10000 train_time:337348ms step_avg:80.30ms
+[2025-07-06 22:46:32] [Rank 0] step:4221/10000 train_time:338809ms step_avg:80.27ms
+[2025-07-06 22:46:33] [Rank 0] step:4241/10000 train_time:340540ms step_avg:80.30ms
+[2025-07-06 22:46:35] [Rank 0] step:4261/10000 train_time:342034ms step_avg:80.27ms
+[2025-07-06 22:46:36] [Rank 0] step:4281/10000 train_time:343531ms step_avg:80.25ms
+[2025-07-06 22:46:38] [Rank 0] step:4301/10000 train_time:345027ms step_avg:80.22ms
+[2025-07-06 22:46:40] [Rank 0] step:4321/10000 train_time:346523ms step_avg:80.19ms
+[2025-07-06 22:46:41] [Rank 0] step:4341/10000 train_time:348665ms step_avg:80.32ms
+[2025-07-06 22:46:43] [Rank 0] step:4361/10000 train_time:350159ms step_avg:80.29ms
+[2025-07-06 22:46:44] [Rank 0] step:4381/10000 train_time:351657ms step_avg:80.27ms
+[2025-07-06 22:46:46] [Rank 0] step:4401/10000 train_time:353156ms step_avg:80.24ms
+[2025-07-06 22:46:48] [Rank 0] step:4421/10000 train_time:354891ms step_avg:80.27ms
+[2025-07-06 22:46:49] [Rank 0] step:4441/10000 train_time:356388ms step_avg:80.25ms
+[2025-07-06 22:46:51] [Rank 0] step:4461/10000 train_time:357886ms step_avg:80.23ms
+[2025-07-06 22:46:52] [Rank 0] step:4481/10000 train_time:359383ms step_avg:80.20ms
+[2025-07-06 22:46:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:46:55] [Rank 0] PRINT: step:4500/10000 train_loss:0.9274 val_loss:0.9096 train_time:360879ms step_avg:80.20ms
+[2025-07-06 22:46:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:46:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:46:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:52:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:52:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:52:17] [Rank 0] Total Loss: 5.1361
+[2025-07-06 22:52:17] [Rank 0] Total FTA: 0.8740
+[2025-07-06 22:52:17] [Rank 0] Group 0 Loss: 5.2873
+[2025-07-06 22:52:17] [Rank 0] Group 1 Loss: 5.0222
+[2025-07-06 22:52:17] [Rank 0] Group 2 Loss: 4.8406
+[2025-07-06 22:52:17] [Rank 0] Group 3 Loss: 5.2705
+[2025-07-06 22:52:17] [Rank 0] Group 4 Loss: 5.1555
+[2025-07-06 22:52:17] [Rank 0] Group 5 Loss: 5.0372
+[2025-07-06 22:52:17] [Rank 0] Group 6 Loss: 5.0967
+[2025-07-06 22:52:17] [Rank 0] Group 7 Loss: 5.1124
+[2025-07-06 22:52:17] [Rank 0] Group 8 Loss: 5.1553
+[2025-07-06 22:52:17] [Rank 0] Group 9 Loss: 5.1711
+[2025-07-06 22:52:17] [Rank 0] Group 10 Loss: 5.1489
+[2025-07-06 22:52:17] [Rank 0] Group 11 Loss: 5.1566
+[2025-07-06 22:52:17] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-06 22:52:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 22:52:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 22:52:17] [Rank 0] Group 3 FTA: 0.8932
+[2025-07-06 22:52:17] [Rank 0] Group 4 FTA: 0.8958
+[2025-07-06 22:52:17] [Rank 0] Group 5 FTA: 0.8672
+[2025-07-06 22:52:17] [Rank 0] Group 6 FTA: 0.8177
+[2025-07-06 22:52:17] [Rank 0] Group 7 FTA: 0.8490
+[2025-07-06 22:52:17] [Rank 0] Group 8 FTA: 0.8438
+[2025-07-06 22:52:17] [Rank 0] Group 9 FTA: 0.8438
+[2025-07-06 22:52:17] [Rank 0] Group 10 FTA: 0.8516
+[2025-07-06 22:52:17] [Rank 0] Group 11 FTA: 0.8584
+[2025-07-06 22:52:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 22:52:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 22:52:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 22:52:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 22:52:19] [Rank 0] step:4501/10000 train_time:361634ms step_avg:80.35ms
+[2025-07-06 22:52:21] [Rank 0] step:4521/10000 train_time:363132ms step_avg:80.32ms
+[2025-07-06 22:52:22] [Rank 0] step:4541/10000 train_time:364621ms step_avg:80.30ms
+[2025-07-06 22:52:24] [Rank 0] step:4561/10000 train_time:366109ms step_avg:80.27ms
+[2025-07-06 22:52:25] [Rank 0] step:4581/10000 train_time:367600ms step_avg:80.24ms
+[2025-07-06 22:52:27] [Rank 0] step:4601/10000 train_time:369744ms step_avg:80.36ms
+[2025-07-06 22:52:29] [Rank 0] step:4621/10000 train_time:371234ms step_avg:80.34ms
+[2025-07-06 22:52:30] [Rank 0] step:4641/10000 train_time:372725ms step_avg:80.31ms
+[2025-07-06 22:52:32] [Rank 0] step:4661/10000 train_time:374217ms step_avg:80.29ms
+[2025-07-06 22:52:34] [Rank 0] step:4681/10000 train_time:375764ms step_avg:80.27ms
+[2025-07-06 22:52:35] [Rank 0] step:4701/10000 train_time:377439ms step_avg:80.29ms
+[2025-07-06 22:52:37] [Rank 0] step:4721/10000 train_time:378931ms step_avg:80.26ms
+[2025-07-06 22:52:38] [Rank 0] step:4741/10000 train_time:380424ms step_avg:80.24ms
+[2025-07-06 22:52:40] [Rank 0] step:4761/10000 train_time:381920ms step_avg:80.22ms
+[2025-07-06 22:52:42] [Rank 0] step:4781/10000 train_time:384083ms step_avg:80.34ms
+[2025-07-06 22:52:43] [Rank 0] step:4801/10000 train_time:385577ms step_avg:80.31ms
+[2025-07-06 22:52:45] [Rank 0] step:4821/10000 train_time:387074ms step_avg:80.29ms
+[2025-07-06 22:52:46] [Rank 0] step:4841/10000 train_time:388569ms step_avg:80.27ms
+[2025-07-06 22:52:48] [Rank 0] step:4861/10000 train_time:390064ms step_avg:80.24ms
+[2025-07-06 22:52:50] [Rank 0] step:4881/10000 train_time:392460ms step_avg:80.41ms
+[2025-07-06 22:52:52] [Rank 0] step:4901/10000 train_time:394047ms step_avg:80.40ms
+[2025-07-06 22:52:53] [Rank 0] step:4921/10000 train_time:395604ms step_avg:80.39ms
+[2025-07-06 22:52:55] [Rank 0] step:4941/10000 train_time:397100ms step_avg:80.37ms
+[2025-07-06 22:52:57] [Rank 0] step:4961/10000 train_time:399252ms step_avg:80.48ms
+[2025-07-06 22:52:58] [Rank 0] step:4981/10000 train_time:400751ms step_avg:80.46ms
+[2025-07-06 22:53:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 22:53:01] [Rank 0] PRINT: step:5000/10000 train_loss:0.9015 val_loss:0.8919 train_time:402249ms step_avg:80.45ms
+[2025-07-06 22:53:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 22:53:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 22:53:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 22:58:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 22:58:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 22:58:23] [Rank 0] Total Loss: 5.2082
+[2025-07-06 22:58:23] [Rank 0] Total FTA: 0.9096
+[2025-07-06 22:58:23] [Rank 0] Group 0 Loss: 5.3124
+[2025-07-06 22:58:23] [Rank 0] Group 1 Loss: 4.9793
+[2025-07-06 22:58:23] [Rank 0] Group 2 Loss: 4.8412
+[2025-07-06 22:58:24] [Rank 0] Group 3 Loss: 5.4209
+[2025-07-06 22:58:24] [Rank 0] Group 4 Loss: 5.2646
+[2025-07-06 22:58:24] [Rank 0] Group 5 Loss: 5.1847
+[2025-07-06 22:58:24] [Rank 0] Group 6 Loss: 5.1197
+[2025-07-06 22:58:24] [Rank 0] Group 7 Loss: 5.2470
+[2025-07-06 22:58:24] [Rank 0] Group 8 Loss: 5.1819
+[2025-07-06 22:58:24] [Rank 0] Group 9 Loss: 5.2303
+[2025-07-06 22:58:24] [Rank 0] Group 10 Loss: 5.2737
+[2025-07-06 22:58:24] [Rank 0] Group 11 Loss: 5.2517
+[2025-07-06 22:58:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 22:58:24] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 22:58:24] [Rank 0] Group 2 FTA: 0.8672
+[2025-07-06 22:58:24] [Rank 0] Group 3 FTA: 0.8542
+[2025-07-06 22:58:24] [Rank 0] Group 4 FTA: 0.8724
+[2025-07-06 22:58:24] [Rank 0] Group 5 FTA: 0.9193
+[2025-07-06 22:58:24] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-06 22:58:24] [Rank 0] Group 7 FTA: 0.8906
+[2025-07-06 22:58:24] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-06 22:58:24] [Rank 0] Group 9 FTA: 0.8789
+[2025-07-06 22:58:24] [Rank 0] Group 10 FTA: 0.8984
+[2025-07-06 22:58:24] [Rank 0] Group 11 FTA: 0.8721
+[2025-07-06 22:58:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 22:58:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 22:58:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 22:58:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 22:58:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 22:58:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 22:58:25] [Rank 0] step:5001/10000 train_time:402270ms step_avg:80.44ms +[2025-07-06 22:58:25] [Rank 0] step:5001/10000 train_time:402270ms step_avg:80.44ms +[2025-07-06 22:58:27] [Rank 0] step:5021/10000 train_time:403774ms step_avg:80.42ms +[2025-07-06 22:58:27] [Rank 0] step:5021/10000 train_time:403774ms step_avg:80.42ms +[2025-07-06 22:58:28] [Rank 0] step:5041/10000 train_time:405261ms step_avg:80.39ms +[2025-07-06 22:58:28] [Rank 0] step:5041/10000 train_time:405261ms step_avg:80.39ms +[2025-07-06 22:58:30] [Rank 0] step:5061/10000 train_time:406997ms step_avg:80.42ms +[2025-07-06 22:58:30] [Rank 0] step:5061/10000 train_time:406997ms step_avg:80.42ms +[2025-07-06 22:58:31] [Rank 0] step:5081/10000 train_time:408486ms step_avg:80.39ms +[2025-07-06 22:58:31] [Rank 0] step:5081/10000 train_time:408486ms step_avg:80.39ms +[2025-07-06 22:58:33] [Rank 0] step:5101/10000 train_time:409977ms step_avg:80.37ms +[2025-07-06 22:58:33] [Rank 0] step:5101/10000 train_time:409977ms step_avg:80.37ms +[2025-07-06 22:58:34] [Rank 0] step:5121/10000 train_time:411470ms step_avg:80.35ms +[2025-07-06 22:58:34] [Rank 0] step:5121/10000 train_time:411470ms step_avg:80.35ms +[2025-07-06 22:58:36] [Rank 0] step:5141/10000 train_time:413618ms step_avg:80.45ms +[2025-07-06 22:58:36] [Rank 0] step:5141/10000 train_time:413618ms step_avg:80.45ms +[2025-07-06 22:58:38] [Rank 0] step:5161/10000 train_time:415110ms step_avg:80.43ms +[2025-07-06 22:58:38] [Rank 0] step:5161/10000 train_time:415110ms step_avg:80.43ms +[2025-07-06 22:58:39] [Rank 0] step:5181/10000 train_time:416603ms step_avg:80.41ms +[2025-07-06 22:58:39] [Rank 0] step:5181/10000 train_time:416603ms step_avg:80.41ms +[2025-07-06 22:58:41] [Rank 0] step:5201/10000 train_time:418098ms step_avg:80.39ms +[2025-07-06 22:58:41] [Rank 0] step:5201/10000 train_time:418098ms step_avg:80.39ms +[2025-07-06 22:58:43] [Rank 0] step:5221/10000 train_time:419643ms step_avg:80.38ms +[2025-07-06 22:58:43] [Rank 0] step:5221/10000 train_time:419643ms step_avg:80.38ms +[2025-07-06 22:58:45] [Rank 0] step:5241/10000 train_time:421729ms step_avg:80.47ms +[2025-07-06 22:58:45] [Rank 0] step:5241/10000 train_time:421729ms step_avg:80.47ms +[2025-07-06 22:58:46] [Rank 0] step:5261/10000 train_time:423222ms step_avg:80.45ms +[2025-07-06 22:58:46] [Rank 0] step:5261/10000 train_time:423222ms step_avg:80.45ms +[2025-07-06 22:58:48] [Rank 0] step:5281/10000 train_time:424717ms step_avg:80.42ms +[2025-07-06 22:58:48] [Rank 0] step:5281/10000 train_time:424717ms step_avg:80.42ms +[2025-07-06 22:58:49] [Rank 0] step:5301/10000 train_time:426211ms step_avg:80.40ms +[2025-07-06 22:58:49] [Rank 0] 
step:5301/10000 train_time:426211ms step_avg:80.40ms +[2025-07-06 22:58:51] [Rank 0] step:5321/10000 train_time:427940ms step_avg:80.42ms +[2025-07-06 22:58:51] [Rank 0] step:5321/10000 train_time:427940ms step_avg:80.42ms +[2025-07-06 22:58:52] [Rank 0] step:5341/10000 train_time:429434ms step_avg:80.40ms +[2025-07-06 22:58:52] [Rank 0] step:5341/10000 train_time:429434ms step_avg:80.40ms +[2025-07-06 22:58:54] [Rank 0] step:5361/10000 train_time:430928ms step_avg:80.38ms +[2025-07-06 22:58:54] [Rank 0] step:5361/10000 train_time:430928ms step_avg:80.38ms +[2025-07-06 22:58:55] [Rank 0] step:5381/10000 train_time:432421ms step_avg:80.36ms +[2025-07-06 22:58:55] [Rank 0] step:5381/10000 train_time:432421ms step_avg:80.36ms +[2025-07-06 22:58:57] [Rank 0] step:5401/10000 train_time:434591ms step_avg:80.46ms +[2025-07-06 22:58:57] [Rank 0] step:5401/10000 train_time:434591ms step_avg:80.46ms +[2025-07-06 22:58:59] [Rank 0] step:5421/10000 train_time:436067ms step_avg:80.44ms +[2025-07-06 22:58:59] [Rank 0] step:5421/10000 train_time:436067ms step_avg:80.44ms +[2025-07-06 22:59:00] [Rank 0] step:5441/10000 train_time:437563ms step_avg:80.42ms +[2025-07-06 22:59:00] [Rank 0] step:5441/10000 train_time:437563ms step_avg:80.42ms +[2025-07-06 22:59:02] [Rank 0] step:5461/10000 train_time:439059ms step_avg:80.40ms +[2025-07-06 22:59:02] [Rank 0] step:5461/10000 train_time:439059ms step_avg:80.40ms +[2025-07-06 22:59:03] [Rank 0] step:5481/10000 train_time:440556ms step_avg:80.38ms +[2025-07-06 22:59:03] [Rank 0] step:5481/10000 train_time:440556ms step_avg:80.38ms +[2025-07-06 22:59:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:59:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 22:59:06] [Rank 0] PRINT: step:5500/10000 train_loss:0.8876 val_loss:0.8815 train_time:442708ms step_avg:80.49ms +[2025-07-06 22:59:06] [Rank 0] PRINT: step:5500/10000 train_loss:0.8876 val_loss:0.8815 train_time:442708ms step_avg:80.49ms +[2025-07-06 22:59:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:59:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 22:59:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 22:59:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 22:59:07] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 22:59:07] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:04:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:04:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:04:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:04:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:04:32] [Rank 0] Total Loss: 5.4006 +[2025-07-06 23:04:32] [Rank 0] Total Loss: 5.4006 +[2025-07-06 23:04:32] [Rank 0] Total FTA: 0.9249 +[2025-07-06 23:04:32] [Rank 0] Total FTA: 0.9249 +[2025-07-06 23:04:32] [Rank 0] Group 0 Loss: 5.5732 +[2025-07-06 23:04:32] [Rank 0] Group 0 Loss: 5.5732 +[2025-07-06 23:04:32] [Rank 0] Group 1 Loss: 5.1863 +[2025-07-06 23:04:32] [Rank 0] Group 1 Loss: 5.1863 +[2025-07-06 23:04:32] [Rank 0] Group 2 Loss: 5.0478 +[2025-07-06 23:04:32] [Rank 0] Group 2 Loss: 5.0478 +[2025-07-06 23:04:32] [Rank 0] Group 3 Loss: 5.6200 +[2025-07-06 23:04:32] [Rank 0] Group 3 Loss: 5.6200 +[2025-07-06 23:04:32] [Rank 0] Group 4 Loss: 5.3960 +[2025-07-06 23:04:32] [Rank 0] Group 4 Loss: 5.3960 +[2025-07-06 23:04:32] [Rank 0] Group 5 Loss: 5.3345 +[2025-07-06 23:04:32] [Rank 0] Group 5 Loss: 5.3345 +[2025-07-06 23:04:32] [Rank 0] Group 6 Loss: 5.3067 +[2025-07-06 23:04:32] [Rank 0] Group 6 Loss: 5.3067 +[2025-07-06 23:04:32] [Rank 0] Group 7 Loss: 5.4447 +[2025-07-06 23:04:32] [Rank 0] Group 7 Loss: 5.4447 +[2025-07-06 23:04:32] [Rank 0] Group 8 Loss: 5.4250 +[2025-07-06 23:04:32] [Rank 0] Group 8 Loss: 5.4250 +[2025-07-06 23:04:32] [Rank 0] Group 9 Loss: 5.3382 +[2025-07-06 23:04:32] [Rank 0] Group 9 Loss: 5.3382 +[2025-07-06 23:04:32] [Rank 0] Group 10 Loss: 5.4150 +[2025-07-06 23:04:32] [Rank 0] Group 10 Loss: 5.4150 +[2025-07-06 23:04:32] [Rank 0] Group 11 Loss: 5.4460 +[2025-07-06 23:04:32] [Rank 0] Group 11 Loss: 5.4460 +[2025-07-06 23:04:32] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 23:04:32] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 23:04:32] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:04:32] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:04:32] [Rank 0] Group 2 FTA: 0.9010 +[2025-07-06 23:04:32] [Rank 0] Group 2 FTA: 0.9010 +[2025-07-06 23:04:32] [Rank 0] Group 3 FTA: 0.9635 +[2025-07-06 23:04:32] [Rank 0] Group 3 FTA: 0.9635 +[2025-07-06 23:04:32] [Rank 0] Group 4 FTA: 0.8620 +[2025-07-06 23:04:32] [Rank 0] Group 4 FTA: 0.8620 +[2025-07-06 23:04:32] [Rank 0] Group 5 FTA: 0.9193 +[2025-07-06 23:04:32] [Rank 0] Group 5 FTA: 0.9193 +[2025-07-06 23:04:32] [Rank 0] Group 6 FTA: 0.9661 +[2025-07-06 23:04:32] [Rank 0] Group 6 FTA: 0.9661 +[2025-07-06 23:04:32] [Rank 0] Group 7 FTA: 0.8776 +[2025-07-06 23:04:32] [Rank 0] Group 7 FTA: 0.8776 +[2025-07-06 23:04:32] [Rank 0] Group 8 FTA: 0.8932 +[2025-07-06 23:04:32] [Rank 0] Group 8 FTA: 0.8932 +[2025-07-06 23:04:32] [Rank 0] Group 9 FTA: 0.8828 +[2025-07-06 23:04:32] [Rank 0] Group 9 FTA: 0.8828 +[2025-07-06 23:04:32] [Rank 0] Group 10 FTA: 0.8945 +[2025-07-06 23:04:32] [Rank 0] Group 10 FTA: 0.8945 +[2025-07-06 23:04:32] [Rank 0] Group 11 FTA: 0.9004 +[2025-07-06 23:04:32] [Rank 0] Group 11 FTA: 0.9004 +[2025-07-06 23:04:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:04:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:04:33] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:04:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:04:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:04:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:04:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:04:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:04:33] [Rank 0] step:5501/10000 train_time:442729ms step_avg:80.48ms +[2025-07-06 23:04:33] [Rank 0] step:5501/10000 train_time:442729ms step_avg:80.48ms +[2025-07-06 23:04:35] [Rank 0] step:5521/10000 train_time:444231ms step_avg:80.46ms +[2025-07-06 23:04:35] [Rank 0] step:5521/10000 train_time:444231ms step_avg:80.46ms +[2025-07-06 23:04:36] [Rank 0] step:5541/10000 train_time:445720ms step_avg:80.44ms +[2025-07-06 23:04:36] [Rank 0] step:5541/10000 train_time:445720ms step_avg:80.44ms +[2025-07-06 23:04:38] [Rank 0] step:5561/10000 train_time:447209ms step_avg:80.42ms +[2025-07-06 23:04:38] [Rank 0] step:5561/10000 train_time:447209ms step_avg:80.42ms +[2025-07-06 23:04:40] [Rank 0] step:5581/10000 train_time:448700ms step_avg:80.40ms +[2025-07-06 23:04:40] [Rank 0] step:5581/10000 train_time:448700ms step_avg:80.40ms +[2025-07-06 23:04:41] [Rank 0] step:5601/10000 train_time:450837ms step_avg:80.49ms +[2025-07-06 23:04:41] [Rank 0] step:5601/10000 train_time:450837ms step_avg:80.49ms +[2025-07-06 23:04:43] [Rank 0] step:5621/10000 train_time:452327ms step_avg:80.47ms +[2025-07-06 23:04:43] [Rank 0] step:5621/10000 train_time:452327ms step_avg:80.47ms +[2025-07-06 23:04:44] [Rank 0] step:5641/10000 train_time:453819ms step_avg:80.45ms +[2025-07-06 23:04:44] [Rank 0] step:5641/10000 train_time:453819ms step_avg:80.45ms +[2025-07-06 23:04:46] [Rank 0] step:5661/10000 train_time:455311ms step_avg:80.43ms +[2025-07-06 23:04:46] [Rank 0] step:5661/10000 train_time:455311ms step_avg:80.43ms +[2025-07-06 23:04:48] [Rank 0] step:5681/10000 train_time:457448ms step_avg:80.52ms +[2025-07-06 23:04:48] [Rank 0] step:5681/10000 train_time:457448ms step_avg:80.52ms +[2025-07-06 23:04:50] [Rank 0] step:5701/10000 train_time:458941ms step_avg:80.50ms +[2025-07-06 23:04:50] [Rank 0] step:5701/10000 train_time:458941ms step_avg:80.50ms +[2025-07-06 23:04:51] [Rank 0] step:5721/10000 train_time:460435ms step_avg:80.48ms +[2025-07-06 23:04:51] [Rank 0] step:5721/10000 train_time:460435ms step_avg:80.48ms +[2025-07-06 23:04:53] [Rank 0] step:5741/10000 train_time:461929ms step_avg:80.46ms +[2025-07-06 23:04:53] [Rank 0] step:5741/10000 train_time:461929ms step_avg:80.46ms +[2025-07-06 23:04:55] [Rank 0] step:5761/10000 train_time:463680ms step_avg:80.49ms +[2025-07-06 23:04:55] [Rank 0] step:5761/10000 train_time:463680ms step_avg:80.49ms +[2025-07-06 23:04:56] [Rank 0] step:5781/10000 train_time:465560ms step_avg:80.53ms +[2025-07-06 23:04:56] [Rank 0] step:5781/10000 train_time:465560ms step_avg:80.53ms +[2025-07-06 23:04:58] [Rank 0] step:5801/10000 train_time:467055ms step_avg:80.51ms +[2025-07-06 23:04:58] [Rank 0] 
step:5801/10000 train_time:467055ms step_avg:80.51ms +[2025-07-06 23:04:59] [Rank 0] step:5821/10000 train_time:468550ms step_avg:80.49ms +[2025-07-06 23:04:59] [Rank 0] step:5821/10000 train_time:468550ms step_avg:80.49ms +[2025-07-06 23:05:01] [Rank 0] step:5841/10000 train_time:470045ms step_avg:80.47ms +[2025-07-06 23:05:01] [Rank 0] step:5841/10000 train_time:470045ms step_avg:80.47ms +[2025-07-06 23:05:03] [Rank 0] step:5861/10000 train_time:472208ms step_avg:80.57ms +[2025-07-06 23:05:03] [Rank 0] step:5861/10000 train_time:472208ms step_avg:80.57ms +[2025-07-06 23:05:04] [Rank 0] step:5881/10000 train_time:473701ms step_avg:80.55ms +[2025-07-06 23:05:04] [Rank 0] step:5881/10000 train_time:473701ms step_avg:80.55ms +[2025-07-06 23:05:06] [Rank 0] step:5901/10000 train_time:475196ms step_avg:80.53ms +[2025-07-06 23:05:06] [Rank 0] step:5901/10000 train_time:475196ms step_avg:80.53ms +[2025-07-06 23:05:07] [Rank 0] step:5921/10000 train_time:476691ms step_avg:80.51ms +[2025-07-06 23:05:07] [Rank 0] step:5921/10000 train_time:476691ms step_avg:80.51ms +[2025-07-06 23:05:09] [Rank 0] step:5941/10000 train_time:478188ms step_avg:80.49ms +[2025-07-06 23:05:09] [Rank 0] step:5941/10000 train_time:478188ms step_avg:80.49ms +[2025-07-06 23:05:11] [Rank 0] step:5961/10000 train_time:480024ms step_avg:80.53ms +[2025-07-06 23:05:11] [Rank 0] step:5961/10000 train_time:480024ms step_avg:80.53ms +[2025-07-06 23:05:12] [Rank 0] step:5981/10000 train_time:481518ms step_avg:80.51ms +[2025-07-06 23:05:12] [Rank 0] step:5981/10000 train_time:481518ms step_avg:80.51ms +[2025-07-06 23:05:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:05:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:05:15] [Rank 0] PRINT: step:6000/10000 train_loss:0.8795 val_loss:0.8767 train_time:483014ms step_avg:80.50ms +[2025-07-06 23:05:15] [Rank 0] PRINT: step:6000/10000 train_loss:0.8795 val_loss:0.8767 train_time:483014ms step_avg:80.50ms +[2025-07-06 23:05:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:05:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:05:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 23:05:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 23:05:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:05:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:10:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:10:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:10:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:10:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:10:38] [Rank 0] Total Loss: 5.4163 +[2025-07-06 23:10:38] [Rank 0] Total Loss: 5.4163 +[2025-07-06 23:10:38] [Rank 0] Total FTA: 0.9093 +[2025-07-06 23:10:38] [Rank 0] Total FTA: 0.9093 +[2025-07-06 23:10:38] [Rank 0] Group 0 Loss: 5.5710 +[2025-07-06 23:10:38] [Rank 0] Group 0 Loss: 5.5710 +[2025-07-06 23:10:38] [Rank 0] Group 1 Loss: 5.2027 +[2025-07-06 23:10:38] [Rank 0] Group 1 Loss: 5.2027 +[2025-07-06 23:10:38] [Rank 0] Group 2 Loss: 5.1294 +[2025-07-06 23:10:38] [Rank 0] Group 2 Loss: 5.1294 +[2025-07-06 23:10:38] [Rank 0] Group 3 Loss: 5.4231 +[2025-07-06 23:10:38] [Rank 0] Group 3 Loss: 5.4231 +[2025-07-06 23:10:38] [Rank 0] Group 4 Loss: 5.3308 +[2025-07-06 23:10:38] [Rank 0] Group 4 Loss: 5.3308 +[2025-07-06 23:10:38] [Rank 0] Group 5 Loss: 5.4355 +[2025-07-06 23:10:38] [Rank 0] Group 5 Loss: 5.4355 +[2025-07-06 23:10:38] [Rank 0] Group 6 Loss: 5.3285 +[2025-07-06 23:10:38] [Rank 0] Group 6 Loss: 5.3285 +[2025-07-06 23:10:38] [Rank 0] Group 7 Loss: 5.4538 +[2025-07-06 23:10:38] [Rank 0] Group 7 Loss: 5.4538 +[2025-07-06 23:10:38] [Rank 0] Group 8 Loss: 5.4438 +[2025-07-06 23:10:38] [Rank 0] Group 8 Loss: 5.4438 +[2025-07-06 23:10:38] [Rank 0] Group 9 Loss: 5.4042 +[2025-07-06 23:10:38] [Rank 0] Group 9 Loss: 5.4042 +[2025-07-06 23:10:38] [Rank 0] Group 10 Loss: 5.5072 +[2025-07-06 23:10:38] [Rank 0] Group 10 Loss: 5.5072 +[2025-07-06 23:10:38] [Rank 0] Group 11 Loss: 5.4762 +[2025-07-06 23:10:38] [Rank 0] Group 11 Loss: 5.4762 +[2025-07-06 23:10:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 23:10:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 23:10:38] [Rank 0] Group 1 FTA: 0.7969 +[2025-07-06 23:10:38] [Rank 0] Group 1 FTA: 0.7969 +[2025-07-06 23:10:38] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 23:10:38] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 23:10:38] [Rank 0] Group 3 FTA: 0.9141 +[2025-07-06 23:10:38] [Rank 0] Group 3 FTA: 0.9141 +[2025-07-06 23:10:38] [Rank 0] Group 4 FTA: 0.8620 +[2025-07-06 23:10:38] [Rank 0] Group 4 FTA: 0.8620 +[2025-07-06 23:10:38] [Rank 0] Group 5 FTA: 0.8854 +[2025-07-06 23:10:38] [Rank 0] Group 5 FTA: 0.8854 +[2025-07-06 23:10:38] [Rank 0] Group 6 FTA: 0.9010 +[2025-07-06 23:10:38] [Rank 0] Group 6 FTA: 0.9010 +[2025-07-06 23:10:38] [Rank 0] Group 7 FTA: 0.9010 +[2025-07-06 23:10:38] [Rank 0] Group 7 FTA: 0.9010 +[2025-07-06 23:10:38] [Rank 0] Group 8 FTA: 0.9036 +[2025-07-06 23:10:38] [Rank 0] Group 8 FTA: 0.9036 +[2025-07-06 23:10:38] [Rank 0] Group 9 FTA: 0.9102 +[2025-07-06 23:10:38] [Rank 0] Group 9 FTA: 0.9102 +[2025-07-06 23:10:38] [Rank 0] Group 10 FTA: 0.8887 +[2025-07-06 23:10:38] [Rank 0] Group 10 FTA: 0.8887 +[2025-07-06 23:10:38] [Rank 0] Group 11 FTA: 0.8926 +[2025-07-06 23:10:38] [Rank 0] Group 11 FTA: 0.8926 +[2025-07-06 23:10:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:10:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:10:39] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:10:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:10:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:10:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:10:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:10:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:10:39] [Rank 0] step:6001/10000 train_time:483036ms step_avg:80.49ms +[2025-07-06 23:10:39] [Rank 0] step:6001/10000 train_time:483036ms step_avg:80.49ms +[2025-07-06 23:10:41] [Rank 0] step:6021/10000 train_time:484539ms step_avg:80.47ms +[2025-07-06 23:10:41] [Rank 0] step:6021/10000 train_time:484539ms step_avg:80.47ms +[2025-07-06 23:10:43] [Rank 0] step:6041/10000 train_time:486263ms step_avg:80.49ms +[2025-07-06 23:10:43] [Rank 0] step:6041/10000 train_time:486263ms step_avg:80.49ms +[2025-07-06 23:10:44] [Rank 0] step:6061/10000 train_time:487752ms step_avg:80.47ms +[2025-07-06 23:10:44] [Rank 0] step:6061/10000 train_time:487752ms step_avg:80.47ms +[2025-07-06 23:10:46] [Rank 0] step:6081/10000 train_time:489425ms step_avg:80.48ms +[2025-07-06 23:10:46] [Rank 0] step:6081/10000 train_time:489425ms step_avg:80.48ms +[2025-07-06 23:10:47] [Rank 0] step:6101/10000 train_time:490982ms step_avg:80.48ms +[2025-07-06 23:10:47] [Rank 0] step:6101/10000 train_time:490982ms step_avg:80.48ms +[2025-07-06 23:10:49] [Rank 0] step:6121/10000 train_time:492527ms step_avg:80.47ms +[2025-07-06 23:10:49] [Rank 0] step:6121/10000 train_time:492527ms step_avg:80.47ms +[2025-07-06 23:10:51] [Rank 0] step:6141/10000 train_time:494610ms step_avg:80.54ms +[2025-07-06 23:10:51] [Rank 0] step:6141/10000 train_time:494610ms step_avg:80.54ms +[2025-07-06 23:10:52] [Rank 0] step:6161/10000 train_time:496102ms step_avg:80.52ms +[2025-07-06 23:10:52] [Rank 0] step:6161/10000 train_time:496102ms step_avg:80.52ms +[2025-07-06 23:10:54] [Rank 0] step:6181/10000 train_time:497595ms step_avg:80.50ms +[2025-07-06 23:10:54] [Rank 0] step:6181/10000 train_time:497595ms step_avg:80.50ms +[2025-07-06 23:10:55] [Rank 0] step:6201/10000 train_time:499087ms step_avg:80.48ms +[2025-07-06 23:10:55] [Rank 0] step:6201/10000 train_time:499087ms step_avg:80.48ms +[2025-07-06 23:10:57] [Rank 0] step:6221/10000 train_time:500817ms step_avg:80.50ms +[2025-07-06 23:10:57] [Rank 0] step:6221/10000 train_time:500817ms step_avg:80.50ms +[2025-07-06 23:10:59] [Rank 0] step:6241/10000 train_time:502311ms step_avg:80.49ms +[2025-07-06 23:10:59] [Rank 0] step:6241/10000 train_time:502311ms step_avg:80.49ms +[2025-07-06 23:11:00] [Rank 0] step:6261/10000 train_time:503805ms step_avg:80.47ms +[2025-07-06 23:11:00] [Rank 0] step:6261/10000 train_time:503805ms step_avg:80.47ms +[2025-07-06 23:11:02] [Rank 0] step:6281/10000 train_time:505301ms step_avg:80.45ms +[2025-07-06 23:11:02] [Rank 0] step:6281/10000 train_time:505301ms step_avg:80.45ms +[2025-07-06 23:11:04] [Rank 0] step:6301/10000 train_time:506849ms step_avg:80.44ms +[2025-07-06 23:11:04] [Rank 0] 
step:6301/10000 train_time:506849ms step_avg:80.44ms +[2025-07-06 23:11:05] [Rank 0] step:6321/10000 train_time:508949ms step_avg:80.52ms +[2025-07-06 23:11:05] [Rank 0] step:6321/10000 train_time:508949ms step_avg:80.52ms +[2025-07-06 23:11:07] [Rank 0] step:6341/10000 train_time:510444ms step_avg:80.50ms +[2025-07-06 23:11:07] [Rank 0] step:6341/10000 train_time:510444ms step_avg:80.50ms +[2025-07-06 23:11:08] [Rank 0] step:6361/10000 train_time:511940ms step_avg:80.48ms +[2025-07-06 23:11:08] [Rank 0] step:6361/10000 train_time:511940ms step_avg:80.48ms +[2025-07-06 23:11:10] [Rank 0] step:6381/10000 train_time:513442ms step_avg:80.46ms +[2025-07-06 23:11:10] [Rank 0] step:6381/10000 train_time:513442ms step_avg:80.46ms +[2025-07-06 23:11:11] [Rank 0] step:6401/10000 train_time:515176ms step_avg:80.48ms +[2025-07-06 23:11:11] [Rank 0] step:6401/10000 train_time:515176ms step_avg:80.48ms +[2025-07-06 23:11:13] [Rank 0] step:6421/10000 train_time:516674ms step_avg:80.47ms +[2025-07-06 23:11:13] [Rank 0] step:6421/10000 train_time:516674ms step_avg:80.47ms +[2025-07-06 23:11:14] [Rank 0] step:6441/10000 train_time:518173ms step_avg:80.45ms +[2025-07-06 23:11:14] [Rank 0] step:6441/10000 train_time:518173ms step_avg:80.45ms +[2025-07-06 23:11:16] [Rank 0] step:6461/10000 train_time:519671ms step_avg:80.43ms +[2025-07-06 23:11:16] [Rank 0] step:6461/10000 train_time:519671ms step_avg:80.43ms +[2025-07-06 23:11:18] [Rank 0] step:6481/10000 train_time:521843ms step_avg:80.52ms +[2025-07-06 23:11:18] [Rank 0] step:6481/10000 train_time:521843ms step_avg:80.52ms +[2025-07-06 23:11:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:11:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:11:20] [Rank 0] PRINT: step:6500/10000 train_loss:0.8751 val_loss:0.8740 train_time:523317ms step_avg:80.51ms +[2025-07-06 23:11:20] [Rank 0] PRINT: step:6500/10000 train_loss:0.8751 val_loss:0.8740 train_time:523317ms step_avg:80.51ms +[2025-07-06 23:11:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:11:20] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:11:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 23:11:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 23:11:21] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:11:21] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:16:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:16:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:16:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:16:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:16:44] [Rank 0] Total Loss: 5.4710 +[2025-07-06 23:16:44] [Rank 0] Total Loss: 5.4710 +[2025-07-06 23:16:44] [Rank 0] Total FTA: 0.8972 +[2025-07-06 23:16:44] [Rank 0] Total FTA: 0.8972 +[2025-07-06 23:16:44] [Rank 0] Group 0 Loss: 5.7131 +[2025-07-06 23:16:44] [Rank 0] Group 0 Loss: 5.7131 +[2025-07-06 23:16:44] [Rank 0] Group 1 Loss: 5.2974 +[2025-07-06 23:16:44] [Rank 0] Group 1 Loss: 5.2974 +[2025-07-06 23:16:44] [Rank 0] Group 2 Loss: 5.1164 +[2025-07-06 23:16:44] [Rank 0] Group 2 Loss: 5.1164 +[2025-07-06 23:16:44] [Rank 0] Group 3 Loss: 5.5084 +[2025-07-06 23:16:44] [Rank 0] Group 3 Loss: 5.5084 +[2025-07-06 23:16:44] [Rank 0] Group 4 Loss: 5.4738 +[2025-07-06 23:16:44] [Rank 0] Group 4 Loss: 5.4738 +[2025-07-06 23:16:44] [Rank 0] Group 5 Loss: 5.4414 +[2025-07-06 23:16:44] [Rank 0] Group 5 Loss: 5.4414 +[2025-07-06 23:16:44] [Rank 0] Group 6 Loss: 5.3942 +[2025-07-06 23:16:44] [Rank 0] Group 6 Loss: 5.3942 +[2025-07-06 23:16:44] [Rank 0] Group 7 Loss: 5.4515 +[2025-07-06 23:16:44] [Rank 0] Group 7 Loss: 5.4515 +[2025-07-06 23:16:44] [Rank 0] Group 8 Loss: 5.4979 +[2025-07-06 23:16:44] [Rank 0] Group 8 Loss: 5.4979 +[2025-07-06 23:16:44] [Rank 0] Group 9 Loss: 5.4819 +[2025-07-06 23:16:44] [Rank 0] Group 9 Loss: 5.4819 +[2025-07-06 23:16:44] [Rank 0] Group 10 Loss: 5.5404 +[2025-07-06 23:16:44] [Rank 0] Group 10 Loss: 5.5404 +[2025-07-06 23:16:44] [Rank 0] Group 11 Loss: 5.4719 +[2025-07-06 23:16:44] [Rank 0] Group 11 Loss: 5.4719 +[2025-07-06 23:16:44] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 23:16:44] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 23:16:44] [Rank 0] Group 1 FTA: 0.8385 +[2025-07-06 23:16:44] [Rank 0] Group 1 FTA: 0.8385 +[2025-07-06 23:16:44] [Rank 0] Group 2 FTA: 0.8073 +[2025-07-06 23:16:44] [Rank 0] Group 2 FTA: 0.8073 +[2025-07-06 23:16:44] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 23:16:44] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 23:16:44] [Rank 0] Group 4 FTA: 0.8490 +[2025-07-06 23:16:44] [Rank 0] Group 4 FTA: 0.8490 +[2025-07-06 23:16:44] [Rank 0] Group 5 FTA: 0.8724 +[2025-07-06 23:16:44] [Rank 0] Group 5 FTA: 0.8724 +[2025-07-06 23:16:44] [Rank 0] Group 6 FTA: 0.8984 +[2025-07-06 23:16:44] [Rank 0] Group 6 FTA: 0.8984 +[2025-07-06 23:16:44] [Rank 0] Group 7 FTA: 0.8932 +[2025-07-06 23:16:44] [Rank 0] Group 7 FTA: 0.8932 +[2025-07-06 23:16:44] [Rank 0] Group 8 FTA: 0.8932 +[2025-07-06 23:16:44] [Rank 0] Group 8 FTA: 0.8932 +[2025-07-06 23:16:44] [Rank 0] Group 9 FTA: 0.8945 +[2025-07-06 23:16:44] [Rank 0] Group 9 FTA: 0.8945 +[2025-07-06 23:16:44] [Rank 0] Group 10 FTA: 0.8926 +[2025-07-06 23:16:44] [Rank 0] Group 10 FTA: 0.8926 +[2025-07-06 23:16:44] [Rank 0] Group 11 FTA: 0.8701 +[2025-07-06 23:16:44] [Rank 0] Group 11 FTA: 0.8701 +[2025-07-06 23:16:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:16:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:16:45] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:16:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:16:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:16:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:16:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:16:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:16:45] [Rank 0] step:6501/10000 train_time:523339ms step_avg:80.50ms +[2025-07-06 23:16:45] [Rank 0] step:6501/10000 train_time:523339ms step_avg:80.50ms +[2025-07-06 23:16:47] [Rank 0] step:6521/10000 train_time:524853ms step_avg:80.49ms +[2025-07-06 23:16:47] [Rank 0] step:6521/10000 train_time:524853ms step_avg:80.49ms +[2025-07-06 23:16:48] [Rank 0] step:6541/10000 train_time:526343ms step_avg:80.47ms +[2025-07-06 23:16:48] [Rank 0] step:6541/10000 train_time:526343ms step_avg:80.47ms +[2025-07-06 23:16:50] [Rank 0] step:6561/10000 train_time:527832ms step_avg:80.45ms +[2025-07-06 23:16:50] [Rank 0] step:6561/10000 train_time:527832ms step_avg:80.45ms +[2025-07-06 23:16:52] [Rank 0] step:6581/10000 train_time:529979ms step_avg:80.53ms +[2025-07-06 23:16:52] [Rank 0] step:6581/10000 train_time:529979ms step_avg:80.53ms +[2025-07-06 23:16:53] [Rank 0] step:6601/10000 train_time:531470ms step_avg:80.51ms +[2025-07-06 23:16:53] [Rank 0] step:6601/10000 train_time:531470ms step_avg:80.51ms +[2025-07-06 23:16:55] [Rank 0] step:6621/10000 train_time:532961ms step_avg:80.50ms +[2025-07-06 23:16:55] [Rank 0] step:6621/10000 train_time:532961ms step_avg:80.50ms +[2025-07-06 23:16:56] [Rank 0] step:6641/10000 train_time:534454ms step_avg:80.48ms +[2025-07-06 23:16:56] [Rank 0] step:6641/10000 train_time:534454ms step_avg:80.48ms +[2025-07-06 23:16:58] [Rank 0] step:6661/10000 train_time:535948ms step_avg:80.46ms +[2025-07-06 23:16:58] [Rank 0] step:6661/10000 train_time:535948ms step_avg:80.46ms +[2025-07-06 23:17:00] [Rank 0] step:6681/10000 train_time:537675ms step_avg:80.48ms +[2025-07-06 23:17:00] [Rank 0] step:6681/10000 train_time:537675ms step_avg:80.48ms +[2025-07-06 23:17:01] [Rank 0] step:6701/10000 train_time:539167ms step_avg:80.46ms +[2025-07-06 23:17:01] [Rank 0] step:6701/10000 train_time:539167ms step_avg:80.46ms +[2025-07-06 23:17:03] [Rank 0] step:6721/10000 train_time:540663ms step_avg:80.44ms +[2025-07-06 23:17:03] [Rank 0] step:6721/10000 train_time:540663ms step_avg:80.44ms +[2025-07-06 23:17:04] [Rank 0] step:6741/10000 train_time:542313ms step_avg:80.45ms +[2025-07-06 23:17:04] [Rank 0] step:6741/10000 train_time:542313ms step_avg:80.45ms +[2025-07-06 23:17:06] [Rank 0] step:6761/10000 train_time:544144ms step_avg:80.48ms +[2025-07-06 23:17:06] [Rank 0] step:6761/10000 train_time:544144ms step_avg:80.48ms +[2025-07-06 23:17:08] [Rank 0] step:6781/10000 train_time:545715ms step_avg:80.48ms +[2025-07-06 23:17:08] [Rank 0] step:6781/10000 train_time:545715ms step_avg:80.48ms +[2025-07-06 23:17:09] [Rank 0] step:6801/10000 train_time:547208ms step_avg:80.46ms +[2025-07-06 23:17:09] [Rank 0] 
step:6801/10000 train_time:547208ms step_avg:80.46ms +[2025-07-06 23:17:11] [Rank 0] step:6821/10000 train_time:548703ms step_avg:80.44ms +[2025-07-06 23:17:11] [Rank 0] step:6821/10000 train_time:548703ms step_avg:80.44ms +[2025-07-06 23:17:13] [Rank 0] step:6841/10000 train_time:550199ms step_avg:80.43ms +[2025-07-06 23:17:13] [Rank 0] step:6841/10000 train_time:550199ms step_avg:80.43ms +[2025-07-06 23:17:14] [Rank 0] step:6861/10000 train_time:552333ms step_avg:80.50ms +[2025-07-06 23:17:14] [Rank 0] step:6861/10000 train_time:552333ms step_avg:80.50ms +[2025-07-06 23:17:16] [Rank 0] step:6881/10000 train_time:553830ms step_avg:80.49ms +[2025-07-06 23:17:16] [Rank 0] step:6881/10000 train_time:553830ms step_avg:80.49ms +[2025-07-06 23:17:17] [Rank 0] step:6901/10000 train_time:555327ms step_avg:80.47ms +[2025-07-06 23:17:17] [Rank 0] step:6901/10000 train_time:555327ms step_avg:80.47ms +[2025-07-06 23:17:19] [Rank 0] step:6921/10000 train_time:556823ms step_avg:80.45ms +[2025-07-06 23:17:19] [Rank 0] step:6921/10000 train_time:556823ms step_avg:80.45ms +[2025-07-06 23:17:21] [Rank 0] step:6941/10000 train_time:558557ms step_avg:80.47ms +[2025-07-06 23:17:21] [Rank 0] step:6941/10000 train_time:558557ms step_avg:80.47ms +[2025-07-06 23:17:22] [Rank 0] step:6961/10000 train_time:560051ms step_avg:80.46ms +[2025-07-06 23:17:22] [Rank 0] step:6961/10000 train_time:560051ms step_avg:80.46ms +[2025-07-06 23:17:24] [Rank 0] step:6981/10000 train_time:561550ms step_avg:80.44ms +[2025-07-06 23:17:24] [Rank 0] step:6981/10000 train_time:561550ms step_avg:80.44ms +[2025-07-06 23:17:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:17:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:17:26] [Rank 0] PRINT: step:7000/10000 train_loss:0.8724 val_loss:0.8722 train_time:563048ms step_avg:80.44ms +[2025-07-06 23:17:26] [Rank 0] PRINT: step:7000/10000 train_loss:0.8724 val_loss:0.8722 train_time:563048ms step_avg:80.44ms +[2025-07-06 23:17:26] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:17:26] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:17:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 23:17:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 23:17:26] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:17:26] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:22:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:22:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:22:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:22:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:22:50] [Rank 0] Total Loss: 5.4607 +[2025-07-06 23:22:50] [Rank 0] Total Loss: 5.4607 +[2025-07-06 23:22:50] [Rank 0] Total FTA: 0.8729 +[2025-07-06 23:22:50] [Rank 0] Total FTA: 0.8729 +[2025-07-06 23:22:50] [Rank 0] Group 0 Loss: 5.7879 +[2025-07-06 23:22:50] [Rank 0] Group 0 Loss: 5.7879 +[2025-07-06 23:22:50] [Rank 0] Group 1 Loss: 5.4801 +[2025-07-06 23:22:50] [Rank 0] Group 1 Loss: 5.4801 +[2025-07-06 23:22:50] [Rank 0] Group 2 Loss: 5.0705 +[2025-07-06 23:22:50] [Rank 0] Group 2 Loss: 5.0705 +[2025-07-06 23:22:50] [Rank 0] Group 3 Loss: 5.5256 +[2025-07-06 23:22:50] [Rank 0] Group 3 Loss: 5.5256 +[2025-07-06 23:22:50] [Rank 0] Group 4 Loss: 5.4007 +[2025-07-06 23:22:50] [Rank 0] Group 4 Loss: 5.4007 +[2025-07-06 23:22:50] [Rank 0] Group 5 Loss: 5.3398 +[2025-07-06 23:22:50] [Rank 0] Group 5 Loss: 5.3398 +[2025-07-06 23:22:50] [Rank 0] Group 6 Loss: 5.3576 +[2025-07-06 23:22:50] [Rank 0] Group 6 Loss: 5.3576 +[2025-07-06 23:22:50] [Rank 0] Group 7 Loss: 5.4487 +[2025-07-06 23:22:50] [Rank 0] Group 7 Loss: 5.4487 +[2025-07-06 23:22:50] [Rank 0] Group 8 Loss: 5.4295 +[2025-07-06 23:22:50] [Rank 0] Group 8 Loss: 5.4295 +[2025-07-06 23:22:50] [Rank 0] Group 9 Loss: 5.4280 +[2025-07-06 23:22:50] [Rank 0] Group 9 Loss: 5.4280 +[2025-07-06 23:22:50] [Rank 0] Group 10 Loss: 5.4410 +[2025-07-06 23:22:50] [Rank 0] Group 10 Loss: 5.4410 +[2025-07-06 23:22:50] [Rank 0] Group 11 Loss: 5.4703 +[2025-07-06 23:22:50] [Rank 0] Group 11 Loss: 5.4703 +[2025-07-06 23:22:50] [Rank 0] Group 0 FTA: 0.8336 +[2025-07-06 23:22:50] [Rank 0] Group 0 FTA: 0.8336 +[2025-07-06 23:22:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:22:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:22:50] [Rank 0] Group 2 FTA: 0.9115 +[2025-07-06 23:22:50] [Rank 0] Group 2 FTA: 0.9115 +[2025-07-06 23:22:50] [Rank 0] Group 3 FTA: 0.8021 +[2025-07-06 23:22:50] [Rank 0] Group 3 FTA: 0.8021 +[2025-07-06 23:22:50] [Rank 0] Group 4 FTA: 0.8385 +[2025-07-06 23:22:50] [Rank 0] Group 4 FTA: 0.8385 +[2025-07-06 23:22:50] [Rank 0] Group 5 FTA: 0.8802 +[2025-07-06 23:22:50] [Rank 0] Group 5 FTA: 0.8802 +[2025-07-06 23:22:50] [Rank 0] Group 6 FTA: 0.8255 +[2025-07-06 23:22:50] [Rank 0] Group 6 FTA: 0.8255 +[2025-07-06 23:22:50] [Rank 0] Group 7 FTA: 0.8984 +[2025-07-06 23:22:50] [Rank 0] Group 7 FTA: 0.8984 +[2025-07-06 23:22:50] [Rank 0] Group 8 FTA: 0.8750 +[2025-07-06 23:22:50] [Rank 0] Group 8 FTA: 0.8750 +[2025-07-06 23:22:50] [Rank 0] Group 9 FTA: 0.8867 +[2025-07-06 23:22:50] [Rank 0] Group 9 FTA: 0.8867 +[2025-07-06 23:22:50] [Rank 0] Group 10 FTA: 0.8848 +[2025-07-06 23:22:50] [Rank 0] Group 10 FTA: 0.8848 +[2025-07-06 23:22:50] [Rank 0] Group 11 FTA: 0.8750 +[2025-07-06 23:22:50] [Rank 0] Group 11 FTA: 0.8750 +[2025-07-06 23:22:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:22:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:22:51] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:22:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:22:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:22:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:22:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:22:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:22:51] [Rank 0] step:7001/10000 train_time:563069ms step_avg:80.43ms +[2025-07-06 23:22:51] [Rank 0] step:7001/10000 train_time:563069ms step_avg:80.43ms +[2025-07-06 23:22:53] [Rank 0] step:7021/10000 train_time:565263ms step_avg:80.51ms +[2025-07-06 23:22:53] [Rank 0] step:7021/10000 train_time:565263ms step_avg:80.51ms +[2025-07-06 23:22:55] [Rank 0] step:7041/10000 train_time:566731ms step_avg:80.49ms +[2025-07-06 23:22:55] [Rank 0] step:7041/10000 train_time:566731ms step_avg:80.49ms +[2025-07-06 23:22:56] [Rank 0] step:7061/10000 train_time:568220ms step_avg:80.47ms +[2025-07-06 23:22:56] [Rank 0] step:7061/10000 train_time:568220ms step_avg:80.47ms +[2025-07-06 23:22:58] [Rank 0] step:7081/10000 train_time:569711ms step_avg:80.46ms +[2025-07-06 23:22:58] [Rank 0] step:7081/10000 train_time:569711ms step_avg:80.46ms +[2025-07-06 23:22:59] [Rank 0] step:7101/10000 train_time:571202ms step_avg:80.44ms +[2025-07-06 23:22:59] [Rank 0] step:7101/10000 train_time:571202ms step_avg:80.44ms +[2025-07-06 23:23:01] [Rank 0] step:7121/10000 train_time:572936ms step_avg:80.46ms +[2025-07-06 23:23:01] [Rank 0] step:7121/10000 train_time:572936ms step_avg:80.46ms +[2025-07-06 23:23:03] [Rank 0] step:7141/10000 train_time:574428ms step_avg:80.44ms +[2025-07-06 23:23:03] [Rank 0] step:7141/10000 train_time:574428ms step_avg:80.44ms +[2025-07-06 23:23:04] [Rank 0] step:7161/10000 train_time:575922ms step_avg:80.42ms +[2025-07-06 23:23:04] [Rank 0] step:7161/10000 train_time:575922ms step_avg:80.42ms +[2025-07-06 23:23:06] [Rank 0] step:7181/10000 train_time:577415ms step_avg:80.41ms +[2025-07-06 23:23:06] [Rank 0] step:7181/10000 train_time:577415ms step_avg:80.41ms +[2025-07-06 23:23:07] [Rank 0] step:7201/10000 train_time:578908ms step_avg:80.39ms +[2025-07-06 23:23:07] [Rank 0] step:7201/10000 train_time:578908ms step_avg:80.39ms +[2025-07-06 23:23:09] [Rank 0] step:7221/10000 train_time:580640ms step_avg:80.41ms +[2025-07-06 23:23:09] [Rank 0] step:7221/10000 train_time:580640ms step_avg:80.41ms +[2025-07-06 23:23:10] [Rank 0] step:7241/10000 train_time:582135ms step_avg:80.39ms +[2025-07-06 23:23:10] [Rank 0] step:7241/10000 train_time:582135ms step_avg:80.39ms +[2025-07-06 23:23:12] [Rank 0] step:7261/10000 train_time:583629ms step_avg:80.38ms +[2025-07-06 23:23:12] [Rank 0] step:7261/10000 train_time:583629ms step_avg:80.38ms +[2025-07-06 23:23:13] [Rank 0] step:7281/10000 train_time:585122ms step_avg:80.36ms +[2025-07-06 23:23:13] [Rank 0] step:7281/10000 train_time:585122ms step_avg:80.36ms +[2025-07-06 23:23:15] [Rank 0] step:7301/10000 train_time:587270ms step_avg:80.44ms +[2025-07-06 23:23:15] [Rank 0] 
step:7301/10000 train_time:587270ms step_avg:80.44ms +[2025-07-06 23:23:17] [Rank 0] step:7321/10000 train_time:588764ms step_avg:80.42ms +[2025-07-06 23:23:17] [Rank 0] step:7321/10000 train_time:588764ms step_avg:80.42ms +[2025-07-06 23:23:18] [Rank 0] step:7341/10000 train_time:590259ms step_avg:80.41ms +[2025-07-06 23:23:18] [Rank 0] step:7341/10000 train_time:590259ms step_avg:80.41ms +[2025-07-06 23:23:20] [Rank 0] step:7361/10000 train_time:591757ms step_avg:80.39ms +[2025-07-06 23:23:20] [Rank 0] step:7361/10000 train_time:591757ms step_avg:80.39ms +[2025-07-06 23:23:22] [Rank 0] step:7381/10000 train_time:593935ms step_avg:80.47ms +[2025-07-06 23:23:22] [Rank 0] step:7381/10000 train_time:593935ms step_avg:80.47ms +[2025-07-06 23:23:24] [Rank 0] step:7401/10000 train_time:595470ms step_avg:80.46ms +[2025-07-06 23:23:24] [Rank 0] step:7401/10000 train_time:595470ms step_avg:80.46ms +[2025-07-06 23:23:25] [Rank 0] step:7421/10000 train_time:597081ms step_avg:80.46ms +[2025-07-06 23:23:25] [Rank 0] step:7421/10000 train_time:597081ms step_avg:80.46ms +[2025-07-06 23:23:27] [Rank 0] step:7441/10000 train_time:598737ms step_avg:80.46ms +[2025-07-06 23:23:27] [Rank 0] step:7441/10000 train_time:598737ms step_avg:80.46ms +[2025-07-06 23:23:28] [Rank 0] step:7461/10000 train_time:600232ms step_avg:80.45ms +[2025-07-06 23:23:28] [Rank 0] step:7461/10000 train_time:600232ms step_avg:80.45ms +[2025-07-06 23:23:31] [Rank 0] step:7481/10000 train_time:602374ms step_avg:80.52ms +[2025-07-06 23:23:31] [Rank 0] step:7481/10000 train_time:602374ms step_avg:80.52ms +[2025-07-06 23:23:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:23:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:23:33] [Rank 0] PRINT: step:7500/10000 train_loss:0.8705 val_loss:0.8706 train_time:603868ms step_avg:80.52ms +[2025-07-06 23:23:33] [Rank 0] PRINT: step:7500/10000 train_loss:0.8705 val_loss:0.8706 train_time:603868ms step_avg:80.52ms +[2025-07-06 23:23:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:23:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:23:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 23:23:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 23:23:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:23:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 23:28:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:28:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 23:28:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:28:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 23:28:57] [Rank 0] Total Loss: 5.4678 +[2025-07-06 23:28:57] [Rank 0] Total Loss: 5.4678 +[2025-07-06 23:28:57] [Rank 0] Total FTA: 0.9182 +[2025-07-06 23:28:57] [Rank 0] Total FTA: 0.9182 +[2025-07-06 23:28:57] [Rank 0] Group 0 Loss: 5.6890 +[2025-07-06 23:28:57] [Rank 0] Group 0 Loss: 5.6890 +[2025-07-06 23:28:57] [Rank 0] Group 1 Loss: 5.4493 +[2025-07-06 23:28:57] [Rank 0] Group 1 Loss: 5.4493 +[2025-07-06 23:28:57] [Rank 0] Group 2 Loss: 5.1818 +[2025-07-06 23:28:57] [Rank 0] Group 2 Loss: 5.1818 +[2025-07-06 23:28:57] [Rank 0] Group 3 Loss: 5.4738 +[2025-07-06 23:28:57] [Rank 0] Group 3 Loss: 5.4738 +[2025-07-06 23:28:57] [Rank 0] Group 4 Loss: 5.4418 +[2025-07-06 23:28:57] [Rank 0] Group 4 Loss: 5.4418 +[2025-07-06 23:28:57] [Rank 0] Group 5 Loss: 5.3579 +[2025-07-06 23:28:57] [Rank 0] Group 5 Loss: 5.3579 +[2025-07-06 23:28:57] [Rank 0] Group 6 Loss: 5.3691 +[2025-07-06 23:28:57] [Rank 0] Group 6 Loss: 5.3691 +[2025-07-06 23:28:57] [Rank 0] Group 7 Loss: 5.4600 +[2025-07-06 23:28:57] [Rank 0] Group 7 Loss: 5.4600 +[2025-07-06 23:28:57] [Rank 0] Group 8 Loss: 5.4826 +[2025-07-06 23:28:57] [Rank 0] Group 8 Loss: 5.4826 +[2025-07-06 23:28:57] [Rank 0] Group 9 Loss: 5.4913 +[2025-07-06 23:28:57] [Rank 0] Group 9 Loss: 5.4913 +[2025-07-06 23:28:57] [Rank 0] Group 10 Loss: 5.4511 +[2025-07-06 23:28:57] [Rank 0] Group 10 Loss: 5.4511 +[2025-07-06 23:28:57] [Rank 0] Group 11 Loss: 5.5013 +[2025-07-06 23:28:57] [Rank 0] Group 11 Loss: 5.5013 +[2025-07-06 23:28:57] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 23:28:57] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 23:28:57] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:28:57] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 23:28:57] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 23:28:57] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 23:28:57] [Rank 0] Group 3 FTA: 0.9219 +[2025-07-06 23:28:57] [Rank 0] Group 3 FTA: 0.9219 +[2025-07-06 23:28:57] [Rank 0] Group 4 FTA: 0.8151 +[2025-07-06 23:28:57] [Rank 0] Group 4 FTA: 0.8151 +[2025-07-06 23:28:57] [Rank 0] Group 5 FTA: 0.9219 +[2025-07-06 23:28:57] [Rank 0] Group 5 FTA: 0.9219 +[2025-07-06 23:28:57] [Rank 0] Group 6 FTA: 0.8594 +[2025-07-06 23:28:57] [Rank 0] Group 6 FTA: 0.8594 +[2025-07-06 23:28:57] [Rank 0] Group 7 FTA: 0.8724 +[2025-07-06 23:28:57] [Rank 0] Group 7 FTA: 0.8724 +[2025-07-06 23:28:57] [Rank 0] Group 8 FTA: 0.8724 +[2025-07-06 23:28:57] [Rank 0] Group 8 FTA: 0.8724 +[2025-07-06 23:28:57] [Rank 0] Group 9 FTA: 0.8984 +[2025-07-06 23:28:57] [Rank 0] Group 9 FTA: 0.8984 +[2025-07-06 23:28:57] [Rank 0] Group 10 FTA: 0.8984 +[2025-07-06 23:28:57] [Rank 0] Group 10 FTA: 0.8984 +[2025-07-06 23:28:57] [Rank 0] Group 11 FTA: 0.9023 +[2025-07-06 23:28:57] [Rank 0] Group 11 FTA: 0.9023 +[2025-07-06 23:28:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:28:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-06 23:28:58] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:28:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-06 23:28:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:28:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-06 23:28:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:28:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-06 23:28:59] [Rank 0] step:7501/10000 train_time:603890ms step_avg:80.51ms +[2025-07-06 23:28:59] [Rank 0] step:7501/10000 train_time:603890ms step_avg:80.51ms +[2025-07-06 23:29:00] [Rank 0] step:7521/10000 train_time:605396ms step_avg:80.49ms +[2025-07-06 23:29:00] [Rank 0] step:7521/10000 train_time:605396ms step_avg:80.49ms +[2025-07-06 23:29:02] [Rank 0] step:7541/10000 train_time:606882ms step_avg:80.48ms +[2025-07-06 23:29:02] [Rank 0] step:7541/10000 train_time:606882ms step_avg:80.48ms +[2025-07-06 23:29:04] [Rank 0] step:7561/10000 train_time:609061ms step_avg:80.55ms +[2025-07-06 23:29:04] [Rank 0] step:7561/10000 train_time:609061ms step_avg:80.55ms +[2025-07-06 23:29:06] [Rank 0] step:7581/10000 train_time:610529ms step_avg:80.53ms +[2025-07-06 23:29:06] [Rank 0] step:7581/10000 train_time:610529ms step_avg:80.53ms +[2025-07-06 23:29:07] [Rank 0] step:7601/10000 train_time:612017ms step_avg:80.52ms +[2025-07-06 23:29:07] [Rank 0] step:7601/10000 train_time:612017ms step_avg:80.52ms +[2025-07-06 23:29:09] [Rank 0] step:7621/10000 train_time:613507ms step_avg:80.50ms +[2025-07-06 23:29:09] [Rank 0] step:7621/10000 train_time:613507ms step_avg:80.50ms +[2025-07-06 23:29:10] [Rank 0] step:7641/10000 train_time:615000ms step_avg:80.49ms +[2025-07-06 23:29:10] [Rank 0] step:7641/10000 train_time:615000ms step_avg:80.49ms +[2025-07-06 23:29:12] [Rank 0] step:7661/10000 train_time:617142ms step_avg:80.56ms +[2025-07-06 23:29:12] [Rank 0] step:7661/10000 train_time:617142ms step_avg:80.56ms +[2025-07-06 23:29:14] [Rank 0] step:7681/10000 train_time:618636ms step_avg:80.54ms +[2025-07-06 23:29:14] [Rank 0] step:7681/10000 train_time:618636ms step_avg:80.54ms +[2025-07-06 23:29:15] [Rank 0] step:7701/10000 train_time:620130ms step_avg:80.53ms +[2025-07-06 23:29:15] [Rank 0] step:7701/10000 train_time:620130ms step_avg:80.53ms +[2025-07-06 23:29:17] [Rank 0] step:7721/10000 train_time:621623ms step_avg:80.51ms +[2025-07-06 23:29:17] [Rank 0] step:7721/10000 train_time:621623ms step_avg:80.51ms +[2025-07-06 23:29:18] [Rank 0] step:7741/10000 train_time:623117ms step_avg:80.50ms +[2025-07-06 23:29:18] [Rank 0] step:7741/10000 train_time:623117ms step_avg:80.50ms +[2025-07-06 23:29:20] [Rank 0] step:7761/10000 train_time:624745ms step_avg:80.50ms +[2025-07-06 23:29:20] [Rank 0] step:7761/10000 train_time:624745ms step_avg:80.50ms +[2025-07-06 23:29:21] [Rank 0] step:7781/10000 train_time:626236ms step_avg:80.48ms +[2025-07-06 23:29:21] [Rank 0] step:7781/10000 train_time:626236ms step_avg:80.48ms +[2025-07-06 23:29:23] [Rank 0] step:7801/10000 train_time:627732ms step_avg:80.47ms +[2025-07-06 23:29:23] [Rank 0] 
step:7801/10000 train_time:627732ms step_avg:80.47ms +[2025-07-06 23:29:24] [Rank 0] step:7821/10000 train_time:629226ms step_avg:80.45ms +[2025-07-06 23:29:24] [Rank 0] step:7821/10000 train_time:629226ms step_avg:80.45ms +[2025-07-06 23:29:26] [Rank 0] step:7841/10000 train_time:631385ms step_avg:80.52ms +[2025-07-06 23:29:26] [Rank 0] step:7841/10000 train_time:631385ms step_avg:80.52ms +[2025-07-06 23:29:28] [Rank 0] step:7861/10000 train_time:632879ms step_avg:80.51ms +[2025-07-06 23:29:28] [Rank 0] step:7861/10000 train_time:632879ms step_avg:80.51ms +[2025-07-06 23:29:29] [Rank 0] step:7881/10000 train_time:634382ms step_avg:80.50ms +[2025-07-06 23:29:29] [Rank 0] step:7881/10000 train_time:634382ms step_avg:80.50ms +[2025-07-06 23:29:31] [Rank 0] step:7901/10000 train_time:635880ms step_avg:80.48ms +[2025-07-06 23:29:31] [Rank 0] step:7901/10000 train_time:635880ms step_avg:80.48ms +[2025-07-06 23:29:33] [Rank 0] step:7921/10000 train_time:637377ms step_avg:80.47ms +[2025-07-06 23:29:33] [Rank 0] step:7921/10000 train_time:637377ms step_avg:80.47ms +[2025-07-06 23:29:35] [Rank 0] step:7941/10000 train_time:639528ms step_avg:80.53ms +[2025-07-06 23:29:35] [Rank 0] step:7941/10000 train_time:639528ms step_avg:80.53ms +[2025-07-06 23:29:36] [Rank 0] step:7961/10000 train_time:641023ms step_avg:80.52ms +[2025-07-06 23:29:36] [Rank 0] step:7961/10000 train_time:641023ms step_avg:80.52ms +[2025-07-06 23:29:38] [Rank 0] step:7981/10000 train_time:642516ms step_avg:80.51ms +[2025-07-06 23:29:38] [Rank 0] step:7981/10000 train_time:642516ms step_avg:80.51ms +[2025-07-06 23:29:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:29:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 23:29:40] [Rank 0] PRINT: step:8000/10000 train_loss:0.8688 val_loss:0.8693 train_time:644011ms step_avg:80.50ms +[2025-07-06 23:29:40] [Rank 0] PRINT: step:8000/10000 train_loss:0.8688 val_loss:0.8693 train_time:644011ms step_avg:80.50ms +[2025-07-06 23:29:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:29:40] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 23:29:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 23:29:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 23:29:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:35:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:35:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:35:04] [Rank 0] Total Loss: 5.4915
+[2025-07-06 23:35:04] [Rank 0] Total FTA: 0.9121
+[2025-07-06 23:35:04] [Rank 0] Group 0 Loss: 5.7471
+[2025-07-06 23:35:04] [Rank 0] Group 1 Loss: 5.3768
+[2025-07-06 23:35:04] [Rank 0] Group 2 Loss: 5.2467
+[2025-07-06 23:35:04] [Rank 0] Group 3 Loss: 5.5770
+[2025-07-06 23:35:04] [Rank 0] Group 4 Loss: 5.4202
+[2025-07-06 23:35:04] [Rank 0] Group 5 Loss: 5.3849
+[2025-07-06 23:35:04] [Rank 0] Group 6 Loss: 5.3576
+[2025-07-06 23:35:04] [Rank 0] Group 7 Loss: 5.4655
+[2025-07-06 23:35:04] [Rank 0] Group 8 Loss: 5.5113
+[2025-07-06 23:35:04] [Rank 0] Group 9 Loss: 5.5301
+[2025-07-06 23:35:04] [Rank 0] Group 10 Loss: 5.4965
+[2025-07-06 23:35:04] [Rank 0] Group 11 Loss: 5.5093
+[2025-07-06 23:35:04] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 23:35:04] [Rank 0] Group 1 FTA: 0.8281
+[2025-07-06 23:35:04] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 23:35:04] [Rank 0] Group 3 FTA: 0.9427
+[2025-07-06 23:35:04] [Rank 0] Group 4 FTA: 0.8958
+[2025-07-06 23:35:04] [Rank 0] Group 5 FTA: 0.9219
+[2025-07-06 23:35:04] [Rank 0] Group 6 FTA: 0.9297
+[2025-07-06 23:35:04] [Rank 0] Group 7 FTA: 0.8906
+[2025-07-06 23:35:04] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-06 23:35:04] [Rank 0] Group 9 FTA: 0.8438
+[2025-07-06 23:35:04] [Rank 0] Group 10 FTA: 0.8730
+[2025-07-06 23:35:04] [Rank 0] Group 11 FTA: 0.8799
+[2025-07-06 23:35:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 23:35:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 23:35:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 23:35:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 23:35:05] [Rank 0] step:8001/10000 train_time:644034ms step_avg:80.49ms
+[2025-07-06 23:35:08] [Rank 0] step:8021/10000 train_time:646204ms step_avg:80.56ms
+[2025-07-06 23:35:09] [Rank 0] step:8041/10000 train_time:647690ms step_avg:80.55ms
+[2025-07-06 23:35:11] [Rank 0] step:8061/10000 train_time:649180ms step_avg:80.53ms
+[2025-07-06 23:35:12] [Rank 0] step:8081/10000 train_time:650670ms step_avg:80.52ms
+[2025-07-06 23:35:14] [Rank 0] step:8101/10000 train_time:652417ms step_avg:80.54ms
+[2025-07-06 23:35:16] [Rank 0] step:8121/10000 train_time:654315ms step_avg:80.57ms
+[2025-07-06 23:35:17] [Rank 0] step:8141/10000 train_time:655805ms step_avg:80.56ms
+[2025-07-06 23:35:19] [Rank 0] step:8161/10000 train_time:657298ms step_avg:80.54ms
+[2025-07-06 23:35:20] [Rank 0] step:8181/10000 train_time:658792ms step_avg:80.53ms
+[2025-07-06 23:35:22] [Rank 0] step:8201/10000 train_time:660923ms step_avg:80.59ms
+[2025-07-06 23:35:24] [Rank 0] step:8221/10000 train_time:662415ms step_avg:80.58ms
+[2025-07-06 23:35:25] [Rank 0] step:8241/10000 train_time:663910ms step_avg:80.56ms
+[2025-07-06 23:35:27] [Rank 0] step:8261/10000 train_time:665402ms step_avg:80.55ms
+[2025-07-06 23:35:29] [Rank 0] step:8281/10000 train_time:666898ms step_avg:80.53ms
+[2025-07-06 23:35:30] [Rank 0] step:8301/10000 train_time:669037ms step_avg:80.60ms
+[2025-07-06 23:35:32] [Rank 0] step:8321/10000 train_time:670530ms step_avg:80.58ms
+[2025-07-06 23:35:33] [Rank 0] step:8341/10000 train_time:672027ms step_avg:80.57ms
+[2025-07-06 23:35:35] [Rank 0] step:8361/10000 train_time:673525ms step_avg:80.56ms
+[2025-07-06 23:35:37] [Rank 0] step:8381/10000 train_time:675257ms step_avg:80.57ms
+[2025-07-06 23:35:38] [Rank 0] step:8401/10000 train_time:676754ms step_avg:80.56ms
+[2025-07-06 23:35:40] [Rank 0] step:8421/10000 train_time:678253ms step_avg:80.54ms
+[2025-07-06 23:35:41] [Rank 0] step:8441/10000 train_time:679752ms step_avg:80.53ms
+[2025-07-06 23:35:43] [Rank 0] step:8461/10000 train_time:681504ms step_avg:80.55ms
+[2025-07-06 23:35:44] [Rank 0] step:8481/10000 train_time:682982ms step_avg:80.53ms
+[2025-07-06 23:35:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:35:47] [Rank 0] PRINT: step:8500/10000 train_loss:0.8673 val_loss:0.8685 train_time:684480ms step_avg:80.53ms
+[2025-07-06 23:35:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:35:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
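The "~5000" target versus the 5633 actual set size reported around each evaluation is explained by the stratified sampler in run_detailed_evaluation (the script is logged in full later in this patch): every class keeps at least one sample, so the many small classes in the power-law layout push the total past the target. A minimal sketch of that sampling step, with names shortened, not a verbatim excerpt of the logged script:

import random
from collections import defaultdict

def stratified_subsample(qa_data, num_samples):
    # Bucket QA items by class, then take a proportional slice of each bucket,
    # but never fewer than one item per class.
    data_by_class = defaultdict(list)
    for item in qa_data:
        data_by_class[item['class_id']].append(item)
    ratio = num_samples / len(qa_data)
    subsample = []
    for items in data_by_class.values():
        k = max(1, int(len(items) * ratio))  # the floor of 1 is what overshoots ~5000 to 5633
        subsample.extend(random.sample(items, min(len(items), k)))
    return subsample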
+[2025-07-06 23:35:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:41:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:41:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:41:13] [Rank 0] Total Loss: 5.5258
+[2025-07-06 23:41:13] [Rank 0] Total FTA: 0.9292
+[2025-07-06 23:41:13] [Rank 0] Group 0 Loss: 5.6573
+[2025-07-06 23:41:13] [Rank 0] Group 1 Loss: 5.5411
+[2025-07-06 23:41:13] [Rank 0] Group 2 Loss: 5.3707
+[2025-07-06 23:41:13] [Rank 0] Group 3 Loss: 5.5317
+[2025-07-06 23:41:13] [Rank 0] Group 4 Loss: 5.5528
+[2025-07-06 23:41:13] [Rank 0] Group 5 Loss: 5.4364
+[2025-07-06 23:41:13] [Rank 0] Group 6 Loss: 5.4495
+[2025-07-06 23:41:13] [Rank 0] Group 7 Loss: 5.5219
+[2025-07-06 23:41:13] [Rank 0] Group 8 Loss: 5.5679
+[2025-07-06 23:41:13] [Rank 0] Group 9 Loss: 5.5766
+[2025-07-06 23:41:13] [Rank 0] Group 10 Loss: 5.5264
+[2025-07-06 23:41:13] [Rank 0] Group 11 Loss: 5.5022
+[2025-07-06 23:41:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 23:41:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 23:41:13] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 23:41:13] [Rank 0] Group 3 FTA: 0.9349
+[2025-07-06 23:41:13] [Rank 0] Group 4 FTA: 0.8438
+[2025-07-06 23:41:13] [Rank 0] Group 5 FTA: 0.9115
+[2025-07-06 23:41:13] [Rank 0] Group 6 FTA: 0.9010
+[2025-07-06 23:41:13] [Rank 0] Group 7 FTA: 0.9036
+[2025-07-06 23:41:13] [Rank 0] Group 8 FTA: 0.8932
+[2025-07-06 23:41:13] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-06 23:41:13] [Rank 0] Group 10 FTA: 0.8906
+[2025-07-06 23:41:13] [Rank 0] Group 11 FTA: 0.9219
+[2025-07-06 23:41:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 23:41:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 23:41:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 23:41:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 23:41:15] [Rank 0] step:8501/10000 train_time:684502ms step_avg:80.52ms
+[2025-07-06 23:41:16] [Rank 0] step:8521/10000 train_time:686013ms step_avg:80.51ms
+[2025-07-06 23:41:18] [Rank 0] step:8541/10000 train_time:687503ms step_avg:80.49ms
+[2025-07-06 23:41:20] [Rank 0] step:8561/10000 train_time:689730ms step_avg:80.57ms
+[2025-07-06 23:41:22] [Rank 0] step:8581/10000 train_time:691379ms step_avg:80.57ms
+[2025-07-06 23:41:23] [Rank 0] step:8601/10000 train_time:692873ms step_avg:80.56ms
+[2025-07-06 23:41:25] [Rank 0] step:8621/10000 train_time:694362ms step_avg:80.54ms
+[2025-07-06 23:41:27] [Rank 0] step:8641/10000 train_time:695853ms step_avg:80.53ms
+[2025-07-06 23:41:28] [Rank 0] step:8661/10000 train_time:697998ms step_avg:80.59ms
+[2025-07-06 23:41:30] [Rank 0] step:8681/10000 train_time:699490ms step_avg:80.58ms
+[2025-07-06 23:41:31] [Rank 0] step:8701/10000 train_time:700985ms step_avg:80.56ms
+[2025-07-06 23:41:33] [Rank 0] step:8721/10000 train_time:702475ms step_avg:80.55ms
+[2025-07-06 23:41:35] [Rank 0] step:8741/10000 train_time:704612ms step_avg:80.61ms
+[2025-07-06 23:41:36] [Rank 0] step:8761/10000 train_time:706104ms step_avg:80.60ms
+[2025-07-06 23:41:38] [Rank 0] step:8781/10000 train_time:707598ms step_avg:80.58ms
+[2025-07-06 23:41:39] [Rank 0] step:8801/10000 train_time:709092ms step_avg:80.57ms
+[2025-07-06 23:41:41] [Rank 0] step:8821/10000 train_time:710585ms step_avg:80.56ms
+[2025-07-06 23:41:42] [Rank 0] step:8841/10000 train_time:712320ms step_avg:80.57ms
+[2025-07-06 23:41:44] [Rank 0] step:8861/10000 train_time:713812ms step_avg:80.56ms
+[2025-07-06 23:41:45] [Rank 0] step:8881/10000 train_time:715305ms step_avg:80.54ms
+[2025-07-06 23:41:47] [Rank 0] step:8901/10000 train_time:716801ms step_avg:80.53ms
+[2025-07-06 23:41:49] [Rank 0] step:8921/10000 train_time:718945ms step_avg:80.59ms
+[2025-07-06 23:41:51] [Rank 0] step:8941/10000 train_time:720440ms step_avg:80.58ms
+[2025-07-06 23:41:52] [Rank 0] step:8961/10000 train_time:721935ms step_avg:80.56ms
+[2025-07-06 23:41:54] [Rank 0] step:8981/10000 train_time:723429ms step_avg:80.55ms
+[2025-07-06 23:41:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:41:56] [Rank 0] PRINT: step:9000/10000 train_loss:0.8661 val_loss:0.8679 train_time:724924ms step_avg:80.55ms
+[2025-07-06 23:41:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:41:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
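The divisibility warning that recurs before each validation pass is worth spelling out once. The numbers below come straight from the warning itself; the snippet is a sketch of what the message implies, not an excerpt of the validation loop:

val_tokens = 1966080
val_batch_size = 262144
full_batches = val_tokens // val_batch_size         # 7, since 1966080 / 262144 = 7.5
tokens_evaluated = full_batches * val_batch_size    # 1835008
tokens_missed = val_tokens - tokens_evaluated       # 131072 tokens skipped per validation pass
print(full_batches, tokens_evaluated, tokens_missed)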
+[2025-07-06 23:41:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:47:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:47:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:47:17] [Rank 0] Total Loss: 5.6265
+[2025-07-06 23:47:17] [Rank 0] Total FTA: 0.9212
+[2025-07-06 23:47:17] [Rank 0] Group 0 Loss: 5.8625
+[2025-07-06 23:47:17] [Rank 0] Group 1 Loss: 5.6653
+[2025-07-06 23:47:17] [Rank 0] Group 2 Loss: 5.3851
+[2025-07-06 23:47:17] [Rank 0] Group 3 Loss: 5.7170
+[2025-07-06 23:47:17] [Rank 0] Group 4 Loss: 5.6215
+[2025-07-06 23:47:17] [Rank 0] Group 5 Loss: 5.5596
+[2025-07-06 23:47:17] [Rank 0] Group 6 Loss: 5.5043
+[2025-07-06 23:47:17] [Rank 0] Group 7 Loss: 5.6146
+[2025-07-06 23:47:17] [Rank 0] Group 8 Loss: 5.6302
+[2025-07-06 23:47:17] [Rank 0] Group 9 Loss: 5.5631
+[2025-07-06 23:47:17] [Rank 0] Group 10 Loss: 5.5846
+[2025-07-06 23:47:17] [Rank 0] Group 11 Loss: 5.6041
+[2025-07-06 23:47:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 23:47:17] [Rank 0] Group 1 FTA: 0.8255
+[2025-07-06 23:47:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 23:47:17] [Rank 0] Group 3 FTA: 0.9661
+[2025-07-06 23:47:17] [Rank 0] Group 4 FTA: 0.8698
+[2025-07-06 23:47:17] [Rank 0] Group 5 FTA: 0.9245
+[2025-07-06 23:47:17] [Rank 0] Group 6 FTA: 0.8880
+[2025-07-06 23:47:17] [Rank 0] Group 7 FTA: 0.9089
+[2025-07-06 23:47:17] [Rank 0] Group 8 FTA: 0.9193
+[2025-07-06 23:47:17] [Rank 0] Group 9 FTA: 0.8750
+[2025-07-06 23:47:17] [Rank 0] Group 10 FTA: 0.8965
+[2025-07-06 23:47:17] [Rank 0] Group 11 FTA: 0.9111
+[2025-07-06 23:47:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 23:47:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 23:47:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 23:47:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 23:47:19] [Rank 0] step:9001/10000 train_time:725056ms step_avg:80.55ms
+[2025-07-06 23:47:21] [Rank 0] step:9021/10000 train_time:727130ms step_avg:80.60ms
+[2025-07-06 23:47:22] [Rank 0] step:9041/10000 train_time:728618ms step_avg:80.59ms
+[2025-07-06 23:47:24] [Rank 0] step:9061/10000 train_time:730108ms step_avg:80.58ms
+[2025-07-06 23:47:25] [Rank 0] step:9081/10000 train_time:731598ms step_avg:80.56ms
+[2025-07-06 23:47:27] [Rank 0] step:9101/10000 train_time:733743ms step_avg:80.62ms
+[2025-07-06 23:47:29] [Rank 0] step:9121/10000 train_time:735234ms step_avg:80.61ms
+[2025-07-06 23:47:30] [Rank 0] step:9141/10000 train_time:736726ms step_avg:80.60ms
+[2025-07-06 23:47:32] [Rank 0] step:9161/10000 train_time:738217ms step_avg:80.58ms
+[2025-07-06 23:47:34] [Rank 0] step:9181/10000 train_time:739712ms step_avg:80.57ms
+[2025-07-06 23:47:35] [Rank 0] step:9201/10000 train_time:741852ms step_avg:80.63ms
+[2025-07-06 23:47:37] [Rank 0] step:9221/10000 train_time:743342ms step_avg:80.61ms
+[2025-07-06 23:47:39] [Rank 0] step:9241/10000 train_time:745074ms step_avg:80.63ms
+[2025-07-06 23:47:40] [Rank 0] step:9261/10000 train_time:746607ms step_avg:80.62ms
+[2025-07-06 23:47:42] [Rank 0] step:9281/10000 train_time:748817ms step_avg:80.68ms
+[2025-07-06 23:47:44] [Rank 0] step:9301/10000 train_time:750313ms step_avg:80.67ms
+[2025-07-06 23:47:45] [Rank 0] step:9321/10000 train_time:751806ms step_avg:80.66ms
+[2025-07-06 23:47:47] [Rank 0] step:9341/10000 train_time:753300ms step_avg:80.64ms
+[2025-07-06 23:47:49] [Rank 0] step:9361/10000 train_time:754795ms step_avg:80.63ms
+[2025-07-06 23:47:50] [Rank 0] step:9381/10000 train_time:756936ms step_avg:80.69ms
+[2025-07-06 23:47:52] [Rank 0] step:9401/10000 train_time:758429ms step_avg:80.68ms
+[2025-07-06 23:47:53] [Rank 0] step:9421/10000 train_time:759924ms step_avg:80.66ms
+[2025-07-06 23:47:55] [Rank 0] step:9441/10000 train_time:761419ms step_avg:80.65ms
+[2025-07-06 23:47:57] [Rank 0] step:9461/10000 train_time:763571ms step_avg:80.71ms
+[2025-07-06 23:47:59] [Rank 0] step:9481/10000 train_time:765065ms step_avg:80.69ms
+[2025-07-06 23:48:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:48:01] [Rank 0] PRINT: step:9500/10000 train_loss:0.8650 val_loss:0.8673 train_time:766558ms step_avg:80.69ms
+[2025-07-06 23:48:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:48:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
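FTA throughout these reports is first-token accuracy: a sample counts as correct when the argmax prediction at the last prompt position equals the first token of the reference answer (encoded with a leading space to match the training context). A condensed restatement of the check in run_detailed_evaluation, whose full source is logged later in this patch:

import torch

def first_token_hit(logits: torch.Tensor, prompt_len: int, expected_token: int) -> bool:
    # logits: (seq_len, vocab_size) for one padded QA sequence; the prediction
    # for the token at position prompt_len is read at index prompt_len - 1.
    predicted = torch.argmax(logits[prompt_len - 1, :]).item()
    return predicted == expected_token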
+[2025-07-06 23:48:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:53:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:53:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:53:26] [Rank 0] Total Loss: 5.6226
+[2025-07-06 23:53:26] [Rank 0] Total FTA: 0.9199
+[2025-07-06 23:53:26] [Rank 0] Group 0 Loss: 5.8528
+[2025-07-06 23:53:26] [Rank 0] Group 1 Loss: 5.6946
+[2025-07-06 23:53:26] [Rank 0] Group 2 Loss: 5.3207
+[2025-07-06 23:53:26] [Rank 0] Group 3 Loss: 5.6921
+[2025-07-06 23:53:26] [Rank 0] Group 4 Loss: 5.5647
+[2025-07-06 23:53:26] [Rank 0] Group 5 Loss: 5.5245
+[2025-07-06 23:53:26] [Rank 0] Group 6 Loss: 5.4686
+[2025-07-06 23:53:26] [Rank 0] Group 7 Loss: 5.6373
+[2025-07-06 23:53:26] [Rank 0] Group 8 Loss: 5.5913
+[2025-07-06 23:53:26] [Rank 0] Group 9 Loss: 5.6068
+[2025-07-06 23:53:26] [Rank 0] Group 10 Loss: 5.6378
+[2025-07-06 23:53:26] [Rank 0] Group 11 Loss: 5.6287
+[2025-07-06 23:53:26] [Rank 0] Group 0 FTA: 0.8440
+[2025-07-06 23:53:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 23:53:26] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 23:53:26] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 23:53:26] [Rank 0] Group 4 FTA: 0.8880
+[2025-07-06 23:53:26] [Rank 0] Group 5 FTA: 0.9062
+[2025-07-06 23:53:26] [Rank 0] Group 6 FTA: 0.9010
+[2025-07-06 23:53:26] [Rank 0] Group 7 FTA: 0.9297
+[2025-07-06 23:53:26] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-06 23:53:27] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-06 23:53:27] [Rank 0] Group 10 FTA: 0.9277
+[2025-07-06 23:53:27] [Rank 0] Group 11 FTA: 0.9141
+[2025-07-06 23:53:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 23:53:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 23:53:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 23:53:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 23:53:28] [Rank 0] step:9501/10000 train_time:766580ms step_avg:80.68ms
+[2025-07-06 23:53:29] [Rank 0] step:9521/10000 train_time:768081ms step_avg:80.67ms
+[2025-07-06 23:53:32] [Rank 0] step:9541/10000 train_time:770249ms step_avg:80.73ms
+[2025-07-06 23:53:33] [Rank 0] step:9561/10000 train_time:771717ms step_avg:80.72ms
+[2025-07-06 23:53:35] [Rank 0] step:9581/10000 train_time:773204ms step_avg:80.70ms
+[2025-07-06 23:53:36] [Rank 0] step:9601/10000 train_time:774695ms step_avg:80.69ms
+[2025-07-06 23:53:38] [Rank 0] step:9621/10000 train_time:776188ms step_avg:80.68ms
+[2025-07-06 23:53:39] [Rank 0] step:9641/10000 train_time:777914ms step_avg:80.69ms
+[2025-07-06 23:53:41] [Rank 0] step:9661/10000 train_time:779407ms step_avg:80.68ms
+[2025-07-06 23:53:42] [Rank 0] step:9681/10000 train_time:780895ms step_avg:80.66ms
+[2025-07-06 23:53:44] [Rank 0] step:9701/10000 train_time:782386ms step_avg:80.65ms
+[2025-07-06 23:53:45] [Rank 0] step:9721/10000 train_time:783933ms step_avg:80.64ms
+[2025-07-06 23:53:47] [Rank 0] step:9741/10000 train_time:785609ms step_avg:80.65ms
+[2025-07-06 23:53:48] [Rank 0] step:9761/10000 train_time:787103ms step_avg:80.64ms
+[2025-07-06 23:53:50] [Rank 0] step:9781/10000 train_time:788598ms step_avg:80.63ms
+[2025-07-06 23:53:51] [Rank 0] step:9801/10000 train_time:790092ms step_avg:80.61ms
+[2025-07-06 23:53:54] [Rank 0] step:9821/10000 train_time:792254ms step_avg:80.67ms
+[2025-07-06 23:53:55] [Rank 0] step:9841/10000 train_time:793746ms step_avg:80.66ms
+[2025-07-06 23:53:57] [Rank 0] step:9861/10000 train_time:795241ms step_avg:80.65ms
+[2025-07-06 23:53:58] [Rank 0] step:9881/10000 train_time:797023ms step_avg:80.66ms
+[2025-07-06 23:54:00] [Rank 0] step:9901/10000 train_time:798671ms step_avg:80.67ms
+[2025-07-06 23:54:02] [Rank 0] step:9921/10000 train_time:800341ms step_avg:80.67ms
+[2025-07-06 23:54:03] [Rank 0] step:9941/10000 train_time:801839ms step_avg:80.66ms
+[2025-07-06 23:54:05] [Rank 0] step:9961/10000 train_time:803340ms step_avg:80.65ms
+[2025-07-06 23:54:06] [Rank 0] step:9981/10000 train_time:804838ms step_avg:80.64ms
+[2025-07-06 23:54:08] [Rank 0] step:10000/10000 train_time:806498ms step_avg:80.65ms
+[2025-07-06 23:54:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 23:54:09] [Rank 0] PRINT: step:10000/10000 train_loss:0.8640 val_loss:0.8670 train_time:806578ms step_avg:80.66ms
+[2025-07-06 23:54:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 23:54:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
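The Group 0-11 breakdown in each detailed evaluation follows the power-law class layout built by generate_powerlaw_selection_counts in the logged script. Assuming m = 11, which is what the 12 reported groups imply, the layout can be reconstructed as follows; this is a reading of the logged function, not output recorded in this run:

def powerlaw_group_layout(m=11):
    # Group g holds 2**(g-1) classes (a single class for g=0),
    # each selected 2**(m-g) times, so later groups are rarer per class.
    layout = []
    for g in range(m + 1):
        num_classes = 1 if g == 0 else 2 ** (g - 1)
        samples_per_class = 2 ** (m - g)
        layout.append((g, num_classes, samples_per_class))
    return layout

for g, n, s in powerlaw_group_layout():
    print(f"group {g}: {n} classes x {s} samples/class")
# group 0: 1 class x 2048 samples ... group 11: 1024 classes x 1 sample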
+[2025-07-06 23:54:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 23:59:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 23:59:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 23:59:36] [Rank 0] Total Loss: 5.6829
+[2025-07-06 23:59:36] [Rank 0] Total FTA: 0.9219
+[2025-07-06 23:59:36] [Rank 0] Group 0 Loss: 5.9123
+[2025-07-06 23:59:36] [Rank 0] Group 1 Loss: 5.8006
+[2025-07-06 23:59:36] [Rank 0] Group 2 Loss: 5.4024
+[2025-07-06 23:59:36] [Rank 0] Group 3 Loss: 5.7762
+[2025-07-06 23:59:36] [Rank 0] Group 4 Loss: 5.7229
+[2025-07-06 23:59:36] [Rank 0] Group 5 Loss: 5.5761
+[2025-07-06 23:59:36] [Rank 0] Group 6 Loss: 5.5204
+[2025-07-06 23:59:36] [Rank 0] Group 7 Loss: 5.6031
+[2025-07-06 23:59:36] [Rank 0] Group 8 Loss: 5.6345
+[2025-07-06 23:59:36] [Rank 0] Group 9 Loss: 5.7090
+[2025-07-06 23:59:36] [Rank 0] Group 10 Loss: 5.6404
+[2025-07-06 23:59:36] [Rank 0] Group 11 Loss: 5.6853
+[2025-07-06 23:59:36] [Rank 0] Group 0 FTA: 0.8466
+[2025-07-06 23:59:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 23:59:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 23:59:36] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 23:59:36] [Rank 0] Group 4 FTA: 0.8776
+[2025-07-06 23:59:36] [Rank 0] Group 5 FTA: 0.9427
+[2025-07-06 23:59:36] [Rank 0] Group 6 FTA: 0.8802
+[2025-07-06 23:59:36] [Rank 0] Group 7 FTA: 0.9193
+[2025-07-06 23:59:36] [Rank 0] Group 8 FTA: 0.9323
+[2025-07-06 23:59:36] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-06 23:59:36] [Rank 0] Group 10 FTA: 0.9121
+[2025-07-06 23:59:36] [Rank 0] Group 11 FTA: 0.9248
+[2025-07-06 23:59:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-06 23:59:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-06 23:59:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-06 23:59:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-06 23:59:37] [Rank 0] step:10001/10000 train_time:806600ms step_avg:80.65ms
+[2025-07-06 23:59:37] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 23:59:37 2025 ---
+[2025-07-06 23:59:37] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/config.json b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..79839f93a535ba4e666992c57a830a912ec92286
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 49,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "9031321a-bcfb-42e7-82d0-fd09c3c238c4",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..962fd6869bf764c7169a0158d58301954a69951e
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b62e277d3d0f7c82742009aefce0c679dbf02a4c34be71a6f0cbb8cded11206
+size 314660
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..103e909da787226491517e47aec5d0c3038d2ebd
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79e118e7617ded500232a76e1bab6750aac11b4be37084123980d9a73219245a
+size 372944
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_acc_curve.png
b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..5f7b651e6ee21bd405584eb473c655309024abf9
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd1b5dc227e2799236a6464fc6c8fccb96c01c9f9a5297edc2872ce72f796bd1
+size 97276
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..ec0116ed6588a410800ef37a0eb89633b9b41c49
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c25aec655d181c7a7adfef9294a06d7cbcc2f5d3c1861bc75c4c3e4722a2133
+size 103327
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/training_log_9031321a-bcfb-42e7-82d0-fd09c3c238c4.txt b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/training_log_9031321a-bcfb-42e7-82d0-fd09c3c238c4.txt
new file mode 100644
index 0000000000000000000000000000000000000000..6268a505a7c2d0acda4925a6b148c80557a6afdb
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/training_log_9031321a-bcfb-42e7-82d0-fd09c3c238c4.txt
@@ -0,0 +1,3308 @@
+[2025-07-09 08:31:40] [Rank 0] PRINT: --- Script Start: Wed Jul 9 08:31:40 2025 ---
+[2025-07-09 08:31:40] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.001)
+[2025-07-09 08:31:40] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-09 08:31:40] [Rank 0] PRINT: Using fixed seed: 49
+[2025-07-09 08:31:40] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49
+[2025-07-09 08:31:40] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
                        actual_len = current_len
                    else:
                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
                        actual_len = BLOCK_SIZE

                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)

                    result = model(padded_input_ids, None, sliding_window_num_blocks)

                    logits = result[-1] if isinstance(result, tuple) else result
                    if logits.dim() == 3 and logits.shape[0] == 1:
                        logits = logits.squeeze(0)

                    if actual_len - 1 < logits.shape[0]:
                        last_token_logits = logits[actual_len - 1, :]
                        predicted_token = torch.argmax(last_token_logits).item()

                        is_correct = predicted_token == sample['expected_token']
                        if is_correct:
                            correct_count += 1

                        if idx < 15:
                            debug_info.append(
                                {
                                    'idx': idx,
                                    'prompt': sample['prompt'],
                                    'answer': sample['answer'],
                                    'predicted_token': predicted_token,
                                    'expected_token': sample['expected_token'],
                                    'pred_text': tokenizer.decode([predicted_token]),
                                    'exp_text': tokenizer.decode([sample['expected_token']]),
                                    'is_correct': is_correct,
                                }
                            )
                        total_count += 1

                except Exception as e:
                    if idx < 5:
                        print(f" Sample {idx} error: {e}")
                    continue
        model.train()

        # Detailed results
        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
        for result in debug_info:
            status = "Correct" if result['is_correct'] else "Incorrect"
            print(f"\nSample {result['idx']}: {status}")
            print(f" Prompt : '{result['prompt']}'")
            print(f" Expected answer: '{result['answer']}'")
            print(
                f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
            )
            print(
                f" Expected : {result['expected_token']} -> '{result['exp_text']}'"
            )

        if total_count > 0:
            accuracy = correct_count / total_count
            print(
                "\n" + "=" * 70 + "\n Final result:\n"
                + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
                + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
            )
            return accuracy, correct_count, total_count
        else:
            print("\n No samples were successfully processed")
            return 0.0, 0, 0

    except Exception as e:
        print(f" Fatal error: {e}")
        import traceback
        traceback.print_exc()
        return 0.0, 0, 0


def generate_powerlaw_selection_counts(m: int):
    """Construct class sample counts to match the paper's power-law distribution:
    group 0 holds 1 class with 2**m samples; group g >= 1 holds 2**(g-1) classes
    with 2**(m-g) samples each."""
    selection_counts = {}
    class_groups = []
    class_id = 0
    for group_id in range(m + 1):
        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
        samples_per_class = 2 ** (m - group_id)
        if samples_per_class < 1:
            continue
        for _ in range(num_classes):
            selection_counts[class_id] = samples_per_class
            class_groups.append(group_id)
            class_id += 1
    return selection_counts, class_groups
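
# Illustrative worked example (not part of the original script): for m = 3 the
# allocation above is one head class with 2**3 = 8 samples, then groups of
# doubling size with halving per-class counts:
#   group 0: 1 class  x 8 samples  -> class 0
#   group 1: 1 class  x 4 samples  -> class 1
#   group 2: 2 classes x 2 samples -> classes 2-3
#   group 3: 4 classes x 1 sample  -> classes 4-7
def _demo_powerlaw_counts():
    counts, groups = generate_powerlaw_selection_counts(3)
    assert counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
    assert groups == [0, 1, 2, 2, 3, 3, 3, 3]
    return counts, groups
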

def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
    """
    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
    """
    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
    model.eval()

    # 1. Load and sample data
    with open(qa_data_path, 'r', encoding='utf-8') as f:
        qa_data = [json.loads(line) for line in f]

    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
        data_by_class = defaultdict(list)
        for item in qa_data: data_by_class[item['class_id']].append(item)
        sample_ratio = num_samples / len(qa_data)
        stratified_sample_data = []
        for class_id, items in data_by_class.items():
            num_to_sample = max(1, int(len(items) * sample_ratio))
            sampled_items = random.sample(items, min(len(items), num_to_sample))
            stratified_sample_data.extend(sampled_items)
        qa_data = stratified_sample_data
        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)

    # 2. Initialize counters
    group_losses = defaultdict(float)
    group_loss_counts = defaultdict(int)  # For loss sample count
    group_correct = defaultdict(int)
    group_total_fta = defaultdict(int)  # For FTA sample count

    # 3. Evaluation loop
    with torch.no_grad():
        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
            if not item or 'text' not in item or not item['text']: continue

            group_id = class_to_group_map.get(item['class_id'])
            if group_id is None: continue

            # --- Data prep for Loss ---
            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
            tokens.append(tokenizer.eos_token_id)
            original_len = len(tokens)
            if original_len < 2: continue

            BLOCK_SIZE = 128
            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
            max_eval_len = 4096
            padded_len = min(padded_len, max_eval_len)

            final_tokens = tokens[:padded_len]
            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)

            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
            target_seq_list += [-100] * (padded_len - len(target_seq_list))
            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)

            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)

            # --- Data prep for FTA ---
            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
            if not match: continue
            prompt, answer = match.groups()
            prompt, answer = prompt.strip(), answer.strip()
            if not answer: continue

            try:
                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
            except IndexError:
                continue

            # --- Model call (once only) ---
            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
            # If the model returns a tuple, the logits are its last element
            # (matching the tuple handling in compute_first_token_accuracy above)
            if isinstance(logits, tuple): logits = logits[-1]

            # --- Compute Loss ---
            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
            if not torch.isnan(loss):
                group_losses[group_id] += loss.item()
                group_loss_counts[group_id] += 1

            # --- Compute FTA ---
            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
                predicted_token = torch.argmax(last_token_logits).item()

                if predicted_token == expected_token:
                    group_correct[group_id] += 1
                group_total_fta[group_id] += 1

    # 4. Aggregate results
    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}

    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0

    print0("--- Detailed Evaluation Complete ---", console=True)
    return {
        'per_class_loss': avg_group_loss,
        'per_class_acc': avg_group_acc,
        'total_loss': total_loss,
        'total_acc': total_acc
    }
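
# Hedged sketch (illustrative; the sample text is made up): how the FTA regex
# above splits a QA record into prompt and answer, and why the expected first
# token is taken from the answer with a leading space -- GPT-2 BPE treats
# " Paris" and "Paris" as different tokens.
def _demo_fta_split():
    sample = "What city was Alice born in? Answer: Paris"
    m = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', sample, re.IGNORECASE)
    prompt, answer = m.group(1).strip(), m.group(2).strip()
    # prompt == "What city was Alice born in?", answer == "Paris"
    # the expected first token would be tokenizer.encode(' ' + answer)[0]
    return prompt, answer
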

def plot_curves(history, output_path, title, y_label, y_lim=None):
    """Generic plotting function; history is keyed by training step (as strings)"""
    plt.style.use('seaborn-v0_8-whitegrid')
    fig, ax = plt.subplots(figsize=(8, 6))
    if not history:
        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
        plt.close()
        return

    is_per_class = isinstance(next(iter(history.values())), dict)

    if is_per_class:
        group_ids = sorted([int(g) for g in history.keys()])
        cmap = plt.get_cmap("viridis")
        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
        for group_id_int in group_ids:
            group_id_str = str(group_id_int)
            step_data = history[group_id_str]
            steps = sorted([int(s) for s in step_data.keys()])
            values = [step_data[str(s)] for s in steps]
            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
    else:
        steps = sorted([int(s) for s in history.keys()])
        values = [history[str(s)] for s in steps]
        ax.plot(steps, values, linewidth=2.5)

    ax.set_xlabel("Step", fontsize=14)
    ax.set_ylabel(y_label, fontsize=14)
    ax.set_title(title, fontsize=16)
    ax.tick_params(axis='both', which='major', labelsize=12)

    if y_lim:
        ax.set_ylim(y_lim)
    else:
        all_values = []
        if is_per_class:
            for group_data in history.values(): all_values.extend(group_data.values())
        else:
            all_values = list(history.values())
        if all_values:
            min_val, max_val = min(all_values), max(all_values)
            ax.set_ylim(min_val * 0.95, max_val * 1.05)

    ax.grid(True)
    plt.tight_layout()
    plt.savefig(output_path, dpi=300)
    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
    plt.close()

# =====================================================================
# <<<<< End of Additions / Replacements >>>>>
# =====================================================================
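
# Hedged sketch of the history layout plot_curves expects (values made up for
# illustration): both levels are keyed by strings, group id -> step -> value
# for the per-class curves, or step -> value for the totals.
_EXAMPLE_PER_CLASS_HISTORY = {
    "0": {"500": 3.2, "1000": 2.1},
    "1": {"500": 3.4, "1000": 2.5},
}
_EXAMPLE_TOTAL_HISTORY = {"500": 3.3, "1000": 2.3}
# e.g. plot_curves(_EXAMPLE_PER_CLASS_HISTORY, Path("demo.png"), "Demo", "Loss")
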

def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
    """
    Internal evaluation on original QA data for per-class loss.
    (Final fixed version: NameError resolved)
    """
    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
    model.eval()

    # =================================================================
    # <<<<< Restored Missing Code >>>>>
    # =================================================================
    # 1. Load and sample data
    with open(qa_data_path, 'r', encoding='utf-8') as f:
        qa_data = [json.loads(line) for line in f]

    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
        data_by_class = defaultdict(list)
        for item in qa_data:
            data_by_class[item['class_id']].append(item)
        sample_ratio = num_samples / len(qa_data)
        stratified_sample_data = []
        for class_id, items in data_by_class.items():
            num_to_sample = max(1, int(len(items) * sample_ratio))
            sampled_items = random.sample(items, min(len(items), num_to_sample))
            stratified_sample_data.extend(sampled_items)
        qa_data = stratified_sample_data
        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
    # =================================================================

    # 2. Create class-to-group mapping
    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}

    group_losses = defaultdict(float)
    group_counts = defaultdict(int)

    # 3. Evaluation loop
    with torch.no_grad():
        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
            if not item or 'text' not in item or not item['text']: continue
            group_id = class_to_group_map.get(item['class_id'])
            if group_id is None: continue

            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
            tokens.append(tokenizer.eos_token_id)

            original_len = len(tokens)
            if original_len < 2: continue

            BLOCK_SIZE = 128
            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
            max_eval_len = 4096
            padded_len = min(padded_len, max_eval_len)

            final_tokens = tokens[:padded_len]
            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))

            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)

            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
            target_seq_list += [-100] * (padded_len - len(target_seq_list))
            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)

            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)

            loss = model(input_seq, target_seq, window_blocks)

            if loss is not None and not torch.isnan(loss):
                group_losses[group_id] += loss.item()
                group_counts[group_id] += 1

    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
                        for group in group_losses if group_counts[group] > 0}

    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
    return avg_group_losses
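
# Illustrative sketch of the padding arithmetic used in both evaluation
# functions above: sequences are right-padded to a multiple of BLOCK_SIZE=128
# (capped at 4096), and targets are the inputs shifted left by one with -100
# marking positions that cross_entropy should ignore.
def _demo_padded_len(original_len: int = 200, block_size: int = 128, cap: int = 4096) -> int:
    padded_len = ((original_len + block_size - 1) // block_size) * block_size
    # original_len=200 -> padded_len=256, i.e. two flex-attention blocks
    return min(padded_len, cap)
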

def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
    """Plot loss curves from aggregated history data"""
    plt.style.use('seaborn-v0_8-whitegrid')
    fig, ax = plt.subplots(figsize=(8, 6))
    if not loss_history:
        print0("Warning: Loss history is empty. Cannot plot.", console=True)
        plt.close()
        return
    group_ids = sorted([int(g) for g in loss_history.keys()])
    cmap = plt.get_cmap("viridis")
    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
    for group_id_int in group_ids:
        group_id_str = str(group_id_int)
        step_data = loss_history[group_id_str]
        steps = sorted([int(s) for s in step_data.keys()])
        losses = [step_data[str(s)] for s in steps]
        ax.plot(steps, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
    ax.set_xlabel("Step", fontsize=14)
    ax.set_ylabel("Per-Class Loss", fontsize=14)
    ax.set_title(plot_title, fontsize=16)
    ax.tick_params(axis='both', which='major', labelsize=12)
    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
    if all_losses:
        min_loss, max_loss = min(all_losses), max(all_losses)
        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
    ax.legend(title="Class Group")
    ax.grid(True)
    plt.tight_layout()
    plt.savefig(output_path, dpi=300)
    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
    plt.close()


if run_flag:
    ########################################
    #   Construct model and optimizer      #
    ########################################
    if master_process:
        try:
            ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
            print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
        except Exception:
            ft_tokenizer = None
            print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
    else:
        ft_tokenizer = None

    print0("PRINT: Constructing model...", console=True)
    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
    for m in model.modules():
        if isinstance(m, nn.Embedding):
            m.bfloat16()
    print0("PRINT: Broadcasting model parameters...", console=True)
    for param in model.parameters():
        dist.broadcast(param.detach(), 0)
    print0("PRINT: Model constructed and broadcasted.", console=True)

    if master_process:
        print0("PRINT: Testing model forward function:", console=True)
        try:
            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
            test_blocks = torch.tensor(1, device=device)
            model.eval()
            with torch.no_grad():
                result = model(test_input, None, test_blocks)
            model.train()

            print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
            if isinstance(result, tuple):
                print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
                if len(result) >= 2:
                    print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
                    print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
            else:
                print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
        except Exception as e:
            print0(f"PRINT: Model test failed: {e}", console=True)

    model_for_inference = model
    print0("PRINT: Saved original model reference for inference.", console=True)
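
    # Descriptive note (inferred from the smoke tests around here): the model's
    # forward signature is (input_seq, target_seq, sliding_window_num_blocks).
    # With target_seq=None it yields logits (possibly as the last element of a
    # tuple); with targets it yields the scalar training loss.
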
    if master_process:
        print0("PRINT: Testing model with target_seq=None...", console=True)
        try:
            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
            test_blocks = torch.tensor(1, device=device)
            model.eval()
            with torch.no_grad():
                result = model(test_input, None, test_blocks)  # target_seq=None
            model.train()

            if isinstance(result, tuple) and len(result) == 2:
                loss, logits = result
                print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
            else:
                print0(f"PRINT: Model returns: {type(result)}", console=True)
        except Exception as e:
            print0(f"PRINT: Model test still fails: {e}", console=True)

    # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
    if exp_args.model_parameterization == "qkvo":
        print0("PRINT: Collecting parameters for optimizers...", console=True)
        head_params = [model.lm_head.weight]
        embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]

        # Granular collection for attention and MLP parts
        attn_q_params = []
        attn_k_params = []
        attn_v_params = []
        attn_o_params = []  # W_O from c_proj
        mlp_fc_params = []
        mlp_proj_params = []

        for block_module in model.blocks:
            if block_module.attn is not None:
                # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
                if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
                else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
                else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
                else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
                attn_o_params.append(block_module.attn.c_proj.weight)
            if block_module.mlp is not None:
                mlp_fc_params.append(block_module.mlp.c_fc.weight)
                mlp_proj_params.append(block_module.mlp.c_proj.weight)

        # Combine into logical groups for experiments
        attn_qk_group = attn_q_params + attn_k_params
        attn_vo_group = attn_v_params + attn_o_params
        all_attn_matrices = attn_qk_group + attn_vo_group
        mlp_w1_group = mlp_fc_params
        mlp_w2_group = mlp_proj_params
        all_mlp_matrices = mlp_fc_params + mlp_proj_params

        # Scalar parameters (all others not explicitly grouped as matrices)
        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
        for p_scalar in scalar_params:  # Sanity check
            if p_scalar.ndim >= 2:
                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)

        # Determine parameter distribution based on optimizer_mode
        muon_params_target_list = []
        adam_matrix_target_list = []  # Matrices that Adam will handle specifically
        adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)

        current_optimizer_mode = exp_args.optimizer_mode
        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)

        if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
            print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
            muon_params_target_list = all_attn_matrices + all_mlp_matrices
            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
        elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
            print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
            muon_params_target_list = attn_vo_group + mlp_w2_group
            adam_matrix_target_list = attn_qk_group + mlp_w1_group
        else:
            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")

        # Adam optimizer setup
        adam_param_groups_config = [
            #dict(params=head_params, lr=0.22),
            #dict(params=embed_params, lr=0.6),
            #dict(params=scalar_params, lr=0.04)
            dict(params=head_params, lr=exp_args.adam_lr),
            dict(params=embed_params, lr=exp_args.adam_lr),
            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
        ]
        # Add matrices specifically assigned to Adam for this experiment mode
        if adam_matrix_target_list:
            # Ensure adam_matrix_target_list is flat and contains Parameters
            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
            if flat_adam_matrices:  # Only add group if there are params
                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))

        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 here if Adam regularization is desired
        optimizers = [optimizer1]  # Start with Adam

        # Muon optimizer setup
        if muon_params_target_list:
            # Ensure muon_params_target_list is flat, unique, and contains Parameters
            flat_unique_muon_params = []
            seen_muon_ids = set()
            for sublist_or_p in muon_params_target_list:
                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
                    if p is not None and id(p) not in seen_muon_ids:
                        flat_unique_muon_params.append(p)
                        seen_muon_ids.add(id(p))

            if flat_unique_muon_params:  # Only create Muon if it has parameters
                optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
                optimizers.append(optimizer2)
            else:
                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
                optimizer2 = None  # Explicitly set to None if not created
        else:
            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
            optimizer2 = None  # Explicitly set to None

        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
        if optimizer2:
            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
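
    # Summary of the optimizer_mode branches above (descriptive comment; the
    # head, embeddings and scalar parameters always go to Adam):
    #   mode 0: Muon = all attn (QKVO) + all MLP | Adam matrices = none
    #   mode 1: Muon = QK attn                   | Adam matrices = VO attn + MLP
    #   mode 2: Muon = VO attn                   | Adam matrices = QK attn + MLP
    #   mode 3: Muon = all attn (QKVO)           | Adam matrices = MLP
    #   mode 4: Muon = all MLP                   | Adam matrices = all attn
    #   mode 5: Muon = none (all Adam)           | Adam matrices = all attn + MLP
    #   mode 6: Muon = W_2 MLP                   | Adam matrices = attn + W_1 MLP
    #   mode 7: Muon = VO attn + all MLP         | Adam matrices = QK attn
    #   mode 8: Muon = VO attn + W_2 MLP         | Adam matrices = QK attn + W_1 MLP
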
    elif exp_args.model_parameterization == "whole":
        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
        scalar_params = [p for p in model.parameters() if p.ndim < 2]
        head_params = [model.lm_head.weight]

        # init the optimizer(s)
        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
        optimizers = [optimizer1, optimizer2]

    for opt in optimizers:
        for group in opt.param_groups:
            group["initial_lr"] = group["lr"]

    # learning rate schedule: stable then decay
    def get_lr(step: int):
        x = step / args.num_iterations  # progress in training
        # The original `assert 0 <= x < 1` fails on the final step, so clamp instead
        if not (0 <= x <= 1):
            x = min(max(x, 0.0), 1.0)

        if x < 1 - args.cooldown_frac:
            return 1.0
        else:
            # Ensure cooldown_frac is not zero to avoid division by zero
            w = (1 - x) / max(args.cooldown_frac, 1e-9)
            return w * 1.0 + (1 - w) * 0.1

    # attention window size schedule
    def next_multiple_of_n(v: float | int, *, n: int):
        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)

    @lru_cache(1)
    def get_window_size_blocks_helper(window_size: int):
        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)

    def get_window_size_blocks(step: int):
        x = step / args.num_iterations  # progress in training
        if not (0 <= x <= 1):
            x = min(max(x, 0.0), 1.0)  # Clamp x

        # Ensure window_size is at least 128; it grows with training progress
        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
        return get_window_size_blocks_helper(window_size)

    print0("PRINT: Compiling model with TorchInductor...", console=True)
    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
    print0("PRINT: Model compilation complete.", console=True)

    ########################################
    #            Warmup kernels            #
    ########################################
    print0("PRINT: Starting warmup...", console=True)
    warmup_steps = 10
    initial_state = dict(
        model=copy.deepcopy(model_compiled.state_dict()),
        optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
    )

    for i in range(warmup_steps):
        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
        loss.backward()
        for param in model_compiled.parameters():
            if param.grad is not None:
                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
        for opt in optimizers:
            opt.step()
        model_compiled.zero_grad(set_to_none=True)
    model_compiled.load_state_dict(initial_state["model"])
    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
        opt.load_state_dict(opt_state)

    del initial_state
    print0("PRINT: Warmup complete.", console=True)
    torch.cuda.synchronize()

    ########################################
    #        Training and validation       #
    ########################################
    print0("PRINT: Starting training...", console=True)
    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
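
    # Worked example of get_lr above (illustrative): with num_iterations=10000
    # and cooldown_frac=0.8 the multiplier is 1.0 for the first 2000 steps,
    # then decays linearly toward 0.1:
    #   get_lr(0)     -> 1.0
    #   get_lr(2000)  -> 1.0
    #   get_lr(6000)  -> 0.55   (w = 0.5)
    #   get_lr(10000) -> 0.1
    train_loss_sum = 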
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
            break

        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
        loss_train.backward()
        train_loss_sum += loss_train.detach() / args.train_seq_len
        train_step_count += 1

        for param in model_compiled.parameters():
            if param.grad is not None:
                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)

        current_lr_val = get_lr(step)
        for opt in optimizers:
            for group in opt.param_groups:
                group["lr"] = group["initial_lr"] * current_lr_val

        if optimizer2 is not None:
            for group in optimizer2.param_groups:
                frac = min(step / 300, 1)
                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95

        for opt in optimizers:
            opt.step()

        model_compiled.zero_grad(set_to_none=True)

        if step > 0 and (step % 20 == 0 or step == train_steps - 1):
            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
            approx_total_training_time_ms = training_time_ms + current_segment_time_ms
            total_tokens_in_batch = args.train_seq_len * world_size
            train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)

    print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
    print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
           f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)

    if dist.is_initialized():
        dist.destroy_process_group()
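
# Hedged post-hoc sketch (illustrative; not executed by the training run):
# recovering the val_loss series from a training_log_*.txt written by print0
# above, for quick re-plotting without rerunning the evaluation.
def parse_val_losses(log_path: str) -> dict:
    """Map step -> val_loss from lines like 'step:500/10000 ... val_loss:3.2100 ...'."""
    step_to_loss = {}
    pattern = re.compile(r"step:(\d+)/\d+ .*?val_loss:([0-9.]+)")
    with open(log_path) as f:
        for line in f:
            m = pattern.search(line)
            if m:
                step_to_loss[int(m.group(1))] = float(m.group(2))
    return step_to_loss
# e.g. parse_val_losses(str(logfile)) -> {0: 10.83, 500: 3.21, ...} (values illustrative)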
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function; history is keyed by training step."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(s) for s in step_data.keys()])
+            values = [step_data[str(s)] for s in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(s) for s in history.keys()])
+        values = [history[str(s)] for s in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
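+    Classes are mapped to power-law groups via generate_powerlaw_selection_counts(m_val),
+    and the mean next-token loss is reported per group.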
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-09 08:31:41] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-09 08:31:41] [Rank 0] PRINT: Constructing model...
+[2025-07-09 08:31:43] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-09 08:31:43] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-09 08:31:43] [Rank 0] PRINT: Testing model forward function:
+[2025-07-09 08:31:44] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-09 08:31:44] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-09 08:31:44] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-09 08:31:44] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-09 08:31:44] [Rank 0] PRINT: Model returns: 
+[2025-07-09 08:31:44] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-09 08:31:44] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-09 08:31:44] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-09 08:31:44] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-09 08:31:44] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-09 08:31:44] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-09 08:31:44] [Rank 0] PRINT: Model compilation complete.
+[2025-07-09 08:31:44] [Rank 0] PRINT: Starting warmup...
+[2025-07-09 08:32:55] [Rank 0] PRINT: Warmup complete.
+[2025-07-09 08:32:55] [Rank 0] PRINT: Starting training...
+[2025-07-09 08:32:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 08:33:03] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-09 08:33:04] [Rank 0] step:21/10000 train_time:1649ms step_avg:78.54ms
+[2025-07-09 08:33:06] [Rank 0] step:41/10000 train_time:3099ms step_avg:75.58ms
+[2025-07-09 08:33:07] [Rank 0] step:61/10000 train_time:4546ms step_avg:74.53ms
+[2025-07-09 08:33:09] [Rank 0] step:81/10000 train_time:5996ms step_avg:74.03ms
+[2025-07-09 08:33:10] [Rank 0] step:101/10000 train_time:7774ms step_avg:76.97ms
+[2025-07-09 08:33:12] [Rank 0] step:121/10000 train_time:9226ms step_avg:76.25ms
+[2025-07-09 08:33:13] [Rank 0] step:141/10000 train_time:10682ms step_avg:75.76ms
+[2025-07-09 08:33:15] [Rank 0] step:161/10000 train_time:12137ms step_avg:75.38ms
+[2025-07-09 08:33:16] [Rank 0] step:181/10000 train_time:13646ms step_avg:75.39ms
+[2025-07-09 08:33:18] [Rank 0] step:201/10000 train_time:15290ms step_avg:76.07ms
+[2025-07-09 08:33:19] [Rank 0] step:221/10000 train_time:16751ms step_avg:75.80ms
+[2025-07-09 08:33:21] [Rank 0] step:241/10000 train_time:18211ms step_avg:75.56ms
+[2025-07-09 08:33:22] [Rank 0] step:261/10000 train_time:19674ms step_avg:75.38ms
+[2025-07-09 08:33:24] [Rank 0] step:281/10000 train_time:21373ms step_avg:76.06ms
+[2025-07-09 08:33:25] [Rank 0] step:301/10000 train_time:22833ms step_avg:75.86ms
+[2025-07-09 08:33:27] [Rank 0] step:321/10000 train_time:24296ms step_avg:75.69ms
+[2025-07-09 08:33:28] [Rank 0] step:341/10000 train_time:25757ms step_avg:75.53ms
+[2025-07-09 08:33:31] [Rank 0] step:361/10000 train_time:27218ms step_avg:75.40ms
+[2025-07-09 08:33:32] [Rank 0] step:381/10000 train_time:29339ms step_avg:77.01ms
+[2025-07-09 08:33:33] [Rank 0] step:401/10000 train_time:30802ms step_avg:76.81ms
+[2025-07-09 08:33:35] [Rank 0] step:421/10000 train_time:32264ms step_avg:76.64ms
+[2025-07-09 08:33:36] [Rank 0] step:441/10000 train_time:33727ms step_avg:76.48ms
+[2025-07-09 08:33:39] [Rank 0] step:461/10000 train_time:35839ms step_avg:77.74ms
+[2025-07-09 08:33:40] [Rank 0] step:481/10000 train_time:37303ms step_avg:77.55ms
+[2025-07-09 08:33:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 08:33:42] [Rank 0] PRINT: step:500/10000 train_loss:4.9443 val_loss:2.0530 train_time:38764ms step_avg:77.53ms
+[2025-07-09 08:33:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 08:33:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 08:33:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 08:38:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 08:38:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 08:38:57] [Rank 0] Total Loss: 4.0420
+[2025-07-09 08:38:57] [Rank 0] Total FTA: 0.0836
+[2025-07-09 08:38:57] [Rank 0] Group 0 Loss: 4.2635
+[2025-07-09 08:38:57] [Rank 0] Group 1 Loss: 3.9855
+[2025-07-09 08:38:57] [Rank 0] Group 2 Loss: 3.9795
+[2025-07-09 08:38:57] [Rank 0] Group 3 Loss: 4.0191
+[2025-07-09 08:38:57] [Rank 0] Group 4 Loss: 4.0373
+[2025-07-09 08:38:57] [Rank 0] Group 5 Loss: 3.9747
+[2025-07-09 08:38:57] [Rank 0] Group 6 Loss: 3.9630
+[2025-07-09 08:38:57] [Rank 0] Group 7 Loss: 4.0272
+[2025-07-09 08:38:57] [Rank 0] Group 8 Loss: 4.0246
+[2025-07-09 08:38:57] [Rank 0] Group 9 Loss: 3.9958
+[2025-07-09 08:38:57] [Rank 0] Group 10 Loss: 4.0078
+[2025-07-09 08:38:57] [Rank 0] Group 11 Loss: 4.0260
+[2025-07-09 08:38:57] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-09 08:38:57] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 08:38:57] [Rank 0] Group 2 FTA: 0.0521
+[2025-07-09 08:38:57] [Rank 0] Group 3 FTA: 0.0703
+[2025-07-09 08:38:57] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-09 08:38:57] [Rank 0] Group 5 FTA: 0.0677
+[2025-07-09 08:38:57] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-09 08:38:57] [Rank 0] Group 7 FTA: 0.1172
+[2025-07-09 08:38:57] [Rank 0] Group 8 FTA: 0.1016
+[2025-07-09 08:38:57] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-09 08:38:57] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-09 08:38:57] [Rank 0] Group 11 FTA: 0.0713
+[2025-07-09 08:38:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_loss_curves.png
+[2025-07-09 08:38:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_acc_curves.png
+[2025-07-09 08:38:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_loss_curve.png
+[2025-07-09 08:38:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_acc_curve.png
+[2025-07-09 08:38:59] [Rank 0] step:501/10000 train_time:38787ms step_avg:77.42ms
+[2025-07-09 08:39:00] [Rank 0] step:521/10000 train_time:40239ms step_avg:77.23ms
+[2025-07-09 08:39:02] [Rank 0] step:541/10000 train_time:41749ms step_avg:77.17ms
+[2025-07-09 08:39:04] [Rank 0] step:561/10000 train_time:43815ms step_avg:78.10ms
+[2025-07-09 08:39:05] [Rank 0] step:581/10000 train_time:45267ms step_avg:77.91ms
+[2025-07-09 08:39:07] [Rank 0] step:601/10000 train_time:46719ms step_avg:77.74ms
+[2025-07-09 08:39:08] [Rank 0] step:621/10000 train_time:48174ms step_avg:77.57ms
+[2025-07-09 08:39:10] [Rank 0] step:641/10000 train_time:49868ms step_avg:77.80ms
+[2025-07-09 08:39:11] [Rank 0] step:661/10000 train_time:51559ms step_avg:78.00ms
+[2025-07-09 08:39:13] [Rank 0] step:681/10000 train_time:53016ms step_avg:77.85ms
+[2025-07-09 08:39:14] [Rank 0] step:701/10000 train_time:54548ms step_avg:77.81ms
+[2025-07-09 08:39:16] [Rank 0] step:721/10000 train_time:56057ms step_avg:77.75ms
+[2025-07-09 08:39:18] [Rank 0] step:741/10000 train_time:57808ms step_avg:78.01ms
+[2025-07-09 08:39:19] [Rank 0] step:761/10000 train_time:59275ms step_avg:77.89ms
+[2025-07-09 08:39:21] [Rank 0] step:781/10000 train_time:60745ms step_avg:77.78ms
+[2025-07-09 08:39:22] [Rank 0] step:801/10000 train_time:62214ms step_avg:77.67ms
+[2025-07-09 08:39:24] [Rank 0] step:821/10000 train_time:63925ms step_avg:77.86ms
+[2025-07-09 08:39:25] [Rank 0] step:841/10000 train_time:65394ms step_avg:77.76ms
+[2025-07-09 08:39:27] [Rank 0] step:861/10000 train_time:66864ms step_avg:77.66ms
+[2025-07-09 08:39:28] [Rank 0] step:881/10000 train_time:68336ms step_avg:77.57ms
+[2025-07-09 08:39:30] [Rank 0] step:901/10000 train_time:70477ms step_avg:78.22ms
+[2025-07-09 08:39:32] [Rank 0] step:921/10000 train_time:71928ms step_avg:78.10ms
+[2025-07-09 08:39:33] [Rank 0] step:941/10000 train_time:73398ms step_avg:78.00ms
+[2025-07-09 08:39:35] [Rank 0] step:961/10000 train_time:74871ms step_avg:77.91ms
+[2025-07-09 08:39:36] [Rank 0] step:981/10000 train_time:76340ms step_avg:77.82ms
+[2025-07-09 08:39:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 08:39:39] [Rank 0] PRINT: step:1000/10000 train_loss:1.7027 val_loss:1.5419 train_time:78453ms step_avg:78.45ms
+[2025-07-09 08:39:39] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 08:39:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 08:39:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 08:44:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 08:44:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 08:44:55] [Rank 0] Total Loss: 4.2187
+[2025-07-09 08:44:55] [Rank 0] Total FTA: 0.1163
+[2025-07-09 08:44:55] [Rank 0] Group 0 Loss: 4.4784
+[2025-07-09 08:44:55] [Rank 0] Group 1 Loss: 4.0870
+[2025-07-09 08:44:55] [Rank 0] Group 2 Loss: 3.9479
+[2025-07-09 08:44:55] [Rank 0] Group 3 Loss: 4.1613
+[2025-07-09 08:44:55] [Rank 0] Group 4 Loss: 4.1987
+[2025-07-09 08:44:55] [Rank 0] Group 5 Loss: 4.1078
+[2025-07-09 08:44:55] [Rank 0] Group 6 Loss: 4.1356
+[2025-07-09 08:44:55] [Rank 0] Group 7 Loss: 4.2486
+[2025-07-09 08:44:55] [Rank 0] Group 8 Loss: 4.2687
+[2025-07-09 08:44:55] [Rank 0] Group 9 Loss: 4.1757
+[2025-07-09 08:44:55] [Rank 0] Group 10 Loss: 4.2598
+[2025-07-09 08:44:55] [Rank 0] Group 11 Loss: 4.2368
+[2025-07-09 08:44:55] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-09 08:44:55] [Rank 0] Group 1 FTA: 0.1875
+[2025-07-09 08:44:55] [Rank 0] Group 2 FTA: 0.1458
+[2025-07-09 08:44:55] [Rank 0] Group 3 FTA: 0.0781
+[2025-07-09 08:44:55] [Rank 0] Group 4 FTA: 0.1094
+[2025-07-09 08:44:55] [Rank 0] Group 5 FTA: 0.0625
+[2025-07-09 08:44:55] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-09 08:44:55] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-09 08:44:55] [Rank 0] Group 8 FTA: 0.1198
+[2025-07-09 08:44:55] [Rank 0] Group 9 FTA: 0.0664
+[2025-07-09 08:44:55] [Rank 0] Group 10 FTA: 0.1211
+[2025-07-09 08:44:55] [Rank 0] Group 11 FTA: 0.0967
+[2025-07-09 08:44:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_loss_curves.png
+[2025-07-09 08:44:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_acc_curves.png
+[2025-07-09 08:44:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_loss_curve.png
+[2025-07-09 08:44:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_acc_curve.png
+[2025-07-09 08:44:56] [Rank 0] step:1001/10000 train_time:78477ms step_avg:78.40ms
+[2025-07-09 08:44:57] [Rank 0] step:1021/10000 train_time:79945ms step_avg:78.30ms
+[2025-07-09 08:44:59] [Rank 0] step:1041/10000 train_time:81407ms step_avg:78.20ms
+[2025-07-09 08:45:00] [Rank 0] step:1061/10000 train_time:82868ms step_avg:78.10ms
+[2025-07-09 08:45:03] [Rank 0] step:1081/10000 train_time:84997ms step_avg:78.63ms
+[2025-07-09 08:45:04] [Rank 0] step:1101/10000 train_time:86438ms step_avg:78.51ms
+[2025-07-09 08:45:05] [Rank 0] step:1121/10000 train_time:87901ms step_avg:78.41ms
+[2025-07-09 08:45:07] [Rank 0] step:1141/10000 train_time:89361ms step_avg:78.32ms
+[2025-07-09 08:45:08] [Rank 0] step:1161/10000 train_time:90824ms step_avg:78.23ms
+[2025-07-09 08:45:10] [Rank 0] step:1181/10000 train_time:92941ms step_avg:78.70ms
+[2025-07-09 08:45:12] [Rank 0] step:1201/10000 train_time:94404ms step_avg:78.60ms
+[2025-07-09 08:45:13] [Rank 0] step:1221/10000 train_time:95866ms step_avg:78.51ms
+[2025-07-09 08:45:15] [Rank 0] step:1241/10000 train_time:97331ms step_avg:78.43ms
+[2025-07-09 08:45:17] [Rank 0] step:1261/10000 train_time:99051ms step_avg:78.55ms
+[2025-07-09 08:45:18] [Rank 0] step:1281/10000 train_time:100905ms step_avg:78.77ms
+[2025-07-09 08:45:20] [Rank 0] step:1301/10000 train_time:102370ms step_avg:78.69ms
+[2025-07-09 08:45:21] [Rank 0] step:1321/10000 train_time:103837ms step_avg:78.60ms
+[2025-07-09 08:45:23] [Rank 0] step:1341/10000 train_time:105304ms step_avg:78.53ms
+[2025-07-09 08:45:25] [Rank 0] step:1361/10000 train_time:107432ms step_avg:78.94ms
+[2025-07-09 08:45:26] [Rank 0] step:1381/10000 train_time:108898ms step_avg:78.85ms
+[2025-07-09 08:45:28] [Rank 0] step:1401/10000 train_time:110364ms step_avg:78.78ms
+[2025-07-09 08:45:29] [Rank 0] step:1421/10000 train_time:111833ms step_avg:78.70ms
+[2025-07-09 08:45:31] [Rank 0] step:1441/10000 train_time:113602ms step_avg:78.84ms
+[2025-07-09 08:45:33] [Rank 0] step:1461/10000 train_time:115144ms step_avg:78.81ms
+[2025-07-09 08:45:34] [Rank 0] step:1481/10000 train_time:116668ms step_avg:78.78ms
+[2025-07-09 08:45:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 08:45:37] [Rank 0] PRINT: step:1500/10000 train_loss:1.4321 val_loss:1.3387 train_time:118135ms step_avg:78.76ms
+[2025-07-09 08:45:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 08:45:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 08:45:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 08:50:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 08:50:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 08:50:52] [Rank 0] Total Loss: 4.4019
+[2025-07-09 08:50:52] [Rank 0] Total FTA: 0.2086
+[2025-07-09 08:50:52] [Rank 0] Group 0 Loss: 4.5256
+[2025-07-09 08:50:52] [Rank 0] Group 1 Loss: 4.4053
+[2025-07-09 08:50:52] [Rank 0] Group 2 Loss: 4.0798
+[2025-07-09 08:50:52] [Rank 0] Group 3 Loss: 4.4730
+[2025-07-09 08:50:52] [Rank 0] Group 4 Loss: 4.4022
+[2025-07-09 08:50:52] [Rank 0] Group 5 Loss: 4.3035
+[2025-07-09 08:50:52] [Rank 0] Group 6 Loss: 4.3597
+[2025-07-09 08:50:52] [Rank 0] Group 7 Loss: 4.4481
+[2025-07-09 08:50:52] [Rank 0] Group 8 Loss: 4.4061
+[2025-07-09 08:50:52] [Rank 0] Group 9 Loss: 4.3251
+[2025-07-09 08:50:52] [Rank 0] Group 10 Loss: 4.4199
+[2025-07-09 08:50:52] [Rank 0] Group 11 Loss: 4.4456
+[2025-07-09 08:50:52] [Rank 0] Group 0 FTA: 0.1769
+[2025-07-09 08:50:52] [Rank 0] Group 1 FTA: 0.1250
+[2025-07-09 08:50:52] [Rank 0] Group 2 FTA: 0.3438
+[2025-07-09 08:50:52] [Rank 0] Group 3 FTA: 0.1693
+[2025-07-09 08:50:52] [Rank 0] Group 4 FTA: 0.2161
+[2025-07-09 08:50:52] [Rank 0] Group 5 FTA: 0.2083
+[2025-07-09 08:50:52] [Rank 0] Group 6 FTA: 0.1979
+[2025-07-09 08:50:52] [Rank 0] Group 7 FTA: 0.2656
+[2025-07-09 08:50:52] [Rank 0] Group 8 FTA: 0.2266
+[2025-07-09 08:50:52] [Rank 0] Group 9 FTA: 0.1953
+[2025-07-09 08:50:52] [Rank 0] Group 10 FTA: 0.2207
+[2025-07-09 08:50:52] [Rank 0] Group 11 FTA: 0.1982
+[2025-07-09 08:50:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_loss_curves.png
+[2025-07-09 08:50:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_acc_curves.png
+[2025-07-09 08:50:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_loss_curve.png
+[2025-07-09 08:50:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_acc_curve.png
+[2025-07-09 08:50:53] [Rank 0] step:1501/10000 train_time:118158ms step_avg:78.72ms
+[2025-07-09 08:50:55] [Rank 0] step:1521/10000 train_time:119616ms step_avg:78.64ms
+[2025-07-09 08:50:57] [Rank 0] step:1541/10000 train_time:121741ms step_avg:79.00ms
+[2025-07-09 08:50:58] [Rank 0] step:1561/10000 train_time:123200ms step_avg:78.92ms
+[2025-07-09 08:51:00] [Rank 0] step:1581/10000 train_time:124661ms step_avg:78.85ms
+[2025-07-09 08:51:01] [Rank 0] step:1601/10000 train_time:126124ms step_avg:78.78ms
+[2025-07-09 08:51:03] [Rank 0] step:1621/10000 train_time:127586ms step_avg:78.71ms
+[2025-07-09 08:51:05] [Rank 0] step:1641/10000 train_time:129706ms step_avg:79.04ms
+[2025-07-09 08:51:06] [Rank 0] step:1661/10000 train_time:131168ms step_avg:78.97ms
+[2025-07-09 08:51:08] [Rank 0] step:1681/10000 train_time:132629ms step_avg:78.90ms
+[2025-07-09 08:51:09] [Rank 0] step:1701/10000 train_time:134091ms step_avg:78.83ms
+[2025-07-09 08:51:11] [Rank 0] step:1721/10000 train_time:135794ms step_avg:78.90ms
+[2025-07-09 08:51:12] [Rank 0] step:1741/10000 train_time:137262ms step_avg:78.84ms
+[2025-07-09 08:51:14] [Rank 0] step:1761/10000 train_time:138724ms step_avg:78.78ms
+[2025-07-09 08:51:15] [Rank 0] step:1781/10000 train_time:140188ms step_avg:78.71ms
+[2025-07-09 08:51:17] [Rank 0] step:1801/10000 train_time:142314ms step_avg:79.02ms
+[2025-07-09 08:51:19] [Rank 0] step:1821/10000 train_time:143757ms step_avg:78.94ms
+[2025-07-09 08:51:20] [Rank 0] step:1841/10000 train_time:145220ms step_avg:78.88ms
+[2025-07-09 08:51:22] [Rank 0] step:1861/10000 train_time:146687ms step_avg:78.82ms
+[2025-07-09 08:51:23] [Rank 0] step:1881/10000 train_time:148154ms step_avg:78.76ms
+[2025-07-09 08:51:25] [Rank 0] step:1901/10000 train_time:149856ms step_avg:78.83ms
+[2025-07-09 08:51:26] [Rank 0] step:1921/10000 train_time:151324ms step_avg:78.77ms
+[2025-07-09 08:51:28] [Rank 0] step:1941/10000 train_time:152790ms step_avg:78.72ms
+[2025-07-09 08:51:29] [Rank 0] step:1961/10000 train_time:154258ms step_avg:78.66ms
+[2025-07-09 08:51:31] [Rank 0] step:1981/10000 train_time:156393ms step_avg:78.95ms
+[2025-07-09 08:51:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 08:51:34] [Rank 0] PRINT: step:2000/10000 train_loss:1.2442 val_loss:1.2167 train_time:157840ms step_avg:78.92ms
+[2025-07-09 08:51:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 08:51:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 08:51:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 08:56:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 08:56:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 08:56:50] [Rank 0] Total Loss: 4.7053
+[2025-07-09 08:56:50] [Rank 0] Total FTA: 0.3352
+[2025-07-09 08:56:50] [Rank 0] Group 0 Loss: 4.9127
+[2025-07-09 08:56:50] [Rank 0] Group 1 Loss: 4.4313
+[2025-07-09 08:56:50] [Rank 0] Group 2 Loss: 4.6037
+[2025-07-09 08:56:50] [Rank 0] Group 3 Loss: 4.7951
+[2025-07-09 08:56:50] [Rank 0] Group 4 Loss: 4.6239
+[2025-07-09 08:56:50] [Rank 0] Group 5 Loss: 4.5795
+[2025-07-09 08:56:50] [Rank 0] Group 6 Loss: 4.6847
+[2025-07-09 08:56:50] [Rank 0] Group 7 Loss: 4.7413
+[2025-07-09 08:56:50] [Rank 0] Group 8 Loss: 4.7216
+[2025-07-09 08:56:50] [Rank 0] Group 9 Loss: 4.6883
+[2025-07-09 08:56:50] [Rank 0] Group 10 Loss: 4.6949
+[2025-07-09 08:56:50] [Rank 0] Group 11 Loss: 4.7319
+[2025-07-09 08:56:50] [Rank 0] Group 0 FTA: 0.5098
+[2025-07-09 08:56:50] [Rank 0] Group 1 FTA: 0.5417
+[2025-07-09 08:56:50] [Rank 0] Group 2 FTA: 0.2188
+[2025-07-09 08:56:50] [Rank 0] Group 3 FTA: 0.1901
+[2025-07-09 08:56:50] [Rank 0] Group 4 FTA: 0.2552
+[2025-07-09 08:56:50] [Rank 0] Group 5 FTA: 0.3151
+[2025-07-09 08:56:50] [Rank 0] Group 6 FTA: 0.2812
+[2025-07-09 08:56:50] [Rank 0] Group 7 FTA: 0.3177
+[2025-07-09 08:56:50] [Rank 0] Group 8 FTA: 0.3333
+[2025-07-09 08:56:50] [Rank 0] Group 9 FTA: 0.3359
+[2025-07-09 08:56:50] [Rank 0] Group 10 FTA: 0.3125
+[2025-07-09 08:56:50] [Rank 0] Group 11 FTA: 0.3008
+[2025-07-09 08:56:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_loss_curves.png
+[2025-07-09 08:56:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_acc_curves.png
+[2025-07-09 08:56:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_loss_curve.png
+[2025-07-09 08:56:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_acc_curve.png
+[2025-07-09 08:56:51] [Rank 0] step:2001/10000 train_time:157865ms step_avg:78.89ms
+[2025-07-09 08:56:53] [Rank 0] step:2021/10000 train_time:159345ms step_avg:78.84ms
+[2025-07-09 08:56:54] [Rank 0] step:2041/10000 train_time:160802ms step_avg:78.79ms
+[2025-07-09 08:56:56] [Rank 0] step:2061/10000 train_time:162260ms step_avg:78.73ms
+[2025-07-09 08:56:58] [Rank 0] step:2081/10000 train_time:164363ms step_avg:78.98ms
+[2025-07-09 08:56:59] [Rank 0] step:2101/10000 train_time:165822ms step_avg:78.93ms
+[2025-07-09 08:57:01] [Rank 0] step:2121/10000 train_time:167282ms step_avg:78.87ms
+[2025-07-09 08:57:02] [Rank 0] step:2141/10000 train_time:168744ms step_avg:78.82ms
+[2025-07-09 08:57:04] [Rank 0] step:2161/10000 train_time:170468ms step_avg:78.88ms
+[2025-07-09 08:57:06] [Rank 0] step:2181/10000 train_time:172042ms step_avg:78.88ms
+[2025-07-09 08:57:07] [Rank 0] step:2201/10000 train_time:173646ms step_avg:78.89ms
+[2025-07-09 08:57:09] [Rank 0] step:2221/10000 train_time:175214ms step_avg:78.89ms
+[2025-07-09 08:57:10] [Rank 0] step:2241/10000 train_time:176698ms step_avg:78.85ms
+[2025-07-09 08:57:12] [Rank 0] step:2261/10000 train_time:178842ms step_avg:79.10ms
+[2025-07-09 08:57:14] [Rank 0] step:2281/10000 train_time:180330ms step_avg:79.06ms
+[2025-07-09 08:57:15] [Rank 0] step:2301/10000 train_time:181822ms step_avg:79.02ms
+[2025-07-09 08:57:17] [Rank 0] step:2321/10000 train_time:183312ms step_avg:78.98ms
+[2025-07-09 08:57:19] [Rank 0] step:2341/10000 train_time:184856ms step_avg:78.96ms
+[2025-07-09 08:57:20] [Rank 0] step:2361/10000 train_time:186531ms step_avg:79.01ms
+[2025-07-09 08:57:22] [Rank 0] step:2381/10000 train_time:188022ms step_avg:78.97ms
+[2025-07-09 08:57:23] [Rank 0] step:2401/10000 train_time:189513ms step_avg:78.93ms
+[2025-07-09 08:57:24] [Rank 0] step:2421/10000 train_time:191006ms step_avg:78.90ms
+[2025-07-09 08:57:27] [Rank 0] step:2441/10000 train_time:193148ms step_avg:79.13ms
+[2025-07-09 08:57:28] [Rank 0] step:2461/10000 train_time:194639ms step_avg:79.09ms
+[2025-07-09 08:57:30] [Rank 0] step:2481/10000 train_time:196130ms step_avg:79.05ms
+[2025-07-09 08:57:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 08:57:32] [Rank 0] PRINT: step:2500/10000 train_loss:1.1651 val_loss:1.1221 train_time:197623ms step_avg:79.05ms
+[2025-07-09 08:57:32] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 08:57:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 08:57:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 09:02:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 09:02:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 09:02:48] [Rank 0] Total Loss: 4.8856
+[2025-07-09 09:02:48] [Rank 0] Total FTA: 0.4340
+[2025-07-09 09:02:48] [Rank 0] Group 0 Loss: 5.0247
+[2025-07-09 09:02:48] [Rank 0] Group 1 Loss: 4.6916
+[2025-07-09 09:02:48] [Rank 0] Group 2 Loss: 4.7672
+[2025-07-09 09:02:48] [Rank 0] Group 3 Loss: 4.9885
+[2025-07-09 09:02:48] [Rank 0] Group 4 Loss: 4.8655
+[2025-07-09 09:02:48] [Rank 0] Group 5 Loss: 4.7996
+[2025-07-09 09:02:48] [Rank 0] Group 6 Loss: 4.8484
+[2025-07-09 09:02:48] [Rank 0] Group 7 Loss: 4.9382
+[2025-07-09 09:02:48] [Rank 0] Group 8 Loss: 4.8428
+[2025-07-09 09:02:48] [Rank 0] Group 9 Loss: 4.9055
+[2025-07-09 09:02:48] [Rank 0] Group 10 Loss: 4.8994
+[2025-07-09 09:02:48] [Rank 0] Group 11 Loss: 4.8979
+[2025-07-09 09:02:48] [Rank 0] Group 0 FTA: 0.4707
+[2025-07-09 09:02:48] [Rank 0] Group 1 FTA: 0.5130
+[2025-07-09 09:02:48] [Rank 0] Group 2 FTA: 0.5495
+[2025-07-09 09:02:48] [Rank 0] Group 3 FTA: 0.2656
+[2025-07-09 09:02:48] [Rank 0] Group 4 FTA: 0.3099
+[2025-07-09 09:02:48] [Rank 0] Group 5 FTA: 0.5000
+[2025-07-09 09:02:48] [Rank 0] Group 6 FTA: 0.4297
+[2025-07-09 09:02:48] [Rank 0] Group 7 FTA: 0.4453
+[2025-07-09 09:02:48] [Rank 0] Group 8 FTA: 0.4115
+[2025-07-09 09:02:48] [Rank 0] Group 9 FTA: 0.4258
+[2025-07-09 09:02:48] [Rank 0] Group 10 FTA: 0.4434
+[2025-07-09 09:02:48] [Rank 0] Group 11 FTA: 0.4219
+[2025-07-09 09:02:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_loss_curves.png
+[2025-07-09 09:02:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/per_class_acc_curves.png
+[2025-07-09 09:02:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_loss_curve.png
+[2025-07-09 09:02:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.001_seed_49/total_acc_curve.png
+[2025-07-09 09:02:50] [Rank 0] step:2501/10000 train_time:197647ms step_avg:79.03ms
+[2025-07-09 09:02:52] [Rank 0] step:2521/10000 train_time:199144ms step_avg:78.99ms
+[2025-07-09 09:02:53] [Rank 0] step:2541/10000 train_time:201285ms step_avg:79.22ms
+[2025-07-09 09:02:55] [Rank 0] step:2561/10000 train_time:202770ms step_avg:79.18ms
+[2025-07-09 09:02:56] [Rank 0] step:2581/10000 train_time:204254ms step_avg:79.14ms
+[2025-07-09 09:02:58] [Rank 0] step:2601/10000 train_time:205740ms step_avg:79.10ms
+[2025-07-09 09:03:00] [Rank 0] step:2621/10000 train_time:207872ms step_avg:79.31ms
+[2025-07-09 09:03:01] [Rank 0] step:2641/10000 train_time:209359ms step_avg:79.27ms
+[2025-07-09 09:03:03] [Rank 0] step:2661/10000 train_time:210848ms step_avg:79.24ms
+[2025-07-09 09:03:04] [Rank 0] step:2681/10000 train_time:212338ms step_avg:79.20ms
+[2025-07-09 09:03:06] [Rank 0] step:2701/10000 train_time:213885ms step_avg:79.19ms
+[2025-07-09 09:03:08] [Rank 0] step:2721/10000 train_time:215555ms step_avg:79.22ms
+[2025-07-09 09:03:09] [Rank 0] step:2741/10000 train_time:217047ms step_avg:79.19ms
+[2025-07-09 09:03:10] [Rank 0] step:2761/10000 train_time:218537ms step_avg:79.15ms
+[2025-07-09 09:03:12] [Rank 0] step:2781/10000 train_time:220028ms step_avg:79.12ms
+[2025-07-09 09:03:14] [Rank 0] step:2801/10000 train_time:222176ms step_avg:79.32ms
+[2025-07-09 09:03:16] [Rank 0] step:2821/10000 train_time:223668ms step_avg:79.29ms
+[2025-07-09 09:03:17] [Rank 0] step:2841/10000 train_time:225160ms step_avg:79.25ms
+[2025-07-09 09:03:19] [Rank 0] step:2861/10000 train_time:226652ms step_avg:79.22ms
+[2025-07-09 09:03:21] [Rank 0] step:2881/10000 train_time:228814ms step_avg:79.42ms
+[2025-07-09 09:03:22] [Rank 0] step:2901/10000 train_time:230285ms step_avg:79.38ms
+[2025-07-09 09:03:24] [Rank 0] step:2921/10000 train_time:231777ms step_avg:79.35ms
+[2025-07-09 09:03:25] [Rank 0] step:2941/10000 train_time:233332ms step_avg:79.34ms
+[2025-07-09 09:03:27] [Rank 0] step:2961/10000 train_time:234923ms step_avg:79.34ms
+[2025-07-09 09:03:29] [Rank 0] step:2981/10000 train_time:236715ms step_avg:79.41ms
+[2025-07-09 09:03:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 09:03:31] [Rank 0] PRINT: step:3000/10000 train_loss:1.0930 val_loss:1.0466 train_time:238207ms step_avg:79.40ms
+[2025-07-09 09:03:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 09:03:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 09:03:31] [Rank 0] Evaluation set size after sampling: 5633
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/config.json b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b2473a41f15e7f6965b7e4a74409fe9ada719d6f
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "9236fe09-63a6-42c4-9751-91a9a579cd6c",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..e35ef9de18deb5392ee0f586a9ba511c49dd756a
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:391c247ea55bed747c77596b4db1aee13f1a3fbb0630b8957301b50c1eeb676a
+size 418433
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..ac950ec5abcb79e4ce5b6267509e9d7305506f44
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:02e557deb29d936ae6051f7b9e5a6ab024ca6174fabf19db4dfba7bec6ed3927
+size 368236
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..d14edd4b3642aff2b5d553806d9a4279f1e121ce
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d29662617af84638f59a2c948b91b32131d9efce4f77ebf36fed375ac67d132
+size 114071
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..a721b330c9f3b4b49948c4d335b05248581bb702
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afb2d93c724912f4943ac1bd7cd1f37fd7d20e4f4bc62764bebaa0c19e6a1a5d
+size 113238
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/training_log_9236fe09-63a6-42c4-9751-91a9a579cd6c.txt b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/training_log_9236fe09-63a6-42c4-9751-91a9a579cd6c.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4140a58245300d0dfa8c8c91bb334cb927af3482
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/training_log_9236fe09-63a6-42c4-9751-91a9a579cd6c.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 06:37:08] [Rank 0] PRINT: --- Script Start: Sun Jul 6 06:37:08 2025 ---
+[2025-07-06 06:37:08] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002)
+[2025-07-06 06:37:08] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 06:37:08] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-06 06:37:08] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42
+[2025-07-06 06:37:08] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
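+# Shard layout implied by the loader above (summary, for reference): each .bin
+# file starts with a 256-int32 header -- header[0] = 20240520 magic,
+# header[1] = 1 version, header[2] = token count -- followed at byte offset
+# 256 * 4 by the tokens themselves stored as uint16 (2 bytes per token).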
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
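+# Illustrative launch (hypothetical command line; the actual launcher is not
+# recorded in this log). The RANK/LOCAL_RANK/WORLD_SIZE variables read below
+# are populated by a launcher such as torchrun:
+#   torchrun --nproc_per_node=8 <this_script>.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.002 --seed 42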
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024  # FlexAttention sequence length
+    #train_seq_len = 12*1024  # FlexAttention sequence length
+    #val_seq_len = 4*64*1024  # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000  # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500  # Original: 125
+    save_checkpoint = False  # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank)  # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
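+# For example, with the CLI args parsed above (optimizer_mode=0,
+# model_parameterization="qkvo", adam_lr=0.002, seed=42) run_dir_path_str ends in
+# logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42 -- the folder that holds
+# the config.json and curve PNGs committed in this diff.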
+base_log_dir = Path("logs_bios/qa_0704")  # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
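+        # Illustrative example (not from the original run): a cleaned item like
+        #   "What university did X attend? Stanford University"
+        # splits into prompt "What university did X attend?" and answer
+        # "Stanford University". GPT-2's BPE gives " Stanford" and "Stanford"
+        # different first tokens, hence the leading space added below before
+        # taking answer_tokens[0] as the expected first token.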
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # use full cached set
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(
+                f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f" Expected : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
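+# Worked example for generate_powerlaw_selection_counts (illustrative): m=3 gives
+#   selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#   class_groups     = [0, 1, 2, 2, 3, 3, 3, 3]
+# i.e. group 0 is one heavily-sampled class, and each later group doubles the
+# number of classes while halving the samples per class -- the power-law split
+# behind the per-group Loss/FTA numbers in the logs above.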
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
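+# Illustrative call (hypothetical history values): plot_curves accepts either a
+# flat {epoch_str: value} history for the total curves or a nested
+# {group_id_str: {epoch_str: value}} history for the per-class curves, e.g.
+#   plot_curves({"0": {"500": 4.53, "1000": 4.40}},
+#               "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+# the isinstance check on the first value selects between the two layouts.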
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    # =================================================================
+    # <<<<< Restored Missing Code >>>>>
+    # =================================================================
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
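+# Worked example of the padding above (illustrative): a QA item encoding to 45
+# tokens (incl. EOS) gets padded_len = ceil(45/128)*128 = 128; the target list is
+# the 44 shifted tokens plus one pad, then -100 up to length 128, so padded
+# positions are ignored by the cross-entropy loss (ignore_index=-100).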
+
+
+########################################
+#     Construct model and optimizer    #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+# Second smoke test: repeats the logits-only call above as a quick regression
+# check of the (loss, logits) return convention when target_seq is None.
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)  # target_seq=None
+        model.train()
+
+        if isinstance(result,
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # Mode 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
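+    # The if/elif ladder above is equivalent to this mode -> (Muon targets,
+    # Adam-matrix targets) table; shown only as a compact summary of the nine
+    # experiment modes, not wired into the code:
+    #   _MODE_SPLITS = {
+    #       0: (all_attn_matrices + all_mlp_matrices, []),
+    #       1: (attn_qk_group, attn_vo_group + all_mlp_matrices),
+    #       2: (attn_vo_group, attn_qk_group + all_mlp_matrices),
+    #       3: (all_attn_matrices, all_mlp_matrices),
+    #       4: (all_mlp_matrices, all_attn_matrices),
+    #       5: ([], all_attn_matrices + all_mlp_matrices),
+    #       6: (mlp_w2_group, all_attn_matrices + mlp_w1_group),
+    #       7: (attn_vo_group + all_mlp_matrices, attn_qk_group),
+    #       8: (attn_vo_group + mlp_w2_group, attn_qk_group + mlp_w1_group),
+    #   }
+    #   muon_params_target_list, adam_matrix_target_list = _MODE_SPLITS[current_optimizer_mode]
+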
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        #dict(params=head_params, lr=0.22),
+        #dict(params=embed_params, lr=0.6),
+        #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices: # Only add group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # consider adding weight_decay=0.01 to Adam
+    optimizers = [optimizer1] # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params: # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng.
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
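+# Note: get_lr() above returns a multiplier of 1.0 while x < 1 - cooldown_frac
+# (with this run's num_iterations=10000 and cooldown_frac=0.8, steps 0-1999),
+# then decays linearly to 0.1 at the final step; e.g. at step 6000, w = 0.5
+# and the multiplier is 0.5 * 1.0 + 0.5 * 0.1 = 0.55. Each optimizer group's
+# lr is set to group["initial_lr"] * get_lr(step) in the training loop below.
+# The two accumulators here collect the (sequence-length normalized) training
+# loss between validation points; they are averaged, all-reduced across ranks
+# for logging, and reset after each validation report.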
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
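+# Example invocation (illustrative; the script name below is a placeholder
+# for whatever this file is saved as, and torchrun supplies the RANK /
+# LOCAL_RANK / WORLD_SIZE environment variables that the DDP setup reads):
+#   torchrun --standalone --nproc_per_node=8 train_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42
+#
+# Each run directory then holds config.json plus the four curve PNGs; a
+# minimal reader for the saved config (run path hypothetical):
+#   import json; from pathlib import Path
+#   cfg = json.loads((Path("logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42") / "config.json").read_text())
+#   print(cfg["cli_args"]["optimizer_mode"], cfg["hyperparameters"]["num_iterations"])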
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
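+
+    # Illustrative sketch (commented out; behavior identical to the ladder above):
+    # the mode -> (Muon matrices, Adam matrices) assignment is easier to scan as a table.
+    # mode_table = {
+    #     0: (all_attn_matrices + all_mlp_matrices, []),
+    #     1: (attn_qk_group, attn_vo_group + all_mlp_matrices),
+    #     2: (attn_vo_group, attn_qk_group + all_mlp_matrices),
+    #     3: (all_attn_matrices, all_mlp_matrices),
+    #     4: (all_mlp_matrices, all_attn_matrices),
+    #     5: ([], all_attn_matrices + all_mlp_matrices),
+    #     6: (mlp_w2_group, all_attn_matrices + mlp_w1_group),
+    #     7: (attn_vo_group + all_mlp_matrices, attn_qk_group),
+    #     8: (attn_vo_group + mlp_w2_group, attn_qk_group + mlp_w1_group),
+    # }
+    # muon_params_target_list, adam_matrix_target_list = mode_table[current_optimizer_mode]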
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        # dict(params=head_params, lr=0.22),
+        # dict(params=embed_params, lr=0.6),
+        # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices:  # Only add group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+    optimizers = [optimizer1]  # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params:  # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None  # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
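+
+# Illustrative sketch (commented out): whichever branch ran above, every trainable
+# parameter should end up in exactly one optimizer group, which the two asserts
+# below would verify.
+# _owned = [id(p) for opt in optimizers for g in opt.param_groups for p in g["params"]]
+# assert len(_owned) == len(set(_owned)), "a parameter is assigned to two optimizers"
+# assert set(_owned) == {id(p) for p in model.parameters()}, "a parameter is left unoptimized"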
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
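+
+# Worked example for get_lr (values assume num_iterations=10000 and
+# cooldown_frac=0.8, as configured for this run): the multiplier stays at 1.0
+# until step 2000, then decays linearly to 0.1 at step 10000:
+#   get_lr(0)     == 1.0
+#   get_lr(2000)  == 1.0
+#   get_lr(6000)  == 0.55   # w = 0.5 halfway through the cooldown
+#   get_lr(10000) == 0.1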
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        # first_token_acc = 0.0
+        # ft_correct = 0
+        # ft_total = 0
+        # if master_process and ft_tokenizer is not None:
+        #     try:
+        #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #             model_for_inference, ft_tokenizer, device, num_samples=1000
+        #         )
+        #     except Exception as e:
+        #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        # if world_size > 1:
+        #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #     ft_total_tensor = torch.tensor(ft_total, device=device)
+        #     dist.broadcast(ft_acc_tensor, 0)
+        #     dist.broadcast(ft_correct_tensor, 0)
+        #     dist.broadcast(ft_total_tensor, 0)
+        #     first_token_acc = ft_acc_tensor.item()
+        #     ft_correct = int(ft_correct_tensor.item())
+        #     ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
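+
+            # For reference, `history` maps step strings to scalars; with the values
+            # actually logged below it looks like (abridged):
+            # history = {
+            #     'total_loss': {'500': 4.3162, '1000': 4.3146},
+            #     'total_acc':  {'500': 0.0870, '1000': 0.1706},
+            #     'per_class_loss': {'0': {'500': 4.5184, '1000': 4.4990}, ...},
+            #     'per_class_acc':  {'0': {'500': 0.1417, '1000': 0.0000}, ...},
+            # }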
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 06:37:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 06:37:09] [Rank 0] PRINT: Constructing model...
+[2025-07-06 06:37:11] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-06 06:37:11] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-06 06:37:11] [Rank 0] PRINT: Testing model forward function:
+[2025-07-06 06:37:12] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-06 06:37:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-06 06:37:12] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-06 06:37:12] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-06 06:37:12] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-06 06:37:12] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-06 06:37:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-06 06:37:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-06 06:37:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-06 06:37:12] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-06 06:37:12] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-06 06:37:12] [Rank 0] PRINT: Model compilation complete.
+[2025-07-06 06:37:12] [Rank 0] PRINT: Starting warmup...
+[2025-07-06 06:39:01] [Rank 0] PRINT: Warmup complete.
+[2025-07-06 06:39:01] [Rank 0] PRINT: Starting training...
+[2025-07-06 06:39:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:39:08] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-06 06:39:10] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.58ms
+[2025-07-06 06:39:11] [Rank 0] step:41/10000 train_time:3216ms step_avg:78.44ms
+[2025-07-06 06:39:13] [Rank 0] step:61/10000 train_time:4679ms step_avg:76.70ms
+[2025-07-06 06:39:14] [Rank 0] step:81/10000 train_time:6145ms step_avg:75.87ms
+[2025-07-06 06:39:16] [Rank 0] step:101/10000 train_time:8268ms step_avg:81.86ms
+[2025-07-06 06:39:18] [Rank 0] step:121/10000 train_time:9734ms step_avg:80.45ms
+[2025-07-06 06:39:19] [Rank 0] step:141/10000 train_time:11204ms step_avg:79.46ms
+[2025-07-06 06:39:21] [Rank 0] step:161/10000 train_time:12673ms step_avg:78.71ms
+[2025-07-06 06:39:23] [Rank 0] step:181/10000 train_time:14701ms step_avg:81.22ms
+[2025-07-06 06:39:24] [Rank 0] step:201/10000 train_time:16150ms step_avg:80.35ms
+[2025-07-06 06:39:26] [Rank 0] step:221/10000 train_time:17620ms step_avg:79.73ms
+[2025-07-06 06:39:27] [Rank 0] step:241/10000 train_time:19089ms step_avg:79.21ms
+[2025-07-06 06:39:29] [Rank 0] step:261/10000 train_time:20559ms step_avg:78.77ms
+[2025-07-06 06:39:31] [Rank 0] step:281/10000 train_time:22694ms step_avg:80.76ms
+[2025-07-06 06:39:32] [Rank 0] step:301/10000 train_time:24165ms step_avg:80.28ms
+[2025-07-06 06:39:34] [Rank 0] step:321/10000 train_time:25636ms step_avg:79.86ms
+[2025-07-06 06:39:35] [Rank 0] step:341/10000 train_time:27110ms step_avg:79.50ms
+[2025-07-06 06:39:37] [Rank 0] step:361/10000 train_time:28633ms step_avg:79.32ms
+[2025-07-06 06:39:39] [Rank 0] step:381/10000 train_time:30698ms step_avg:80.57ms
+[2025-07-06 06:39:40] [Rank 0] step:401/10000 train_time:32172ms step_avg:80.23ms
+[2025-07-06 06:39:42] [Rank 0] step:421/10000 train_time:33649ms step_avg:79.93ms
+[2025-07-06 06:39:43] [Rank 0] step:441/10000 train_time:35124ms step_avg:79.65ms
+[2025-07-06 06:39:45] [Rank 0] step:461/10000 train_time:37261ms step_avg:80.83ms
+[2025-07-06 06:39:47] [Rank 0] step:481/10000 train_time:38736ms step_avg:80.53ms
+[2025-07-06 06:39:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:39:49] [Rank 0] PRINT: step:500/10000 train_loss:3.3837 val_loss:1.6112 train_time:40213ms step_avg:80.43ms
+[2025-07-06 06:39:49] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:39:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:39:49] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:45:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:45:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:45:12] [Rank 0] Total Loss: 4.3162
+[2025-07-06 06:45:12] [Rank 0] Total FTA: 0.0870
+[2025-07-06 06:45:12] [Rank 0] Group 0 Loss: 4.5184
+[2025-07-06 06:45:12] [Rank 0] Group 1 Loss: 4.2978
+[2025-07-06 06:45:12] [Rank 0] Group 2 Loss: 4.0875
+[2025-07-06 06:45:12] [Rank 0] Group 3 Loss: 4.2616
+[2025-07-06 06:45:12] [Rank 0] Group 4 Loss: 4.2796
+[2025-07-06 06:45:12] [Rank 0] Group 5 Loss: 4.2370
+[2025-07-06 06:45:12] [Rank 0] Group 6 Loss: 4.2307
+[2025-07-06 06:45:12] [Rank 0] Group 7 Loss: 4.3510
+[2025-07-06 06:45:12] [Rank 0] Group 8 Loss: 4.3047
+[2025-07-06 06:45:12] [Rank 0] Group 9 Loss: 4.2559
+[2025-07-06 06:45:12] [Rank 0] Group 10 Loss: 4.3100
+[2025-07-06 06:45:12] [Rank 0] Group 11 Loss: 4.3626
+[2025-07-06 06:45:12] [Rank 0] Group 0 FTA: 0.1417
+[2025-07-06 06:45:12] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 06:45:12] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-06 06:45:12] [Rank 0] Group 3 FTA: 0.0547
+[2025-07-06 06:45:12] [Rank 0] Group 4 FTA: 0.0182
+[2025-07-06 06:45:12] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-06 06:45:12] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-06 06:45:12] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-06 06:45:12] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-06 06:45:12] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-06 06:45:12] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-06 06:45:12] [Rank 0] Group 11 FTA: 0.0820
+[2025-07-06 06:45:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 06:45:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 06:45:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 06:45:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 06:45:14] [Rank 0] step:501/10000 train_time:40233ms step_avg:80.31ms
+[2025-07-06 06:45:15] [Rank 0] step:521/10000 train_time:41696ms step_avg:80.03ms
+[2025-07-06 06:45:17] [Rank 0] step:541/10000 train_time:43820ms step_avg:81.00ms
+[2025-07-06 06:45:19] [Rank 0] step:561/10000 train_time:45263ms step_avg:80.68ms
+[2025-07-06 06:45:20] [Rank 0] step:581/10000 train_time:46725ms step_avg:80.42ms
+[2025-07-06 06:45:22] [Rank 0] step:601/10000 train_time:48190ms step_avg:80.18ms
+[2025-07-06 06:45:23] [Rank 0] step:621/10000 train_time:49655ms step_avg:79.96ms
+[2025-07-06 06:45:25] [Rank 0] step:641/10000 train_time:51795ms step_avg:80.80ms
+[2025-07-06 06:45:27] [Rank 0] step:661/10000 train_time:53264ms step_avg:80.58ms
+[2025-07-06 06:45:28] [Rank 0] step:681/10000 train_time:54729ms step_avg:80.37ms
+[2025-07-06 06:45:30] [Rank 0] step:701/10000 train_time:56196ms step_avg:80.17ms
+[2025-07-06 06:45:32] [Rank 0] step:721/10000 train_time:58348ms step_avg:80.93ms
+[2025-07-06 06:45:33] [Rank 0] step:741/10000 train_time:59794ms step_avg:80.69ms
+[2025-07-06 06:45:35] [Rank 0] step:761/10000 train_time:61269ms step_avg:80.51ms
+[2025-07-06 06:45:36] [Rank 0] step:781/10000 train_time:62743ms step_avg:80.34ms
+[2025-07-06 06:45:38] [Rank 0] step:801/10000 train_time:64220ms step_avg:80.17ms
+[2025-07-06 06:45:40] [Rank 0] step:821/10000 train_time:66359ms step_avg:80.83ms
+[2025-07-06 06:45:41] [Rank 0] step:841/10000 train_time:68079ms step_avg:80.95ms
+[2025-07-06 06:45:43] [Rank 0] step:861/10000 train_time:69554ms step_avg:80.78ms
+[2025-07-06 06:45:44] [Rank 0] step:881/10000 train_time:71029ms step_avg:80.62ms
+[2025-07-06 06:45:47] [Rank 0] step:901/10000 train_time:72556ms step_avg:80.53ms
+[2025-07-06 06:45:48] [Rank 0] step:921/10000 train_time:74722ms step_avg:81.13ms
+[2025-07-06 06:45:50] [Rank 0] step:941/10000 train_time:76196ms step_avg:80.97ms
+[2025-07-06 06:45:51] [Rank 0] step:961/10000 train_time:77672ms step_avg:80.82ms
+[2025-07-06 06:45:53] [Rank 0] step:981/10000 train_time:79146ms step_avg:80.68ms
+[2025-07-06 06:45:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:45:56] [Rank 0] PRINT: step:1000/10000 train_loss:1.4596 val_loss:1.3160 train_time:81270ms step_avg:81.27ms
+[2025-07-06 06:45:56] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:45:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:45:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:51:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:51:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:51:19] [Rank 0] Total Loss: 4.3146
+[2025-07-06 06:51:19] [Rank 0] Total FTA: 0.1706
+[2025-07-06 06:51:19] [Rank 0] Group 0 Loss: 4.4990
+[2025-07-06 06:51:19] [Rank 0] Group 1 Loss: 4.1386
+[2025-07-06 06:51:19] [Rank 0] Group 2 Loss: 4.0959
+[2025-07-06 06:51:20] [Rank 0] Group 3 Loss: 4.3723
+[2025-07-06 06:51:20] [Rank 0] Group 4 Loss: 4.2820
+[2025-07-06 06:51:20] [Rank 0] Group 5 Loss: 4.3017
+[2025-07-06 06:51:20] [Rank 0] Group 6 Loss: 4.2374
+[2025-07-06 06:51:20] [Rank 0] Group 7 Loss: 4.3430
+[2025-07-06 06:51:20] [Rank 0] Group 8 Loss: 4.3342
+[2025-07-06 06:51:20] [Rank 0] Group 9 Loss: 4.2248
+[2025-07-06 06:51:20] [Rank 0] Group 10 Loss: 4.3094
+[2025-07-06 06:51:20] [Rank 0] Group 11 Loss: 4.3557
+[2025-07-06 06:51:20] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-06 06:51:20] [Rank 0] Group 1 FTA: 0.3333
+[2025-07-06 06:51:20] [Rank 0] Group 2 FTA: 0.3359
+[2025-07-06 06:51:20] [Rank 0] Group 3 FTA: 0.2135
+[2025-07-06 06:51:20] [Rank 0] Group 4 FTA: 0.1328
+[2025-07-06 06:51:20] [Rank 0] Group 5 FTA: 0.1641
+[2025-07-06 06:51:20] [Rank 0] Group 6 FTA: 0.1849
+[2025-07-06 06:51:20] [Rank 0] Group 7 FTA: 0.1641
+[2025-07-06 06:51:20] [Rank 0] Group 8 FTA: 0.1615
+[2025-07-06 06:51:20] [Rank 0] Group 9 FTA: 0.1719
+[2025-07-06 06:51:20] [Rank 0] Group 10 FTA: 0.1797
+[2025-07-06 06:51:20] [Rank 0] Group 11 FTA: 0.1719
+[2025-07-06 06:51:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 06:51:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 06:51:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 06:51:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 06:51:21] [Rank 0] step:1001/10000 train_time:81291ms step_avg:81.21ms
+[2025-07-06 06:51:22] [Rank 0] step:1021/10000 train_time:82765ms step_avg:81.06ms
+[2025-07-06 06:51:24] [Rank 0] step:1041/10000 train_time:84231ms step_avg:80.91ms
+[2025-07-06 06:51:25] [Rank 0] step:1061/10000 train_time:85702ms step_avg:80.77ms
+[2025-07-06 06:51:28] [Rank 0] step:1081/10000 train_time:87169ms step_avg:80.64ms
+[2025-07-06 06:51:29] [Rank 0] step:1101/10000 train_time:89300ms step_avg:81.11ms
+[2025-07-06 06:51:31] [Rank 0] step:1121/10000 train_time:90872ms step_avg:81.06ms
+[2025-07-06 06:51:32] [Rank 0] step:1141/10000 train_time:92345ms step_avg:80.93ms
+[2025-07-06 06:51:34] [Rank 0] step:1161/10000 train_time:93914ms step_avg:80.89ms
+[2025-07-06 06:51:36] [Rank 0] step:1181/10000 train_time:96051ms step_avg:81.33ms
+[2025-07-06 06:51:37] [Rank 0] step:1201/10000 train_time:97623ms step_avg:81.28ms
+[2025-07-06 06:51:39] [Rank 0] step:1221/10000 train_time:99196ms step_avg:81.24ms
+[2025-07-06 06:51:40] [Rank 0] step:1241/10000 train_time:100670ms step_avg:81.12ms
+[2025-07-06 06:51:42] [Rank 0] step:1261/10000 train_time:102193ms step_avg:81.04ms
+[2025-07-06 06:51:44] [Rank 0] step:1281/10000 train_time:103858ms step_avg:81.08ms
+[2025-07-06 06:51:45] [Rank 0] step:1301/10000 train_time:105331ms step_avg:80.96ms
+[2025-07-06 06:51:47] [Rank 0] step:1321/10000 train_time:106906ms step_avg:80.93ms
+[2025-07-06 06:51:48] [Rank 0] step:1341/10000 train_time:108383ms step_avg:80.82ms
+[2025-07-06 06:51:50] [Rank 0] step:1361/10000 train_time:110519ms step_avg:81.20ms
+[2025-07-06 06:51:52] [Rank 0] step:1381/10000 train_time:111996ms step_avg:81.10ms
+[2025-07-06 06:51:53] [Rank 0] step:1401/10000 train_time:113473ms step_avg:80.99ms
+[2025-07-06 06:51:55] [Rank 0] step:1421/10000 train_time:114952ms step_avg:80.89ms
+[2025-07-06 06:51:57] [Rank 0] step:1441/10000 train_time:116483ms step_avg:80.83ms
+[2025-07-06 06:51:58] [Rank 0] step:1461/10000 train_time:118556ms step_avg:81.15ms
+[2025-07-06 06:52:00] [Rank 0] step:1481/10000 train_time:120034ms step_avg:81.05ms
+[2025-07-06 06:52:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:52:02] [Rank 0] PRINT: step:1500/10000 train_loss:1.2239 val_loss:1.1955 train_time:121749ms step_avg:81.17ms
+[2025-07-06 06:52:02] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:52:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:52:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:57:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:57:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:57:28] [Rank 0] Total Loss: 4.7606
+[2025-07-06 06:57:28] [Rank 0] Total FTA: 0.3373
+[2025-07-06 06:57:28] [Rank 0] Group 0 Loss: 5.0407
+[2025-07-06 06:57:28] [Rank 0] Group 1 Loss: 4.5532
+[2025-07-06 06:57:28] [Rank 0] Group 2 Loss: 4.6034
+[2025-07-06 06:57:28] [Rank 0] Group 3 Loss: 5.0741
+[2025-07-06 06:57:28] [Rank 0] Group 4 Loss: 4.6912
+[2025-07-06 06:57:28] [Rank 0] Group 5 Loss: 4.7371
+[2025-07-06 06:57:28] [Rank 0] Group 6 Loss: 4.6216
+[2025-07-06 06:57:28] [Rank 0] Group 7 Loss: 4.7139
+[2025-07-06 06:57:28] [Rank 0] Group 8 Loss: 4.6920
+[2025-07-06 06:57:28] [Rank 0] Group 9 Loss: 4.6894
+[2025-07-06 06:57:28] [Rank 0] Group 10 Loss: 4.7198
+[2025-07-06 06:57:28] [Rank 0] Group 11 Loss: 4.7378
+[2025-07-06 06:57:28] [Rank 0] Group 0 FTA: 0.3342
+[2025-07-06 06:57:28] [Rank 0] Group 1 FTA: 0.3854
+[2025-07-06 06:57:28] [Rank 0] Group 2 FTA: 0.5469
+[2025-07-06 06:57:28] [Rank 0] Group 3 FTA: 0.2786
+[2025-07-06 06:57:28] [Rank 0] Group 4 FTA: 0.2682
+[2025-07-06 06:57:28] [Rank 0] Group 5 FTA: 0.3932
+[2025-07-06 06:57:28] [Rank 0] Group 6 FTA: 0.3021
+[2025-07-06 06:57:28] [Rank 0] Group 7 FTA: 0.2891
+[2025-07-06 06:57:28] [Rank 0] Group 8 FTA: 0.3021
+[2025-07-06 06:57:28] [Rank 0] Group 9 FTA: 0.3125
+[2025-07-06 06:57:28] [Rank 0] Group 10 FTA: 0.2988
+[2025-07-06 06:57:28] [Rank 0] Group 11 FTA: 0.3398
+[2025-07-06 06:57:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 06:57:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 06:57:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 06:57:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 06:57:29] [Rank 0] step:1501/10000 train_time:121770ms step_avg:81.13ms
+[2025-07-06 06:57:31] [Rank 0] step:1521/10000 train_time:123258ms step_avg:81.04ms
+[2025-07-06 06:57:33] [Rank 0] step:1541/10000 train_time:125400ms step_avg:81.38ms
+[2025-07-06 06:57:34] [Rank 0] step:1561/10000 train_time:126870ms step_avg:81.27ms
+[2025-07-06 06:57:36] [Rank 0] step:1581/10000 train_time:128342ms step_avg:81.18ms
+[2025-07-06 06:57:37] [Rank 0] step:1601/10000 train_time:129815ms step_avg:81.08ms
+[2025-07-06 06:57:39] [Rank 0] step:1621/10000 train_time:131951ms step_avg:81.40ms
+[2025-07-06 06:57:41] [Rank 0] step:1641/10000 train_time:133406ms step_avg:81.30ms
+[2025-07-06 06:57:42] [Rank 0] step:1661/10000 train_time:134880ms step_avg:81.20ms
+[2025-07-06 06:57:44] [Rank 0] step:1681/10000 train_time:136357ms step_avg:81.12ms
+[2025-07-06 06:57:45] [Rank 0] step:1701/10000 train_time:137831ms step_avg:81.03ms
+[2025-07-06 06:57:47] [Rank 0] step:1721/10000 train_time:139646ms step_avg:81.14ms
+[2025-07-06 06:57:49] [Rank 0] step:1741/10000 train_time:141121ms step_avg:81.06ms
+[2025-07-06 06:57:50] [Rank 0] step:1761/10000 train_time:142596ms step_avg:80.97ms
+[2025-07-06 06:57:52] [Rank 0] step:1781/10000 train_time:144073ms step_avg:80.89ms
+[2025-07-06 06:57:54] [Rank 0] step:1801/10000 train_time:145599ms step_avg:80.84ms
+[2025-07-06 06:57:55] [Rank 0] step:1821/10000 train_time:147679ms step_avg:81.10ms
+[2025-07-06 06:57:57] [Rank 0] step:1841/10000 train_time:149156ms step_avg:81.02ms
+[2025-07-06 06:57:58] [Rank 0] step:1861/10000 train_time:150634ms step_avg:80.94ms
+[2025-07-06 06:58:00] [Rank 0] step:1881/10000 train_time:152112ms step_avg:80.87ms
+[2025-07-06 06:58:02] [Rank 0] step:1901/10000 train_time:154234ms step_avg:81.13ms
+[2025-07-06 06:58:03] [Rank 0] step:1921/10000 train_time:155714ms step_avg:81.06ms
+[2025-07-06 06:58:05] [Rank 0] step:1941/10000 train_time:157195ms step_avg:80.99ms
+[2025-07-06 06:58:06] [Rank 0] step:1961/10000 train_time:158675ms step_avg:80.92ms
+[2025-07-06 06:58:08] [Rank 0] step:1981/10000 train_time:160411ms step_avg:80.97ms
+[2025-07-06 06:58:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:58:11] [Rank 0] PRINT: step:2000/10000 train_loss:1.1180 val_loss:1.0886 train_time:162377ms step_avg:81.19ms
+[2025-07-06 06:58:11] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:58:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:58:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:03:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:03:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:03:38] [Rank 0] Total Loss: 5.0756
+[2025-07-06 07:03:38] [Rank 0] Total FTA: 0.4882
+[2025-07-06 07:03:38] [Rank 0] Group 0 Loss: 5.4211
+[2025-07-06 07:03:38] [Rank 0] Group 1 Loss: 4.8904
+[2025-07-06 07:03:38] [Rank 0] Group 2 Loss: 4.8330
+[2025-07-06 07:03:38] [Rank 0] Group 3 Loss: 5.2268
+[2025-07-06 07:03:38] [Rank 0] Group 4 Loss: 4.9691
+[2025-07-06 07:03:38] [Rank 0] Group 5 Loss: 4.9867
+[2025-07-06 07:03:38] [Rank 0] Group 6 Loss: 4.9823
+[2025-07-06 07:03:38] [Rank 0] Group 7 Loss: 5.0920
+[2025-07-06 07:03:38] [Rank 0] Group 8 Loss: 5.0246
+[2025-07-06 07:03:38] [Rank 0] Group 9 Loss: 5.0200
+[2025-07-06 07:03:38] [Rank 0] Group 10 Loss: 5.0479
+[2025-07-06 07:03:38] [Rank 0] Group 11 Loss: 5.0690
+[2025-07-06 07:03:38] [Rank 0] Group 0 FTA: 0.5059
+[2025-07-06 07:03:38] [Rank 0] Group 1 FTA: 0.1823
+[2025-07-06 07:03:38] [Rank 0] Group 2 FTA: 0.5938
+[2025-07-06 07:03:38] [Rank 0] Group 3 FTA: 0.4141
+[2025-07-06 07:03:38] [Rank 0] Group 4 FTA: 0.4896
+[2025-07-06 07:03:38] [Rank 0] Group 5 FTA: 0.5911
+[2025-07-06 07:03:38] [Rank 0] Group 6 FTA: 0.4974
+[2025-07-06 07:03:38] [Rank 0] Group 7 FTA: 0.4688
+[2025-07-06 07:03:38] [Rank 0] Group 8 FTA: 0.4661
+[2025-07-06 07:03:38] [Rank 0] Group 9 FTA: 0.5156
+[2025-07-06 07:03:38] [Rank 0] Group 10 FTA: 0.5293
+[2025-07-06 07:03:38] [Rank 0] Group 11 FTA: 0.5234
+[2025-07-06 07:03:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:03:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:03:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:03:39] [Rank 0] step:2001/10000 train_time:162399ms step_avg:81.16ms
+[2025-07-06 07:03:41] [Rank 0] step:2021/10000 train_time:163869ms step_avg:81.08ms
+[2025-07-06 07:03:42] [Rank 0] step:2041/10000 train_time:165335ms step_avg:81.01ms
+[2025-07-06 07:03:44] [Rank 0] step:2061/10000 train_time:166807ms step_avg:80.94ms
+[2025-07-06 07:03:46] [Rank 0] step:2081/10000 train_time:168923ms step_avg:81.17ms
+[2025-07-06 07:03:47] [Rank 0] step:2101/10000 train_time:170397ms step_avg:81.10ms
+[2025-07-06 07:03:49] [Rank 0] step:2121/10000 train_time:171869ms step_avg:81.03ms
+[2025-07-06 07:03:50] [Rank 0] step:2141/10000 train_time:173343ms step_avg:80.96ms
+[2025-07-06 07:03:52] [Rank 0] step:2161/10000 train_time:175474ms step_avg:81.20ms
+[2025-07-06 07:03:54] [Rank 0] step:2181/10000 train_time:177033ms step_avg:81.17ms
+[2025-07-06 07:03:55] [Rank 0] step:2201/10000 train_time:178506ms step_avg:81.10ms
+[2025-07-06 07:03:57] [Rank 0] step:2221/10000 train_time:179980ms step_avg:81.04ms
+[2025-07-06 07:03:58] [Rank 0] step:2241/10000 train_time:181477ms step_avg:80.98ms
+[2025-07-06 07:04:00] [Rank 0] step:2261/10000 train_time:183627ms step_avg:81.21ms
+[2025-07-06 07:04:02] [Rank 0] step:2281/10000 train_time:185128ms step_avg:81.16ms
+[2025-07-06 07:04:03] [Rank 0] step:2301/10000 train_time:186629ms step_avg:81.11ms
+[2025-07-06 07:04:05] [Rank 0] step:2321/10000 train_time:188132ms step_avg:81.06ms
+[2025-07-06 07:04:07] [Rank 0] step:2341/10000 train_time:189632ms step_avg:81.00ms
+[2025-07-06 07:04:08] [Rank 0] step:2361/10000 train_time:191370ms step_avg:81.05ms
+[2025-07-06 07:04:10] [Rank 0] step:2381/10000 train_time:192870ms step_avg:81.00ms
+[2025-07-06 07:04:11] [Rank 0] step:2401/10000 train_time:194372ms step_avg:80.95ms
+[2025-07-06 07:04:13] [Rank 0] step:2421/10000 train_time:195874ms step_avg:80.91ms
+[2025-07-06 07:04:14] [Rank 0] step:2441/10000 train_time:197610ms step_avg:80.95ms
+[2025-07-06 07:04:16] [Rank 0] step:2461/10000 train_time:199114ms step_avg:80.91ms
+[2025-07-06 07:04:17] [Rank 0] step:2481/10000 train_time:200619ms step_avg:80.86ms
+[2025-07-06 07:04:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:04:20] [Rank 0] PRINT: step:2500/10000 train_loss:0.9935 val_loss:0.9325 train_time:202128ms step_avg:80.85ms
+[2025-07-06 07:04:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:04:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
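A note on the divisibility warning that recurs at every validation pass above: the logged numbers make the gap concrete. The sketch below reconstructs the check with illustrative variable names (the script's own validation loop is not shown in this log); assuming the loop advances in whole batches, exactly half a batch of tokens goes unevaluated each pass.

    val_tokens = 1966080      # total validation tokens requested per pass
    val_batch_size = 262144   # tokens consumed by one validation step

    full_batches, remainder = divmod(val_tokens, val_batch_size)
    # 1966080 / 262144 = 7.5, so divmod gives 7 full batches and a
    # remainder of 131072 tokens that a whole-batch loop never sees.
    assert (full_batches, remainder) == (7, 131072)
    if remainder != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")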
+[2025-07-06 07:04:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:09:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:09:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:09:45] [Rank 0] Total Loss: 5.1449
+[2025-07-06 07:09:45] [Rank 0] Total FTA: 0.7866
+[2025-07-06 07:09:45] [Rank 0] Group 0 Loss: 5.3150
+[2025-07-06 07:09:45] [Rank 0] Group 1 Loss: 4.8922
+[2025-07-06 07:09:45] [Rank 0] Group 2 Loss: 4.9504
+[2025-07-06 07:09:45] [Rank 0] Group 3 Loss: 5.2117
+[2025-07-06 07:09:45] [Rank 0] Group 4 Loss: 5.1729
+[2025-07-06 07:09:45] [Rank 0] Group 5 Loss: 5.1499
+[2025-07-06 07:09:45] [Rank 0] Group 6 Loss: 5.0378
+[2025-07-06 07:09:45] [Rank 0] Group 7 Loss: 5.1413
+[2025-07-06 07:09:45] [Rank 0] Group 8 Loss: 5.1783
+[2025-07-06 07:09:45] [Rank 0] Group 9 Loss: 5.1570
+[2025-07-06 07:09:45] [Rank 0] Group 10 Loss: 5.1654
+[2025-07-06 07:09:45] [Rank 0] Group 11 Loss: 5.1631
+[2025-07-06 07:09:45] [Rank 0] Group 0 FTA: 0.8440
+[2025-07-06 07:09:45] [Rank 0] Group 1 FTA: 0.8333
+[2025-07-06 07:09:45] [Rank 0] Group 2 FTA: 0.5964
+[2025-07-06 07:09:45] [Rank 0] Group 3 FTA: 0.7760
+[2025-07-06 07:09:45] [Rank 0] Group 4 FTA: 0.7995
+[2025-07-06 07:09:45] [Rank 0] Group 5 FTA: 0.7812
+[2025-07-06 07:09:45] [Rank 0] Group 6 FTA: 0.7891
+[2025-07-06 07:09:45] [Rank 0] Group 7 FTA: 0.7839
+[2025-07-06 07:09:45] [Rank 0] Group 8 FTA: 0.7526
+[2025-07-06 07:09:45] [Rank 0] Group 9 FTA: 0.8320
+[2025-07-06 07:09:45] [Rank 0] Group 10 FTA: 0.7949
+[2025-07-06 07:09:45] [Rank 0] Group 11 FTA: 0.7959
+[2025-07-06 07:09:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:09:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:09:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:09:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:09:47] [Rank 0] step:2501/10000 train_time:202148ms step_avg:80.83ms
+[2025-07-06 07:09:48] [Rank 0] step:2521/10000 train_time:203708ms step_avg:80.80ms
+[2025-07-06 07:09:50] [Rank 0] step:2541/10000 train_time:205388ms step_avg:80.83ms
+[2025-07-06 07:09:51] [Rank 0] step:2561/10000 train_time:206883ms step_avg:80.78ms
+[2025-07-06 07:09:53] [Rank 0] step:2581/10000 train_time:208379ms step_avg:80.74ms
+[2025-07-06 07:09:54] [Rank 0] step:2601/10000 train_time:209874ms step_avg:80.69ms
+[2025-07-06 07:09:57] [Rank 0] step:2621/10000 train_time:212077ms step_avg:80.91ms
+[2025-07-06 07:09:58] [Rank 0] step:2641/10000 train_time:213573ms step_avg:80.87ms
+[2025-07-06 07:10:00] [Rank 0] step:2661/10000 train_time:215073ms step_avg:80.82ms
+[2025-07-06 07:10:01] [Rank 0] step:2681/10000 train_time:216572ms step_avg:80.78ms
+[2025-07-06 07:10:03] [Rank 0] step:2701/10000 train_time:218125ms step_avg:80.76ms
+[2025-07-06 07:10:05] [Rank 0] step:2721/10000 train_time:220225ms step_avg:80.94ms
+[2025-07-06 07:10:06] [Rank 0] step:2741/10000 train_time:221724ms step_avg:80.89ms
+[2025-07-06 07:10:08] [Rank 0] step:2761/10000 train_time:223224ms step_avg:80.85ms
+[2025-07-06 07:10:09] [Rank 0] step:2781/10000 train_time:224725ms step_avg:80.81ms
+[2025-07-06 07:10:11] [Rank 0] step:2801/10000 train_time:226864ms step_avg:80.99ms
+[2025-07-06 07:10:13] [Rank 0] step:2821/10000 train_time:228464ms step_avg:80.99ms
+[2025-07-06 07:10:14] [Rank 0] step:2841/10000 train_time:229965ms step_avg:80.95ms
+[2025-07-06 07:10:16] [Rank 0] step:2861/10000 train_time:231466ms step_avg:80.90ms
+[2025-07-06 07:10:18] [Rank 0] step:2881/10000 train_time:233220ms step_avg:80.95ms
+[2025-07-06 07:10:20] [Rank 0] step:2901/10000 train_time:235107ms step_avg:81.04ms
+[2025-07-06 07:10:21] [Rank 0] step:2921/10000 train_time:236611ms step_avg:81.00ms
+[2025-07-06 07:10:23] [Rank 0] step:2941/10000 train_time:238117ms step_avg:80.96ms
+[2025-07-06 07:10:24] [Rank 0] step:2961/10000 train_time:239620ms step_avg:80.93ms
+[2025-07-06 07:10:26] [Rank 0] step:2981/10000 train_time:241360ms step_avg:80.97ms
+[2025-07-06 07:10:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:10:28] [Rank 0] PRINT: step:3000/10000 train_loss:0.9140 val_loss:0.8905 train_time:242866ms step_avg:80.96ms
+[2025-07-06 07:10:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:10:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
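The stratified-sampling message repeats verbatim before every detailed evaluation, and the sampled set always lands at 5633 rather than exactly 5000, so the per-group selection is evidently deterministic across evaluations. Below is a minimal sketch of per-group quota sampling with assumed helper names (the sampler itself is not shown in this log); rounding the quota up per group is one plausible way a ~5000 target overshoots.

    import random
    from collections import defaultdict

    def stratified_sample(examples, target=5000, seed=42):
        # Sketch of stratified sampling over the 12 groups seen in this log.
        # `examples` is a list of (group_id, sample) pairs; names are illustrative.
        by_group = defaultdict(list)
        for group_id, sample in examples:
            by_group[group_id].append(sample)
        quota = -(-target // len(by_group))  # ceiling division: equal per-group quota
        rng = random.Random(seed)
        picked = []
        for pool in by_group.values():
            picked.extend(rng.sample(pool, min(quota, len(pool))))
        return picked  # per-group rounding can push the total above `target`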
+[2025-07-06 07:10:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:15:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:15:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:15:52] [Rank 0] Total Loss: 5.2968
+[2025-07-06 07:15:52] [Rank 0] Total FTA: 0.8376
+[2025-07-06 07:15:52] [Rank 0] Group 0 Loss: 5.5903
+[2025-07-06 07:15:52] [Rank 0] Group 1 Loss: 5.0239
+[2025-07-06 07:15:52] [Rank 0] Group 2 Loss: 5.0242
+[2025-07-06 07:15:52] [Rank 0] Group 3 Loss: 5.5453
+[2025-07-06 07:15:52] [Rank 0] Group 4 Loss: 5.3784
+[2025-07-06 07:15:52] [Rank 0] Group 5 Loss: 5.2606
+[2025-07-06 07:15:52] [Rank 0] Group 6 Loss: 5.1534
+[2025-07-06 07:15:52] [Rank 0] Group 7 Loss: 5.2838
+[2025-07-06 07:15:52] [Rank 0] Group 8 Loss: 5.2562
+[2025-07-06 07:15:52] [Rank 0] Group 9 Loss: 5.2580
+[2025-07-06 07:15:52] [Rank 0] Group 10 Loss: 5.2618
+[2025-07-06 07:15:52] [Rank 0] Group 11 Loss: 5.2719
+[2025-07-06 07:15:52] [Rank 0] Group 0 FTA: 0.6814
+[2025-07-06 07:15:52] [Rank 0] Group 1 FTA: 0.6875
+[2025-07-06 07:15:52] [Rank 0] Group 2 FTA: 0.8255
+[2025-07-06 07:15:52] [Rank 0] Group 3 FTA: 0.8672
+[2025-07-06 07:15:52] [Rank 0] Group 4 FTA: 0.8776
+[2025-07-06 07:15:52] [Rank 0] Group 5 FTA: 0.8776
+[2025-07-06 07:15:52] [Rank 0] Group 6 FTA: 0.9089
+[2025-07-06 07:15:52] [Rank 0] Group 7 FTA: 0.8906
+[2025-07-06 07:15:52] [Rank 0] Group 8 FTA: 0.8672
+[2025-07-06 07:15:52] [Rank 0] Group 9 FTA: 0.8555
+[2025-07-06 07:15:52] [Rank 0] Group 10 FTA: 0.8906
+[2025-07-06 07:15:52] [Rank 0] Group 11 FTA: 0.8857
+[2025-07-06 07:15:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:15:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:15:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:15:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:15:54] [Rank 0] step:3001/10000 train_time:242888ms step_avg:80.94ms
+[2025-07-06 07:15:55] [Rank 0] step:3021/10000 train_time:244385ms step_avg:80.90ms
+[2025-07-06 07:15:57] [Rank 0] step:3041/10000 train_time:245877ms step_avg:80.85ms
+[2025-07-06 07:15:59] [Rank 0] step:3061/10000 train_time:247628ms step_avg:80.90ms
+[2025-07-06 07:16:00] [Rank 0] step:3081/10000 train_time:249527ms step_avg:80.99ms
+[2025-07-06 07:16:02] [Rank 0] step:3101/10000 train_time:251023ms step_avg:80.95ms
+[2025-07-06 07:16:03] [Rank 0] step:3121/10000 train_time:252519ms step_avg:80.91ms
+[2025-07-06 07:16:05] [Rank 0] step:3141/10000 train_time:254017ms step_avg:80.87ms
+[2025-07-06 07:16:07] [Rank 0] step:3161/10000 train_time:256169ms step_avg:81.04ms
+[2025-07-06 07:16:08] [Rank 0] step:3181/10000 train_time:257666ms step_avg:81.00ms
+[2025-07-06 07:16:10] [Rank 0] step:3201/10000 train_time:259165ms step_avg:80.96ms
+[2025-07-06 07:16:11] [Rank 0] step:3221/10000 train_time:260663ms step_avg:80.93ms
+[2025-07-06 07:16:13] [Rank 0] step:3241/10000 train_time:262836ms step_avg:81.10ms
+[2025-07-06 07:16:15] [Rank 0] step:3261/10000 train_time:264403ms step_avg:81.08ms
+[2025-07-06 07:16:17] [Rank 0] step:3281/10000 train_time:266089ms step_avg:81.10ms
+[2025-07-06 07:16:18] [Rank 0] step:3301/10000 train_time:267590ms step_avg:81.06ms
+[2025-07-06 07:16:20] [Rank 0] step:3321/10000 train_time:269089ms step_avg:81.03ms
+[2025-07-06 07:16:22] [Rank 0] step:3341/10000 train_time:271242ms step_avg:81.19ms
+[2025-07-06 07:16:24] [Rank 0] step:3361/10000 train_time:272841ms step_avg:81.18ms
+[2025-07-06 07:16:25] [Rank 0] step:3381/10000 train_time:274343ms step_avg:81.14ms
+[2025-07-06 07:16:27] [Rank 0] step:3401/10000 train_time:275845ms step_avg:81.11ms
+[2025-07-06 07:16:28] [Rank 0] step:3421/10000 train_time:277499ms step_avg:81.12ms
+[2025-07-06 07:16:30] [Rank 0] step:3441/10000 train_time:278983ms step_avg:81.08ms
+[2025-07-06 07:16:31] [Rank 0] step:3461/10000 train_time:280487ms step_avg:81.04ms
+[2025-07-06 07:16:33] [Rank 0] step:3481/10000 train_time:281991ms step_avg:81.01ms
+[2025-07-06 07:16:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:16:35] [Rank 0] PRINT: step:3500/10000 train_loss:0.8887 val_loss:0.8799 train_time:283497ms step_avg:81.00ms
+[2025-07-06 07:16:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:16:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
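The step_avg field is simply the cumulative train_time divided by the step count, which the logged values confirm; for example, at the step-3500 checkpoint above:

    train_time_ms = 283497  # logged at step 3500
    step = 3500
    print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:81.00ms, as logged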
+[2025-07-06 07:16:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:21:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:21:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:21:59] [Rank 0] Total Loss: 5.2500
+[2025-07-06 07:21:59] [Rank 0] Total FTA: 0.9018
+[2025-07-06 07:21:59] [Rank 0] Group 0 Loss: 5.5145
+[2025-07-06 07:21:59] [Rank 0] Group 1 Loss: 5.0570
+[2025-07-06 07:21:59] [Rank 0] Group 2 Loss: 5.0277
+[2025-07-06 07:21:59] [Rank 0] Group 3 Loss: 5.3077
+[2025-07-06 07:21:59] [Rank 0] Group 4 Loss: 5.3431
+[2025-07-06 07:21:59] [Rank 0] Group 5 Loss: 5.2344
+[2025-07-06 07:21:59] [Rank 0] Group 6 Loss: 5.1745
+[2025-07-06 07:21:59] [Rank 0] Group 7 Loss: 5.2555
+[2025-07-06 07:21:59] [Rank 0] Group 8 Loss: 5.2410
+[2025-07-06 07:21:59] [Rank 0] Group 9 Loss: 5.1843
+[2025-07-06 07:21:59] [Rank 0] Group 10 Loss: 5.2084
+[2025-07-06 07:21:59] [Rank 0] Group 11 Loss: 5.2231
+[2025-07-06 07:21:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 07:21:59] [Rank 0] Group 1 FTA: 0.8307
+[2025-07-06 07:21:59] [Rank 0] Group 2 FTA: 0.9323
+[2025-07-06 07:21:59] [Rank 0] Group 3 FTA: 0.8281
+[2025-07-06 07:21:59] [Rank 0] Group 4 FTA: 0.9141
+[2025-07-06 07:21:59] [Rank 0] Group 5 FTA: 0.8307
+[2025-07-06 07:21:59] [Rank 0] Group 6 FTA: 0.8620
+[2025-07-06 07:21:59] [Rank 0] Group 7 FTA: 0.9349
+[2025-07-06 07:21:59] [Rank 0] Group 8 FTA: 0.8750
+[2025-07-06 07:21:59] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-06 07:21:59] [Rank 0] Group 10 FTA: 0.8848
+[2025-07-06 07:21:59] [Rank 0] Group 11 FTA: 0.9121
+[2025-07-06 07:21:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:22:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:22:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:22:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:22:00] [Rank 0] step:3501/10000 train_time:283520ms step_avg:80.98ms
+[2025-07-06 07:22:03] [Rank 0] step:3521/10000 train_time:285655ms step_avg:81.13ms
+[2025-07-06 07:22:04] [Rank 0] step:3541/10000 train_time:287147ms step_avg:81.09ms
+[2025-07-06 07:22:06] [Rank 0] step:3561/10000 train_time:288646ms step_avg:81.06ms
+[2025-07-06 07:22:07] [Rank 0] step:3581/10000 train_time:290140ms step_avg:81.02ms
+[2025-07-06 07:22:09] [Rank 0] step:3601/10000 train_time:291686ms step_avg:81.00ms
+[2025-07-06 07:22:10] [Rank 0] step:3621/10000 train_time:293366ms step_avg:81.02ms
+[2025-07-06 07:22:12] [Rank 0] step:3641/10000 train_time:294863ms step_avg:80.98ms
+[2025-07-06 07:22:13] [Rank 0] step:3661/10000 train_time:296360ms step_avg:80.95ms
+[2025-07-06 07:22:15] [Rank 0] step:3681/10000 train_time:297856ms step_avg:80.92ms
+[2025-07-06 07:22:17] [Rank 0] step:3701/10000 train_time:299993ms step_avg:81.06ms
+[2025-07-06 07:22:18] [Rank 0] step:3721/10000 train_time:301489ms step_avg:81.02ms
+[2025-07-06 07:22:20] [Rank 0] step:3741/10000 train_time:302987ms step_avg:80.99ms
+[2025-07-06 07:22:21] [Rank 0] step:3761/10000 train_time:304486ms step_avg:80.96ms
+[2025-07-06 07:22:24] [Rank 0] step:3781/10000 train_time:306659ms step_avg:81.11ms
+[2025-07-06 07:22:25] [Rank 0] step:3801/10000 train_time:308139ms step_avg:81.07ms
+[2025-07-06 07:22:27] [Rank 0] step:3821/10000 train_time:309638ms step_avg:81.04ms
+[2025-07-06 07:22:28] [Rank 0] step:3841/10000 train_time:311136ms step_avg:81.00ms
+[2025-07-06 07:22:30] [Rank 0] step:3861/10000 train_time:312637ms step_avg:80.97ms
+[2025-07-06 07:22:32] [Rank 0] step:3881/10000 train_time:314779ms step_avg:81.11ms
+[2025-07-06 07:22:33] [Rank 0] step:3901/10000 train_time:316279ms step_avg:81.08ms
+[2025-07-06 07:22:35] [Rank 0] step:3921/10000 train_time:318014ms step_avg:81.11ms
+[2025-07-06 07:22:36] [Rank 0] step:3941/10000 train_time:319517ms step_avg:81.08ms
+[2025-07-06 07:22:39] [Rank 0] step:3961/10000 train_time:321699ms step_avg:81.22ms
+[2025-07-06 07:22:40] [Rank 0] step:3981/10000 train_time:323182ms step_avg:81.18ms
+[2025-07-06 07:22:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:22:42] [Rank 0] PRINT: step:4000/10000 train_loss:0.8800 val_loss:0.8743 train_time:324685ms step_avg:81.17ms
+[2025-07-06 07:22:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:22:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
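Each detailed evaluation ends by redrawing the same four PNGs in place (per-class and total curves, for loss and FTA). A minimal sketch of that overwrite-on-update pattern with matplotlib follows; the history structure and function name are assumptions, since the plotting code itself is not in this log.

    import matplotlib.pyplot as plt

    def update_curve_png(history, out_path):
        # `history` maps a series name (e.g. "Group 0") to one value per
        # evaluation; redrawing and saving to the same path overwrites the
        # previous PNG, matching the "curve updated and saved" messages above.
        fig, ax = plt.subplots()
        for name, values in sorted(history.items()):
            ax.plot(range(len(values)), values, label=name)
        ax.set_xlabel("evaluation index (one per 500 training steps)")
        ax.legend(fontsize="small")
        fig.savefig(out_path)
        plt.close(fig)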
+[2025-07-06 07:22:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:28:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:28:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:28:07] [Rank 0] Total Loss: 5.3720
+[2025-07-06 07:28:07] [Rank 0] Total FTA: 0.8782
+[2025-07-06 07:28:07] [Rank 0] Group 0 Loss: 5.6573
+[2025-07-06 07:28:07] [Rank 0] Group 1 Loss: 5.1611
+[2025-07-06 07:28:07] [Rank 0] Group 2 Loss: 5.1176
+[2025-07-06 07:28:07] [Rank 0] Group 3 Loss: 5.5273
+[2025-07-06 07:28:07] [Rank 0] Group 4 Loss: 5.4237
+[2025-07-06 07:28:07] [Rank 0] Group 5 Loss: 5.3576
+[2025-07-06 07:28:08] [Rank 0] Group 6 Loss: 5.2192
+[2025-07-06 07:28:08] [Rank 0] Group 7 Loss: 5.3640
+[2025-07-06 07:28:08] [Rank 0] Group 8 Loss: 5.3626
+[2025-07-06 07:28:08] [Rank 0] Group 9 Loss: 5.3077
+[2025-07-06 07:28:08] [Rank 0] Group 10 Loss: 5.3773
+[2025-07-06 07:28:08] [Rank 0] Group 11 Loss: 5.3373
+[2025-07-06 07:28:08] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-06 07:28:08] [Rank 0] Group 1 FTA: 0.6927
+[2025-07-06 07:28:08] [Rank 0] Group 2 FTA: 0.9167
+[2025-07-06 07:28:08] [Rank 0] Group 3 FTA: 0.8802
+[2025-07-06 07:28:08] [Rank 0] Group 4 FTA: 0.9036
+[2025-07-06 07:28:08] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-06 07:28:08] [Rank 0] Group 6 FTA: 0.9297
+[2025-07-06 07:28:08] [Rank 0] Group 7 FTA: 0.8958
+[2025-07-06 07:28:08] [Rank 0] Group 8 FTA: 0.8698
+[2025-07-06 07:28:08] [Rank 0] Group 9 FTA: 0.8984
+[2025-07-06 07:28:08] [Rank 0] Group 10 FTA: 0.8828
+[2025-07-06 07:28:08] [Rank 0] Group 11 FTA: 0.9043
+[2025-07-06 07:28:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:28:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:28:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:28:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:28:09] [Rank 0] step:4001/10000 train_time:324706ms step_avg:81.16ms
+[2025-07-06 07:28:11] [Rank 0] step:4021/10000 train_time:326199ms step_avg:81.12ms
+[2025-07-06 07:28:12] [Rank 0] step:4041/10000 train_time:327691ms step_avg:81.09ms
+[2025-07-06 07:28:14] [Rank 0] step:4061/10000 train_time:329843ms step_avg:81.22ms
+[2025-07-06 07:28:16] [Rank 0] step:4081/10000 train_time:331337ms step_avg:81.19ms
+[2025-07-06 07:28:17] [Rank 0] step:4101/10000 train_time:332833ms step_avg:81.16ms
+[2025-07-06 07:28:19] [Rank 0] step:4121/10000 train_time:334329ms step_avg:81.13ms
+[2025-07-06 07:28:21] [Rank 0] step:4141/10000 train_time:336509ms step_avg:81.26ms
+[2025-07-06 07:28:23] [Rank 0] step:4161/10000 train_time:337985ms step_avg:81.23ms
+[2025-07-06 07:28:24] [Rank 0] step:4181/10000 train_time:339483ms step_avg:81.20ms
+[2025-07-06 07:28:26] [Rank 0] step:4201/10000 train_time:340981ms step_avg:81.17ms
+[2025-07-06 07:28:27] [Rank 0] step:4221/10000 train_time:342479ms step_avg:81.14ms
+[2025-07-06 07:28:29] [Rank 0] step:4241/10000 train_time:344628ms step_avg:81.26ms
+[2025-07-06 07:28:31] [Rank 0] step:4261/10000 train_time:346136ms step_avg:81.23ms
+[2025-07-06 07:28:32] [Rank 0] step:4281/10000 train_time:347634ms step_avg:81.20ms
+[2025-07-06 07:28:34] [Rank 0] step:4301/10000 train_time:349133ms step_avg:81.17ms
+[2025-07-06 07:28:35] [Rank 0] step:4321/10000 train_time:350633ms step_avg:81.15ms
+[2025-07-06 07:28:37] [Rank 0] step:4341/10000 train_time:352368ms step_avg:81.17ms
+[2025-07-06 07:28:38] [Rank 0] step:4361/10000 train_time:353870ms step_avg:81.14ms
+[2025-07-06 07:28:40] [Rank 0] step:4381/10000 train_time:355372ms step_avg:81.12ms
+[2025-07-06 07:28:41] [Rank 0] step:4401/10000 train_time:356876ms step_avg:81.09ms
+[2025-07-06 07:28:43] [Rank 0] step:4421/10000 train_time:358615ms step_avg:81.12ms
+[2025-07-06 07:28:45] [Rank 0] step:4441/10000 train_time:360115ms step_avg:81.09ms
+[2025-07-06 07:28:46] [Rank 0] step:4461/10000 train_time:361615ms step_avg:81.06ms
+[2025-07-06 07:28:48] [Rank 0] step:4481/10000 train_time:363117ms step_avg:81.03ms
+[2025-07-06 07:28:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:28:50] [Rank 0] PRINT: step:4500/10000 train_loss:0.8748 val_loss:0.8723 train_time:364622ms step_avg:81.03ms
+[2025-07-06 07:28:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:28:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
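The validation print and detailed evaluation fire at steps 2500, 3000, 3500, 4000, 4500, ..., i.e. on a fixed 500-step cadence. A trigger of roughly the following shape would reproduce that pattern (a sketch with illustrative constant names; the script's actual condition may differ):

    NUM_ITERATIONS = 10000
    EVAL_EVERY = 500  # matches the cadence observed in this log

    for step in range(1, NUM_ITERATIONS + 1):
        # ... one training step ...
        if step % EVAL_EVERY == 0:
            pass  # compute val_loss, then run the detailed per-group Loss/FTA pass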
+[2025-07-06 07:28:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:34:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:34:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:34:15] [Rank 0] Total Loss: 5.3843
+[2025-07-06 07:34:15] [Rank 0] Total FTA: 0.8731
+[2025-07-06 07:34:15] [Rank 0] Group 0 Loss: 5.7187
+[2025-07-06 07:34:15] [Rank 0] Group 1 Loss: 5.2414
+[2025-07-06 07:34:15] [Rank 0] Group 2 Loss: 5.1503
+[2025-07-06 07:34:15] [Rank 0] Group 3 Loss: 5.4491
+[2025-07-06 07:34:15] [Rank 0] Group 4 Loss: 5.4544
+[2025-07-06 07:34:15] [Rank 0] Group 5 Loss: 5.3641
+[2025-07-06 07:34:15] [Rank 0] Group 6 Loss: 5.2575
+[2025-07-06 07:34:15] [Rank 0] Group 7 Loss: 5.3458
+[2025-07-06 07:34:15] [Rank 0] Group 8 Loss: 5.3098
+[2025-07-06 07:34:15] [Rank 0] Group 9 Loss: 5.2907
+[2025-07-06 07:34:15] [Rank 0] Group 10 Loss: 5.3415
+[2025-07-06 07:34:15] [Rank 0] Group 11 Loss: 5.3661
+[2025-07-06 07:34:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 07:34:15] [Rank 0] Group 1 FTA: 0.6172
+[2025-07-06 07:34:15] [Rank 0] Group 2 FTA: 0.7266
+[2025-07-06 07:34:15] [Rank 0] Group 3 FTA: 0.8229
+[2025-07-06 07:34:15] [Rank 0] Group 4 FTA: 0.8776
+[2025-07-06 07:34:15] [Rank 0] Group 5 FTA: 0.8984
+[2025-07-06 07:34:15] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-06 07:34:15] [Rank 0] Group 7 FTA: 0.8620
+[2025-07-06 07:34:15] [Rank 0] Group 8 FTA: 0.8828
+[2025-07-06 07:34:15] [Rank 0] Group 9 FTA: 0.8984
+[2025-07-06 07:34:15] [Rank 0] Group 10 FTA: 0.8828
+[2025-07-06 07:34:15] [Rank 0] Group 11 FTA: 0.9072
+[2025-07-06 07:34:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:34:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:34:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:34:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:34:18] [Rank 0] step:4501/10000 train_time:364960ms step_avg:81.08ms
+[2025-07-06 07:34:19] [Rank 0] step:4521/10000 train_time:366857ms step_avg:81.15ms
+[2025-07-06 07:34:20] [Rank 0] step:4541/10000 train_time:368349ms step_avg:81.12ms
+[2025-07-06 07:34:22] [Rank 0] step:4561/10000 train_time:369843ms step_avg:81.09ms
+[2025-07-06 07:34:23] [Rank 0] step:4581/10000 train_time:371337ms step_avg:81.06ms
+[2025-07-06 07:34:26] [Rank 0] step:4601/10000 train_time:373503ms step_avg:81.18ms
+[2025-07-06 07:34:27] [Rank 0] step:4621/10000 train_time:374998ms step_avg:81.15ms
+[2025-07-06 07:34:29] [Rank 0] step:4641/10000 train_time:376493ms step_avg:81.12ms
+[2025-07-06 07:34:30] [Rank 0] step:4661/10000 train_time:377990ms step_avg:81.10ms
+[2025-07-06 07:34:32] [Rank 0] step:4681/10000 train_time:379539ms step_avg:81.08ms
+[2025-07-06 07:34:34] [Rank 0] step:4701/10000 train_time:381644ms step_avg:81.18ms
+[2025-07-06 07:34:35] [Rank 0] step:4721/10000 train_time:383142ms step_avg:81.16ms
+[2025-07-06 07:34:37] [Rank 0] step:4741/10000 train_time:384642ms step_avg:81.13ms
+[2025-07-06 07:34:38] [Rank 0] step:4761/10000 train_time:386140ms step_avg:81.10ms
+[2025-07-06 07:34:40] [Rank 0] step:4781/10000 train_time:388286ms step_avg:81.21ms
+[2025-07-06 07:34:42] [Rank 0] step:4801/10000 train_time:389786ms step_avg:81.19ms
+[2025-07-06 07:34:43] [Rank 0] step:4821/10000 train_time:391285ms step_avg:81.16ms
+[2025-07-06 07:34:45] [Rank 0] step:4841/10000 train_time:392786ms step_avg:81.14ms
+[2025-07-06 07:34:47] [Rank 0] step:4861/10000 train_time:394951ms step_avg:81.25ms
+[2025-07-06 07:34:49] [Rank 0] step:4881/10000 train_time:396433ms step_avg:81.22ms
+[2025-07-06 07:34:50] [Rank 0] step:4901/10000 train_time:397935ms step_avg:81.19ms
+[2025-07-06 07:34:52] [Rank 0] step:4921/10000 train_time:399438ms step_avg:81.17ms
+[2025-07-06 07:34:53] [Rank 0] step:4941/10000 train_time:400940ms step_avg:81.15ms
+[2025-07-06 07:34:55] [Rank 0] step:4961/10000 train_time:403106ms step_avg:81.25ms
+[2025-07-06 07:34:57] [Rank 0] step:4981/10000 train_time:404609ms step_avg:81.23ms
+[2025-07-06 07:34:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:34:59] [Rank 0] PRINT: step:5000/10000 train_loss:0.8714 val_loss:0.8691 train_time:406111ms step_avg:81.22ms
+[2025-07-06 07:34:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:34:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
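Note that train_time is essentially frozen across an evaluation: step 5000 logs train_time:406111ms at 07:34:59 and step 5001 logs 406133ms at 07:40:29, so the roughly five and a half minutes spent on the detailed evaluation are excluded from the timer. A paused-accumulator sketch of that bookkeeping (illustrative, not the script's own code):

    import time

    class TrainTimer:
        # Accumulates wall time only while training runs; pausing it around
        # evaluation keeps train_time nearly flat across an eval, as the
        # step 5000 -> 5001 log lines show (406111ms -> 406133ms).
        def __init__(self):
            self.total_ms = 0.0
            self._start = None

        def resume(self):
            self._start = time.perf_counter()

        def pause(self):
            self.total_ms += (time.perf_counter() - self._start) * 1000.0
            self._start = None

    # usage: timer.pause() before the detailed evaluation, timer.resume() after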
+[2025-07-06 07:34:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:40:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:40:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:40:27] [Rank 0] Total Loss: 5.4565
+[2025-07-06 07:40:27] [Rank 0] Total FTA: 0.9317
+[2025-07-06 07:40:27] [Rank 0] Group 0 Loss: 5.7000
+[2025-07-06 07:40:27] [Rank 0] Group 1 Loss: 5.4769
+[2025-07-06 07:40:27] [Rank 0] Group 2 Loss: 5.3829
+[2025-07-06 07:40:27] [Rank 0] Group 3 Loss: 5.6123
+[2025-07-06 07:40:27] [Rank 0] Group 4 Loss: 5.4538
+[2025-07-06 07:40:27] [Rank 0] Group 5 Loss: 5.3638
+[2025-07-06 07:40:27] [Rank 0] Group 6 Loss: 5.2971
+[2025-07-06 07:40:27] [Rank 0] Group 7 Loss: 5.3834
+[2025-07-06 07:40:27] [Rank 0] Group 8 Loss: 5.3937
+[2025-07-06 07:40:27] [Rank 0] Group 9 Loss: 5.3744
+[2025-07-06 07:40:27] [Rank 0] Group 10 Loss: 5.4271
+[2025-07-06 07:40:27] [Rank 0] Group 11 Loss: 5.4169
+[2025-07-06 07:40:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 07:40:27] [Rank 0] Group 1 FTA: 0.8307
+[2025-07-06 07:40:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 07:40:27] [Rank 0] Group 3 FTA: 0.9688
+[2025-07-06 07:40:27] [Rank 0] Group 4 FTA: 0.9635
+[2025-07-06 07:40:27] [Rank 0] Group 5 FTA: 0.8776
+[2025-07-06 07:40:27] [Rank 0] Group 6 FTA: 0.9349
+[2025-07-06 07:40:27] [Rank 0] Group 7 FTA: 0.8906
+[2025-07-06 07:40:27] [Rank 0] Group 8 FTA: 0.9115
+[2025-07-06 07:40:27] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-06 07:40:27] [Rank 0] Group 10 FTA: 0.9219
+[2025-07-06 07:40:27] [Rank 0] Group 11 FTA: 0.9238
+[2025-07-06 07:40:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:40:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:40:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:40:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:40:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:40:29] [Rank 0] step:5001/10000 train_time:406133ms step_avg:81.21ms
+[2025-07-06 07:40:30] [Rank 0] step:5021/10000 train_time:407898ms step_avg:81.24ms
+[2025-07-06 07:40:32] [Rank 0] step:5041/10000 train_time:409444ms step_avg:81.22ms
+[2025-07-06 07:40:34] [Rank 0] step:5061/10000 train_time:411526ms step_avg:81.31ms
+[2025-07-06 07:40:35] [Rank 0] step:5081/10000 train_time:413019ms step_avg:81.29ms
+[2025-07-06 07:40:37] [Rank 0] step:5101/10000 train_time:414513ms step_avg:81.26ms
+[2025-07-06 07:40:38] [Rank 0] step:5121/10000 train_time:416009ms step_avg:81.24ms
+[2025-07-06 07:40:41] [Rank 0] step:5141/10000 train_time:418170ms step_avg:81.34ms
+[2025-07-06 07:40:42] [Rank 0] step:5161/10000 train_time:419666ms step_avg:81.31ms
+[2025-07-06 07:40:44] [Rank 0] step:5181/10000 train_time:421163ms step_avg:81.29ms
+[2025-07-06 07:40:45] [Rank 0] step:5201/10000 train_time:422659ms step_avg:81.26ms
+[2025-07-06 07:40:47] [Rank 0] step:5221/10000 train_time:424836ms step_avg:81.37ms
+[2025-07-06 07:40:49] [Rank 0] step:5241/10000 train_time:426316ms step_avg:81.34ms
+[2025-07-06 07:40:50] [Rank 0] step:5261/10000 train_time:427815ms step_avg:81.32ms
+[2025-07-06 07:40:52] [Rank 0] step:5281/10000 train_time:429313ms step_avg:81.29ms
+[2025-07-06 07:40:53] [Rank 0] step:5301/10000 train_time:430812ms step_avg:81.27ms
+[2025-07-06 07:40:55] [Rank 0] step:5321/10000 train_time:432957ms step_avg:81.37ms
+[2025-07-06 07:40:57] [Rank 0] step:5341/10000 train_time:434457ms step_avg:81.34ms
+[2025-07-06 07:40:58] [Rank 0] step:5361/10000 train_time:435956ms step_avg:81.32ms
+[2025-07-06 07:41:00] [Rank 0] step:5381/10000 train_time:437455ms step_avg:81.30ms
+[2025-07-06 07:41:02] [Rank 0] step:5401/10000 train_time:439009ms step_avg:81.28ms
+[2025-07-06 07:41:04] [Rank 0] step:5421/10000 train_time:441102ms step_avg:81.37ms
+[2025-07-06 07:41:05] [Rank 0] step:5441/10000 train_time:442602ms step_avg:81.35ms
+[2025-07-06 07:41:07] [Rank 0] step:5461/10000 train_time:444101ms step_avg:81.32ms
+[2025-07-06 07:41:08] [Rank 0] step:5481/10000 train_time:445602ms step_avg:81.30ms
+[2025-07-06 07:41:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:41:11] [Rank 0] PRINT: step:5500/10000 train_loss:0.8687 val_loss:0.8672 train_time:447770ms step_avg:81.41ms
+[2025-07-06 07:41:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:41:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 07:41:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:46:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:46:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:46:38] [Rank 0] Total Loss: 5.4834
+[2025-07-06 07:46:38] [Rank 0] Total FTA: 0.8993
+[2025-07-06 07:46:38] [Rank 0] Group 0 Loss: 5.8209
+[2025-07-06 07:46:38] [Rank 0] Group 1 Loss: 5.4494
+[2025-07-06 07:46:38] [Rank 0] Group 2 Loss: 5.4067
+[2025-07-06 07:46:38] [Rank 0] Group 3 Loss: 5.5029
+[2025-07-06 07:46:38] [Rank 0] Group 4 Loss: 5.4353
+[2025-07-06 07:46:38] [Rank 0] Group 5 Loss: 5.4025
+[2025-07-06 07:46:38] [Rank 0] Group 6 Loss: 5.3070
+[2025-07-06 07:46:38] [Rank 0] Group 7 Loss: 5.4564
+[2025-07-06 07:46:38] [Rank 0] Group 8 Loss: 5.4084
+[2025-07-06 07:46:38] [Rank 0] Group 9 Loss: 5.3829
+[2025-07-06 07:46:38] [Rank 0] Group 10 Loss: 5.4426
+[2025-07-06 07:46:38] [Rank 0] Group 11 Loss: 5.4622
+[2025-07-06 07:46:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 07:46:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 07:46:38] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-06 07:46:38] [Rank 0] Group 3 FTA: 0.8828
+[2025-07-06 07:46:38] [Rank 0] Group 4 FTA: 0.9062
+[2025-07-06 07:46:38] [Rank 0] Group 5 FTA: 0.8672
+[2025-07-06 07:46:38] [Rank 0] Group 6 FTA: 0.8698
+[2025-07-06 07:46:38] [Rank 0] Group 7 FTA: 0.8776
+[2025-07-06 07:46:38] [Rank 0] Group 8 FTA: 0.8542
+[2025-07-06 07:46:38] [Rank 0] Group 9 FTA: 0.8125
+[2025-07-06 07:46:38] [Rank 0] Group 10 FTA: 0.8691
+[2025-07-06 07:46:38] [Rank 0] Group 11 FTA: 0.8701
+[2025-07-06 07:46:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:46:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:46:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:46:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:46:40] [Rank 0] step:5501/10000 train_time:447793ms step_avg:81.40ms
+[2025-07-06 07:46:41] [Rank 0] step:5521/10000 train_time:449279ms step_avg:81.38ms
+[2025-07-06 07:46:43] [Rank 0] step:5541/10000 train_time:450772ms step_avg:81.35ms
+[2025-07-06 07:46:44] [Rank 0] step:5561/10000 train_time:452266ms step_avg:81.33ms
+[2025-07-06 07:46:47] [Rank 0] step:5581/10000 train_time:454440ms step_avg:81.43ms
+[2025-07-06 07:46:48] [Rank 0] step:5601/10000 train_time:455914ms step_avg:81.40ms
+[2025-07-06 07:46:50] [Rank 0] step:5621/10000 train_time:457645ms step_avg:81.42ms
+[2025-07-06 07:46:51] [Rank 0] step:5641/10000 train_time:459141ms step_avg:81.39ms
+[2025-07-06 07:46:53] [Rank 0] step:5661/10000 train_time:460639ms step_avg:81.37ms
+[2025-07-06 07:46:55] [Rank 0] step:5681/10000 train_time:462780ms step_avg:81.46ms
+[2025-07-06 07:46:56] [Rank 0] step:5701/10000 train_time:464276ms step_avg:81.44ms
+[2025-07-06 07:46:58] [Rank 0] step:5721/10000 train_time:465774ms step_avg:81.41ms
+[2025-07-06 07:46:59] [Rank 0] step:5741/10000 train_time:467274ms step_avg:81.39ms
+[2025-07-06 07:47:02] [Rank 0] step:5761/10000 train_time:468775ms step_avg:81.37ms
+[2025-07-06 07:47:03] [Rank 0] step:5781/10000 train_time:470929ms step_avg:81.46ms
+[2025-07-06 07:47:05] [Rank 0] step:5801/10000 train_time:472430ms step_avg:81.44ms
+[2025-07-06 07:47:06] [Rank 0] step:5821/10000 train_time:473931ms step_avg:81.42ms
+[2025-07-06 07:47:08] [Rank 0] step:5841/10000 train_time:475432ms step_avg:81.40ms
+[2025-07-06 07:47:09] [Rank 0] step:5861/10000 train_time:477165ms step_avg:81.41ms
+[2025-07-06 07:47:11] [Rank 0] step:5881/10000 train_time:478668ms step_avg:81.39ms
+[2025-07-06 07:47:12] [Rank 0] step:5901/10000 train_time:480170ms step_avg:81.37ms
+[2025-07-06 07:47:14] [Rank 0] step:5921/10000 train_time:481672ms step_avg:81.35ms
+[2025-07-06 07:47:16] [Rank 0] step:5941/10000 train_time:483174ms step_avg:81.33ms
+[2025-07-06 07:47:18] [Rank 0] step:5961/10000 train_time:485328ms step_avg:81.42ms
+[2025-07-06 07:47:19] [Rank 0] step:5981/10000 train_time:486828ms step_avg:81.40ms
+[2025-07-06 07:47:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:47:21] [Rank 0] PRINT: step:6000/10000 train_loss:0.8664 val_loss:0.8665 train_time:488330ms step_avg:81.39ms
+[2025-07-06 07:47:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:47:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 07:47:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:52:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:52:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:52:51] [Rank 0] Total Loss: 5.4665
+[2025-07-06 07:52:51] [Rank 0] Total FTA: 0.9196
+[2025-07-06 07:52:51] [Rank 0] Group 0 Loss: 5.7466
+[2025-07-06 07:52:51] [Rank 0] Group 1 Loss: 5.4109
+[2025-07-06 07:52:51] [Rank 0] Group 2 Loss: 5.2639
+[2025-07-06 07:52:51] [Rank 0] Group 3 Loss: 5.4280
+[2025-07-06 07:52:51] [Rank 0] Group 4 Loss: 5.4326
+[2025-07-06 07:52:51] [Rank 0] Group 5 Loss: 5.4288
+[2025-07-06 07:52:51] [Rank 0] Group 6 Loss: 5.3279
+[2025-07-06 07:52:51] [Rank 0] Group 7 Loss: 5.4823
+[2025-07-06 07:52:51] [Rank 0] Group 8 Loss: 5.4770
+[2025-07-06 07:52:51] [Rank 0] Group 9 Loss: 5.3742
+[2025-07-06 07:52:51] [Rank 0] Group 10 Loss: 5.4338
+[2025-07-06 07:52:51] [Rank 0] Group 11 Loss: 5.4758
+[2025-07-06 07:52:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 07:52:51] [Rank 0] Group 1 FTA: 0.8125
+[2025-07-06 07:52:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 07:52:51] [Rank 0] Group 3 FTA: 0.9010
+[2025-07-06 07:52:51] [Rank 0] Group 4 FTA: 0.9271
+[2025-07-06 07:52:51] [Rank 0] Group 5 FTA: 0.8854
+[2025-07-06 07:52:51] [Rank 0] Group 6 FTA: 0.9297
+[2025-07-06 07:52:51] [Rank 0] Group 7 FTA: 0.8958
+[2025-07-06 07:52:51] [Rank 0] Group 8 FTA: 0.8776
+[2025-07-06 07:52:51] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-06 07:52:51] [Rank 0] Group 10 FTA: 0.9141
+[2025-07-06 07:52:51] [Rank 0] Group 11 FTA: 0.9121
+[2025-07-06 07:52:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:52:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:52:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:52:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:52:52] [Rank 0] step:6001/10000 train_time:488354ms step_avg:81.38ms
+[2025-07-06 07:52:54] [Rank 0] step:6021/10000 train_time:489846ms step_avg:81.36ms
+[2025-07-06 07:52:56] [Rank 0] step:6041/10000 train_time:491993ms step_avg:81.44ms
+[2025-07-06 07:52:58] [Rank 0] step:6061/10000 train_time:493484ms step_avg:81.42ms
+[2025-07-06 07:52:59] [Rank 0] step:6081/10000 train_time:494977ms step_avg:81.40ms
+[2025-07-06 07:53:01] [Rank 0] step:6101/10000 train_time:496472ms step_avg:81.38ms
+[2025-07-06 07:53:02] [Rank 0] step:6121/10000 train_time:497968ms step_avg:81.35ms
+[2025-07-06 07:53:04] [Rank 0] step:6141/10000 train_time:499697ms step_avg:81.37ms
+[2025-07-06 07:53:05] [Rank 0] step:6161/10000 train_time:501194ms step_avg:81.35ms
+[2025-07-06 07:53:07] [Rank 0] step:6181/10000 train_time:502691ms step_avg:81.33ms
+[2025-07-06 07:53:08] [Rank 0] step:6201/10000 train_time:504340ms step_avg:81.33ms
+[2025-07-06 07:53:11] [Rank 0] step:6221/10000 train_time:506618ms step_avg:81.44ms
+[2025-07-06 07:53:12] [Rank 0] step:6241/10000 train_time:508117ms step_avg:81.42ms
+[2025-07-06 07:53:14] [Rank 0] step:6261/10000 train_time:509615ms step_avg:81.40ms
+[2025-07-06 07:53:15] [Rank 0] step:6281/10000 train_time:511114ms step_avg:81.37ms
+[2025-07-06 07:53:17] [Rank 0] step:6301/10000 train_time:512870ms step_avg:81.40ms
+[2025-07-06 07:53:19] [Rank 0] step:6321/10000 train_time:514765ms step_avg:81.44ms
+[2025-07-06 07:53:20] [Rank 0] step:6341/10000 train_time:516366ms step_avg:81.43ms
+[2025-07-06 07:53:22] [Rank 0] step:6361/10000 train_time:517865ms step_avg:81.41ms
+[2025-07-06 07:53:23] [Rank 0] step:6381/10000 train_time:519366ms step_avg:81.39ms
+[2025-07-06 07:53:26] [Rank 0] step:6401/10000 train_time:521532ms step_avg:81.48ms
+[2025-07-06 07:53:27] [Rank 0] step:6421/10000 train_time:523031ms step_avg:81.46ms
+[2025-07-06 07:53:29] [Rank 0] step:6441/10000 train_time:524532ms step_avg:81.44ms
+[2025-07-06 07:53:30] [Rank 0] step:6461/10000 train_time:526033ms step_avg:81.42ms
+[2025-07-06 07:53:32] [Rank 0] step:6481/10000 train_time:527789ms step_avg:81.44ms
+[2025-07-06 07:53:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:53:35] [Rank 0] PRINT: step:6500/10000 train_loss:0.8649 val_loss:0.8649 train_time:529677ms step_avg:81.49ms
+[2025-07-06 07:53:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:53:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 07:53:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 07:59:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 07:59:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 07:59:03] [Rank 0] Total Loss: 5.4159
+[2025-07-06 07:59:03] [Rank 0] Total FTA: 0.9036
+[2025-07-06 07:59:03] [Rank 0] Group 0 Loss: 5.4449
+[2025-07-06 07:59:03] [Rank 0] Group 1 Loss: 5.3475
+[2025-07-06 07:59:03] [Rank 0] Group 2 Loss: 5.2253
+[2025-07-06 07:59:03] [Rank 0] Group 3 Loss: 5.4955
+[2025-07-06 07:59:03] [Rank 0] Group 4 Loss: 5.4576
+[2025-07-06 07:59:03] [Rank 0] Group 5 Loss: 5.4295
+[2025-07-06 07:59:03] [Rank 0] Group 6 Loss: 5.3402
+[2025-07-06 07:59:03] [Rank 0] Group 7 Loss: 5.4397
+[2025-07-06 07:59:03] [Rank 0] Group 8 Loss: 5.4198
+[2025-07-06 07:59:03] [Rank 0] Group 9 Loss: 5.4383
+[2025-07-06 07:59:03] [Rank 0] Group 10 Loss: 5.4304
+[2025-07-06 07:59:03] [Rank 0] Group 11 Loss: 5.4459
+[2025-07-06 07:59:03] [Rank 0] Group 0 FTA: 0.8401
+[2025-07-06 07:59:03] [Rank 0] Group 1 FTA: 0.8203
+[2025-07-06 07:59:03] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 07:59:03] [Rank 0] Group 3 FTA: 0.8906
+[2025-07-06 07:59:03] [Rank 0] Group 4 FTA: 0.9062
+[2025-07-06 07:59:03] [Rank 0] Group 5 FTA: 0.8932
+[2025-07-06 07:59:03] [Rank 0] Group 6 FTA: 0.9245
+[2025-07-06 07:59:03] [Rank 0] Group 7 FTA: 0.9193
+[2025-07-06 07:59:03] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-06 07:59:03] [Rank 0] Group 9 FTA: 0.9336
+[2025-07-06 07:59:03] [Rank 0] Group 10 FTA: 0.9160
+[2025-07-06 07:59:03] [Rank 0] Group 11 FTA: 0.9297
+[2025-07-06 07:59:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 07:59:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 07:59:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 07:59:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 07:59:04] [Rank 0] step:6501/10000 train_time:529698ms step_avg:81.48ms
+[2025-07-06 07:59:06] [Rank 0] step:6521/10000 train_time:531185ms step_avg:81.46ms
+[2025-07-06 07:59:07] [Rank 0] step:6541/10000 train_time:532677ms step_avg:81.44ms
+[2025-07-06 07:59:09] [Rank 0] step:6561/10000 train_time:534171ms step_avg:81.42ms
+[2025-07-06 07:59:11] [Rank 0] step:6581/10000 train_time:536312ms step_avg:81.49ms
+[2025-07-06 07:59:12] [Rank 0] step:6601/10000 train_time:537807ms step_avg:81.47ms
+[2025-07-06 07:59:14] [Rank 0] step:6621/10000 train_time:539303ms step_avg:81.45ms
+[2025-07-06 07:59:15] [Rank 0] step:6641/10000 train_time:540799ms step_avg:81.43ms
+[2025-07-06 07:59:17] [Rank 0] step:6661/10000 train_time:542981ms step_avg:81.52ms
+[2025-07-06 07:59:19] [Rank 0] step:6681/10000 train_time:544459ms step_avg:81.49ms
+[2025-07-06 07:59:20] [Rank 0] step:6701/10000 train_time:545957ms step_avg:81.47ms
+[2025-07-06 07:59:22] [Rank 0] step:6721/10000 train_time:547456ms step_avg:81.45ms
+[2025-07-06 07:59:23] [Rank 0] step:6741/10000 train_time:548955ms step_avg:81.44ms
+[2025-07-06 07:59:26] [Rank 0] step:6761/10000 train_time:551101ms step_avg:81.51ms
+[2025-07-06 07:59:27] [Rank 0] step:6781/10000 train_time:552599ms step_avg:81.49ms
+[2025-07-06 07:59:29] [Rank 0] step:6801/10000 train_time:554337ms step_avg:81.51ms
+[2025-07-06 07:59:30] [Rank 0] step:6821/10000 train_time:555943ms step_avg:81.50ms
+[2025-07-06 07:59:33] [Rank 0] step:6841/10000 train_time:558117ms step_avg:81.58ms
+[2025-07-06 07:59:34] [Rank 0] step:6861/10000 train_time:559597ms step_avg:81.56ms
+[2025-07-06 07:59:36] [Rank 0] step:6881/10000 train_time:561095ms step_avg:81.54ms
+[2025-07-06 07:59:37] [Rank 0] step:6901/10000 train_time:562595ms step_avg:81.52ms
+[2025-07-06 07:59:39] [Rank 0] step:6921/10000 train_time:564094ms step_avg:81.50ms
+[2025-07-06 07:59:40] [Rank 0] step:6941/10000 train_time:565836ms step_avg:81.52ms
+[2025-07-06 07:59:42] [Rank 0] step:6961/10000 train_time:567336ms step_avg:81.50ms
+[2025-07-06 07:59:43] [Rank 0] step:6981/10000 train_time:568934ms step_avg:81.50ms
+[2025-07-06 07:59:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 07:59:46] [Rank 0] PRINT: step:7000/10000 train_loss:0.8636 val_loss:0.8642 train_time:570434ms step_avg:81.49ms
+[2025-07-06 07:59:46] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 07:59:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 07:59:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 08:05:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 08:05:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 08:05:13] [Rank 0] Total Loss: 5.4600
+[2025-07-06 08:05:13] [Rank 0] Total FTA: 0.9231
+[2025-07-06 08:05:13] [Rank 0] Group 0 Loss: 5.5780
+[2025-07-06 08:05:13] [Rank 0] Group 1 Loss: 5.3336
+[2025-07-06 08:05:13] [Rank 0] Group 2 Loss: 5.3327
+[2025-07-06 08:05:13] [Rank 0] Group 3 Loss: 5.5701
+[2025-07-06 08:05:13] [Rank 0] Group 4 Loss: 5.5591
+[2025-07-06 08:05:13] [Rank 0] Group 5 Loss: 5.4326
+[2025-07-06 08:05:13] [Rank 0] Group 6 Loss: 5.3157
+[2025-07-06 08:05:13] [Rank 0] Group 7 Loss: 5.5047
+[2025-07-06 08:05:13] [Rank 0] Group 8 Loss: 5.4214
+[2025-07-06 08:05:13] [Rank 0] Group 9 Loss: 5.4643
+[2025-07-06 08:05:13] [Rank 0] Group 10 Loss: 5.4623
+[2025-07-06 08:05:13] [Rank 0] Group 11 Loss: 5.4480
+[2025-07-06 08:05:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 08:05:13] [Rank 0] Group 1 FTA: 0.8255
+[2025-07-06 08:05:13] [Rank 0] Group 2 FTA: 0.8542
+[2025-07-06 08:05:13] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-06 08:05:13] [Rank 0] Group 4 FTA: 0.9141
+[2025-07-06 08:05:13] [Rank 0] Group 5 FTA: 0.9323
+[2025-07-06 08:05:13] [Rank 0] Group 6 FTA: 0.9089
+[2025-07-06 08:05:13] [Rank 0] Group 7 FTA: 0.9141
+[2025-07-06 08:05:13] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-06 08:05:13] [Rank 0] Group 9 FTA: 0.8984
+[2025-07-06 08:05:13] [Rank 0] Group 10 FTA: 0.9336
+[2025-07-06 08:05:13] [Rank 0] Group 11 FTA: 0.9238
+[2025-07-06 08:05:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 08:05:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 08:05:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 08:05:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 08:05:15] [Rank 0] step:7001/10000 train_time:570457ms step_avg:81.48ms
+[2025-07-06 08:05:17] [Rank 0] step:7021/10000 train_time:572013ms step_avg:81.47ms
+[2025-07-06 08:05:18] [Rank 0] step:7041/10000 train_time:574110ms step_avg:81.54ms
+[2025-07-06 08:05:20] [Rank 0] step:7061/10000 train_time:575703ms step_avg:81.53ms
+[2025-07-06 08:05:21] [Rank 0] step:7081/10000 train_time:577199ms step_avg:81.51ms
+[2025-07-06 08:05:23] [Rank 0] step:7101/10000 train_time:578695ms step_avg:81.49ms
+[2025-07-06 08:05:25] [Rank 0] step:7121/10000 train_time:580424ms step_avg:81.51ms
+[2025-07-06 08:05:26] [Rank 0] step:7141/10000 train_time:581920ms step_avg:81.49ms
+[2025-07-06 08:05:28] [Rank 0] step:7161/10000 train_time:583518ms step_avg:81.49ms
+[2025-07-06 08:05:29] [Rank 0] step:7181/10000 train_time:585015ms step_avg:81.47ms
+[2025-07-06 08:05:31] [Rank 0] step:7201/10000 train_time:587292ms step_avg:81.56ms
+[2025-07-06 08:05:33] [Rank 0] step:7221/10000 train_time:588770ms step_avg:81.54ms
+[2025-07-06 08:05:34] [Rank 0] step:7241/10000 train_time:590267ms step_avg:81.52ms
+[2025-07-06 08:05:36] [Rank 0] step:7261/10000 train_time:591765ms step_avg:81.50ms
+[2025-07-06 08:05:37] [Rank 0] step:7281/10000 train_time:593264ms step_avg:81.48ms
+[2025-07-06 08:05:40] [Rank 0] step:7301/10000 train_time:595412ms step_avg:81.55ms
+[2025-07-06 08:05:41] [Rank 0] step:7321/10000 train_time:596907ms step_avg:81.53ms
+[2025-07-06 08:05:43] [Rank 0] step:7341/10000 train_time:598405ms step_avg:81.52ms
+[2025-07-06 08:05:44] [Rank 0] step:7361/10000 train_time:599904ms step_avg:81.50ms
+[2025-07-06 08:05:46] [Rank 0] step:7381/10000 train_time:601456ms step_avg:81.49ms
+[2025-07-06 08:05:48] [Rank 0] step:7401/10000 train_time:603876ms step_avg:81.59ms
+[2025-07-06 08:05:50] [Rank 0] step:7421/10000 train_time:605376ms step_avg:81.58ms
+[2025-07-06 08:05:51] [Rank 0] step:7441/10000 train_time:606875ms step_avg:81.56ms
+[2025-07-06 08:05:53] [Rank 0] step:7461/10000 train_time:608375ms step_avg:81.54ms
+[2025-07-06 08:05:54] [Rank 0] step:7481/10000 train_time:610110ms step_avg:81.55ms
+[2025-07-06 08:05:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 08:05:57] [Rank 0] PRINT: step:7500/10000 train_loss:0.8625 val_loss:0.8629 train_time:611609ms step_avg:81.55ms
+[2025-07-06 08:05:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 08:05:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 08:05:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 08:11:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 08:11:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 08:11:25] [Rank 0] Total Loss: 5.4790
+[2025-07-06 08:11:25] [Rank 0] Total FTA: 0.9155
+[2025-07-06 08:11:25] [Rank 0] Group 0 Loss: 5.7087
+[2025-07-06 08:11:25] [Rank 0] Group 1 Loss: 5.4156
+[2025-07-06 08:11:25] [Rank 0] Group 2 Loss: 5.2263
+[2025-07-06 08:11:25] [Rank 0] Group 3 Loss: 5.5197
+[2025-07-06 08:11:25] [Rank 0] Group 4 Loss: 5.5652
+[2025-07-06 08:11:25] [Rank 0] Group 5 Loss: 5.4446
+[2025-07-06 08:11:25] [Rank 0] Group 6 Loss: 5.3647
+[2025-07-06 08:11:25] [Rank 0] Group 7 Loss: 5.4589
+[2025-07-06 08:11:25] [Rank 0] Group 8 Loss: 5.4574
+[2025-07-06 08:11:25] [Rank 0] Group 9 Loss: 5.4660
+[2025-07-06 08:11:25] [Rank 0] Group 10 Loss: 5.4575
+[2025-07-06 08:11:25] [Rank 0] Group 11 Loss: 5.4628
+[2025-07-06 08:11:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 08:11:25] [Rank 0] Group 1 FTA: 0.8229
+[2025-07-06 08:11:25] [Rank 0] Group 2 FTA: 0.8385
+[2025-07-06 08:11:25] [Rank 0] Group 3 FTA: 0.9219
+[2025-07-06 08:11:25] [Rank 0] Group 4 FTA: 0.9010
+[2025-07-06 08:11:25] [Rank 0] Group 5 FTA: 0.9089
+[2025-07-06 08:11:25] [Rank 0] Group 6 FTA: 0.8828
+[2025-07-06 08:11:25] [Rank 0] Group 7 FTA: 0.9062
+[2025-07-06 08:11:25] [Rank 0] Group 8 FTA: 0.9167
+[2025-07-06 08:11:25] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-06 08:11:25] [Rank 0] Group 10 FTA: 0.9355
+[2025-07-06 08:11:25] [Rank 0] Group 11 FTA: 0.9229
+[2025-07-06 08:11:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-06 08:11:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-06 08:11:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-06 08:11:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-06 08:11:27] [Rank 0] step:7501/10000 train_time:611632ms step_avg:81.54ms
+[2025-07-06 08:11:28] [Rank 0] step:7521/10000 train_time:613124ms step_avg:81.52ms
+[2025-07-06 08:11:30] [Rank 0] step:7541/10000 train_time:614617ms step_avg:81.50ms
+[2025-07-06 08:11:32] [Rank 0] step:7561/10000 train_time:616167ms step_avg:81.49ms
+[2025-07-06 08:11:33] [Rank 0] step:7581/10000 train_time:618250ms step_avg:81.55ms
+[2025-07-06 08:11:35] [Rank 0] step:7601/10000 train_time:619744ms step_avg:81.53ms
+[2025-07-06 08:11:36] [Rank 0] step:7621/10000 train_time:621240ms step_avg:81.52ms
+[2025-07-06 08:11:38] [Rank 0] step:7641/10000 train_time:622737ms step_avg:81.50ms
+[2025-07-06 08:11:40] [Rank 0] step:7661/10000 train_time:624987ms step_avg:81.58ms
+[2025-07-06 08:11:41] [Rank 0] step:7681/10000 train_time:626484ms step_avg:81.56ms
+[2025-07-06 08:11:43] [Rank 0] step:7701/10000 train_time:627981ms step_avg:81.55ms
+[2025-07-06 08:11:44] [Rank 0] step:7721/10000 train_time:629478ms step_avg:81.53ms
+[2025-07-06 08:11:46] [Rank 0] step:7741/10000 train_time:631026ms step_avg:81.52ms
+[2025-07-06 08:11:48] [Rank 0] step:7761/10000 train_time:632710ms step_avg:81.52ms
+[2025-07-06 08:11:49] [Rank 0] step:7781/10000 train_time:634208ms step_avg:81.51ms
+[2025-07-06 08:11:51] [Rank 0] step:7801/10000 train_time:635707ms step_avg:81.49ms
+[2025-07-06 08:11:52] [Rank 0] step:7821/10000 train_time:637205ms step_avg:81.47ms
+[2025-07-06 08:11:54] [Rank 0] step:7841/10000 train_time:638939ms step_avg:81.49ms
+[2025-07-06 08:11:55] [Rank 0] step:7861/10000 train_time:640539ms step_avg:81.48ms
+[2025-07-06 08:11:57] [Rank 0] step:7881/10000 train_time:642037ms step_avg:81.47ms
+[2025-07-06 08:11:58] [Rank 0] step:7901/10000 train_time:643537ms step_avg:81.45ms
+[2025-07-06 08:12:00] [Rank 0] step:7921/10000 train_time:645093ms step_avg:81.44ms
+[2025-07-06 08:12:02] [Rank 0] step:7941/10000 train_time:646774ms step_avg:81.45ms
+[2025-07-06 08:12:03] [Rank 0] step:7961/10000 train_time:648376ms step_avg:81.44ms
+[2025-07-06 08:12:05] [Rank 0] step:7981/10000 train_time:649877ms step_avg:81.43ms
+[2025-07-06 08:12:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 08:12:07] [Rank 0] PRINT: step:8000/10000 train_loss:0.8613 val_loss:0.8625 train_time:651379ms step_avg:81.42ms
+[2025-07-06 08:12:07] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 08:12:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 08:12:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 08:17:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 08:17:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 08:17:35] [Rank 0] Total Loss: 5.5561
+[2025-07-06 08:17:35] [Rank 0] Total FTA: 0.9407
+[2025-07-06 08:17:35] [Rank 0] Group 0 Loss: 5.7913
+[2025-07-06 08:17:35] [Rank 0] Group 1 Loss: 5.6832
+[2025-07-06 08:17:35] [Rank 0] Group 2 Loss: 5.3184
+[2025-07-06 08:17:35] [Rank 0] Group 3 Loss: 5.6039
+[2025-07-06 08:17:35] [Rank 0] Group 4 Loss: 5.5777
+[2025-07-06 08:17:35] [Rank 0] Group 5 Loss: 5.5519
+[2025-07-06 08:17:35] [Rank 0] Group 6 Loss: 5.3976
+[2025-07-06 08:17:35] [Rank 0] Group 7 Loss: 5.5110
+[2025-07-06 08:17:35] [Rank 0] Group 8 Loss: 5.5458
+[2025-07-06 08:17:35] [Rank 0] Group 9 Loss: 5.4948
+[2025-07-06 08:17:35] [Rank 0] Group 10 Loss: 5.5134
+[2025-07-06 08:17:35] [Rank 0] Group 11 Loss: 5.5134
+[2025-07-06 08:17:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 08:17:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 08:17:35] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 08:17:35] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-06 08:17:35] [Rank 0] Group 4 FTA: 0.9401
+[2025-07-06 08:17:35] [Rank 0] Group 5 FTA: 0.9193
+[2025-07-06 08:17:35] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-06 08:17:35] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-06 08:17:35] [Rank 0] Group 8 FTA: 0.9193
+[2025-07-06 08:17:35] [Rank 0] Group 9 FTA: 0.9258
+[2025-07-06 08:17:35] [Rank 0] Group 10 FTA: 0.9180
+[2025-07-06 08:17:35] [Rank 0] Group 11 FTA: 0.9189
+[2025-07-06 08:17:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:17:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:17:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:17:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:17:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:17:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:17:36] [Rank 0] step:8001/10000 train_time:651402ms step_avg:81.42ms +[2025-07-06 08:17:36] [Rank 0] step:8001/10000 train_time:651402ms step_avg:81.42ms +[2025-07-06 08:17:39] [Rank 0] step:8021/10000 train_time:653569ms step_avg:81.48ms +[2025-07-06 08:17:39] [Rank 0] step:8021/10000 train_time:653569ms step_avg:81.48ms +[2025-07-06 08:17:40] [Rank 0] step:8041/10000 train_time:655161ms step_avg:81.48ms +[2025-07-06 08:17:40] [Rank 0] step:8041/10000 train_time:655161ms step_avg:81.48ms +[2025-07-06 08:17:42] [Rank 0] step:8061/10000 train_time:656653ms step_avg:81.46ms +[2025-07-06 08:17:42] [Rank 0] step:8061/10000 train_time:656653ms step_avg:81.46ms +[2025-07-06 08:17:43] [Rank 0] step:8081/10000 train_time:658147ms step_avg:81.44ms +[2025-07-06 08:17:43] [Rank 0] step:8081/10000 train_time:658147ms step_avg:81.44ms +[2025-07-06 08:17:45] [Rank 0] step:8101/10000 train_time:659745ms step_avg:81.44ms +[2025-07-06 08:17:45] [Rank 0] step:8101/10000 train_time:659745ms step_avg:81.44ms +[2025-07-06 08:17:47] [Rank 0] step:8121/10000 train_time:661904ms step_avg:81.51ms +[2025-07-06 08:17:47] [Rank 0] step:8121/10000 train_time:661904ms step_avg:81.51ms +[2025-07-06 08:17:48] [Rank 0] step:8141/10000 train_time:663400ms step_avg:81.49ms +[2025-07-06 08:17:48] [Rank 0] step:8141/10000 train_time:663400ms step_avg:81.49ms +[2025-07-06 08:17:50] [Rank 0] step:8161/10000 train_time:664894ms step_avg:81.47ms +[2025-07-06 08:17:50] [Rank 0] step:8161/10000 train_time:664894ms step_avg:81.47ms +[2025-07-06 08:17:51] [Rank 0] step:8181/10000 train_time:666393ms step_avg:81.46ms +[2025-07-06 08:17:51] [Rank 0] step:8181/10000 train_time:666393ms step_avg:81.46ms +[2025-07-06 08:17:53] [Rank 0] step:8201/10000 train_time:668125ms step_avg:81.47ms +[2025-07-06 08:17:53] [Rank 0] step:8201/10000 train_time:668125ms step_avg:81.47ms +[2025-07-06 08:17:55] [Rank 0] step:8221/10000 train_time:669624ms step_avg:81.45ms +[2025-07-06 08:17:55] [Rank 0] step:8221/10000 train_time:669624ms step_avg:81.45ms +[2025-07-06 08:17:56] [Rank 0] step:8241/10000 train_time:671122ms step_avg:81.44ms +[2025-07-06 08:17:56] [Rank 0] step:8241/10000 train_time:671122ms step_avg:81.44ms +[2025-07-06 08:17:58] [Rank 0] step:8261/10000 train_time:672621ms step_avg:81.42ms +[2025-07-06 08:17:58] [Rank 0] step:8261/10000 train_time:672621ms step_avg:81.42ms +[2025-07-06 08:18:00] [Rank 0] step:8281/10000 train_time:674801ms step_avg:81.49ms +[2025-07-06 08:18:00] [Rank 0] step:8281/10000 train_time:674801ms step_avg:81.49ms +[2025-07-06 08:18:01] [Rank 0] step:8301/10000 train_time:676279ms step_avg:81.47ms +[2025-07-06 08:18:01] [Rank 0] 
step:8301/10000 train_time:676279ms step_avg:81.47ms +[2025-07-06 08:18:03] [Rank 0] step:8321/10000 train_time:677780ms step_avg:81.45ms +[2025-07-06 08:18:03] [Rank 0] step:8321/10000 train_time:677780ms step_avg:81.45ms +[2025-07-06 08:18:04] [Rank 0] step:8341/10000 train_time:679280ms step_avg:81.44ms +[2025-07-06 08:18:04] [Rank 0] step:8341/10000 train_time:679280ms step_avg:81.44ms +[2025-07-06 08:18:06] [Rank 0] step:8361/10000 train_time:680779ms step_avg:81.42ms +[2025-07-06 08:18:06] [Rank 0] step:8361/10000 train_time:680779ms step_avg:81.42ms +[2025-07-06 08:18:08] [Rank 0] step:8381/10000 train_time:682513ms step_avg:81.44ms +[2025-07-06 08:18:08] [Rank 0] step:8381/10000 train_time:682513ms step_avg:81.44ms +[2025-07-06 08:18:09] [Rank 0] step:8401/10000 train_time:684014ms step_avg:81.42ms +[2025-07-06 08:18:09] [Rank 0] step:8401/10000 train_time:684014ms step_avg:81.42ms +[2025-07-06 08:18:11] [Rank 0] step:8421/10000 train_time:685515ms step_avg:81.41ms +[2025-07-06 08:18:11] [Rank 0] step:8421/10000 train_time:685515ms step_avg:81.41ms +[2025-07-06 08:18:12] [Rank 0] step:8441/10000 train_time:687017ms step_avg:81.39ms +[2025-07-06 08:18:12] [Rank 0] step:8441/10000 train_time:687017ms step_avg:81.39ms +[2025-07-06 08:18:14] [Rank 0] step:8461/10000 train_time:688519ms step_avg:81.38ms +[2025-07-06 08:18:14] [Rank 0] step:8461/10000 train_time:688519ms step_avg:81.38ms +[2025-07-06 08:18:16] [Rank 0] step:8481/10000 train_time:690679ms step_avg:81.44ms +[2025-07-06 08:18:16] [Rank 0] step:8481/10000 train_time:690679ms step_avg:81.44ms +[2025-07-06 08:18:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:18:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:18:18] [Rank 0] PRINT: step:8500/10000 train_loss:0.8604 val_loss:0.8620 train_time:692180ms step_avg:81.43ms +[2025-07-06 08:18:18] [Rank 0] PRINT: step:8500/10000 train_loss:0.8604 val_loss:0.8620 train_time:692180ms step_avg:81.43ms +[2025-07-06 08:18:18] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:18:18] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:18:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:18:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 08:18:18] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:18:18] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:23:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:23:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:23:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:23:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:23:45] [Rank 0] Total Loss: 5.4944 +[2025-07-06 08:23:45] [Rank 0] Total Loss: 5.4944 +[2025-07-06 08:23:45] [Rank 0] Total FTA: 0.9329 +[2025-07-06 08:23:45] [Rank 0] Total FTA: 0.9329 +[2025-07-06 08:23:45] [Rank 0] Group 0 Loss: 5.6570 +[2025-07-06 08:23:45] [Rank 0] Group 0 Loss: 5.6570 +[2025-07-06 08:23:45] [Rank 0] Group 1 Loss: 5.5663 +[2025-07-06 08:23:45] [Rank 0] Group 1 Loss: 5.5663 +[2025-07-06 08:23:45] [Rank 0] Group 2 Loss: 5.2997 +[2025-07-06 08:23:45] [Rank 0] Group 2 Loss: 5.2997 +[2025-07-06 08:23:45] [Rank 0] Group 3 Loss: 5.5522 +[2025-07-06 08:23:45] [Rank 0] Group 3 Loss: 5.5522 +[2025-07-06 08:23:45] [Rank 0] Group 4 Loss: 5.4905 +[2025-07-06 08:23:45] [Rank 0] Group 4 Loss: 5.4905 +[2025-07-06 08:23:45] [Rank 0] Group 5 Loss: 5.5103 +[2025-07-06 08:23:45] [Rank 0] Group 5 Loss: 5.5103 +[2025-07-06 08:23:46] [Rank 0] Group 6 Loss: 5.3648 +[2025-07-06 08:23:46] [Rank 0] Group 6 Loss: 5.3648 +[2025-07-06 08:23:46] [Rank 0] Group 7 Loss: 5.5212 +[2025-07-06 08:23:46] [Rank 0] Group 7 Loss: 5.5212 +[2025-07-06 08:23:46] [Rank 0] Group 8 Loss: 5.4605 +[2025-07-06 08:23:46] [Rank 0] Group 8 Loss: 5.4605 +[2025-07-06 08:23:46] [Rank 0] Group 9 Loss: 5.4448 +[2025-07-06 08:23:46] [Rank 0] Group 9 Loss: 5.4448 +[2025-07-06 08:23:46] [Rank 0] Group 10 Loss: 5.4333 +[2025-07-06 08:23:46] [Rank 0] Group 10 Loss: 5.4333 +[2025-07-06 08:23:46] [Rank 0] Group 11 Loss: 5.4866 +[2025-07-06 08:23:46] [Rank 0] Group 11 Loss: 5.4866 +[2025-07-06 08:23:46] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 08:23:46] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 08:23:46] [Rank 0] Group 1 FTA: 0.8255 +[2025-07-06 08:23:46] [Rank 0] Group 1 FTA: 0.8255 +[2025-07-06 08:23:46] [Rank 0] Group 2 FTA: 0.9141 +[2025-07-06 08:23:46] [Rank 0] Group 2 FTA: 0.9141 +[2025-07-06 08:23:46] [Rank 0] Group 3 FTA: 0.9115 +[2025-07-06 08:23:46] [Rank 0] Group 3 FTA: 0.9115 +[2025-07-06 08:23:46] [Rank 0] Group 4 FTA: 0.9766 +[2025-07-06 08:23:46] [Rank 0] Group 4 FTA: 0.9766 +[2025-07-06 08:23:46] [Rank 0] Group 5 FTA: 0.9349 +[2025-07-06 08:23:46] [Rank 0] Group 5 FTA: 0.9349 +[2025-07-06 08:23:46] [Rank 0] Group 6 FTA: 0.9323 +[2025-07-06 08:23:46] [Rank 0] Group 6 FTA: 0.9323 +[2025-07-06 08:23:46] [Rank 0] Group 7 FTA: 0.9219 +[2025-07-06 08:23:46] [Rank 0] Group 7 FTA: 0.9219 +[2025-07-06 08:23:46] [Rank 0] Group 8 FTA: 0.9089 +[2025-07-06 08:23:46] [Rank 0] Group 8 FTA: 0.9089 +[2025-07-06 08:23:46] [Rank 0] Group 9 FTA: 0.9570 +[2025-07-06 08:23:46] [Rank 0] Group 9 FTA: 0.9570 +[2025-07-06 08:23:46] [Rank 0] Group 10 FTA: 0.9297 +[2025-07-06 08:23:46] [Rank 0] Group 10 FTA: 0.9297 +[2025-07-06 08:23:46] [Rank 0] Group 11 FTA: 0.9297 +[2025-07-06 08:23:46] [Rank 0] Group 11 FTA: 0.9297 +[2025-07-06 08:23:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-06 08:23:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-06 08:23:46] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:23:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:23:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:23:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:23:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:23:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:23:47] [Rank 0] step:8501/10000 train_time:692203ms step_avg:81.43ms +[2025-07-06 08:23:47] [Rank 0] step:8501/10000 train_time:692203ms step_avg:81.43ms +[2025-07-06 08:23:49] [Rank 0] step:8521/10000 train_time:693695ms step_avg:81.41ms +[2025-07-06 08:23:49] [Rank 0] step:8521/10000 train_time:693695ms step_avg:81.41ms +[2025-07-06 08:23:50] [Rank 0] step:8541/10000 train_time:695187ms step_avg:81.39ms +[2025-07-06 08:23:50] [Rank 0] step:8541/10000 train_time:695187ms step_avg:81.39ms +[2025-07-06 08:23:52] [Rank 0] step:8561/10000 train_time:697349ms step_avg:81.46ms +[2025-07-06 08:23:52] [Rank 0] step:8561/10000 train_time:697349ms step_avg:81.46ms +[2025-07-06 08:23:54] [Rank 0] step:8581/10000 train_time:698843ms step_avg:81.44ms +[2025-07-06 08:23:54] [Rank 0] step:8581/10000 train_time:698843ms step_avg:81.44ms +[2025-07-06 08:23:55] [Rank 0] step:8601/10000 train_time:700439ms step_avg:81.44ms +[2025-07-06 08:23:55] [Rank 0] step:8601/10000 train_time:700439ms step_avg:81.44ms +[2025-07-06 08:23:57] [Rank 0] step:8621/10000 train_time:701934ms step_avg:81.42ms +[2025-07-06 08:23:57] [Rank 0] step:8621/10000 train_time:701934ms step_avg:81.42ms +[2025-07-06 08:23:59] [Rank 0] step:8641/10000 train_time:703581ms step_avg:81.42ms +[2025-07-06 08:23:59] [Rank 0] step:8641/10000 train_time:703581ms step_avg:81.42ms +[2025-07-06 08:24:01] [Rank 0] step:8661/10000 train_time:705788ms step_avg:81.49ms +[2025-07-06 08:24:01] [Rank 0] step:8661/10000 train_time:705788ms step_avg:81.49ms +[2025-07-06 08:24:02] [Rank 0] step:8681/10000 train_time:707284ms step_avg:81.47ms +[2025-07-06 08:24:02] [Rank 0] step:8681/10000 train_time:707284ms step_avg:81.47ms +[2025-07-06 08:24:04] [Rank 0] step:8701/10000 train_time:708780ms step_avg:81.46ms +[2025-07-06 08:24:04] [Rank 0] step:8701/10000 train_time:708780ms step_avg:81.46ms +[2025-07-06 08:24:05] [Rank 0] step:8721/10000 train_time:710277ms step_avg:81.44ms +[2025-07-06 08:24:05] [Rank 0] step:8721/10000 train_time:710277ms step_avg:81.44ms +[2025-07-06 08:24:07] [Rank 0] step:8741/10000 train_time:712424ms step_avg:81.50ms +[2025-07-06 08:24:07] [Rank 0] step:8741/10000 train_time:712424ms step_avg:81.50ms +[2025-07-06 08:24:09] [Rank 0] step:8761/10000 train_time:713921ms step_avg:81.49ms +[2025-07-06 08:24:09] [Rank 0] step:8761/10000 train_time:713921ms step_avg:81.49ms +[2025-07-06 08:24:10] [Rank 0] step:8781/10000 train_time:715420ms step_avg:81.47ms +[2025-07-06 08:24:10] [Rank 0] step:8781/10000 train_time:715420ms step_avg:81.47ms +[2025-07-06 08:24:12] [Rank 0] step:8801/10000 train_time:716918ms step_avg:81.46ms +[2025-07-06 08:24:12] [Rank 0] 
step:8801/10000 train_time:716918ms step_avg:81.46ms +[2025-07-06 08:24:14] [Rank 0] step:8821/10000 train_time:718465ms step_avg:81.45ms +[2025-07-06 08:24:14] [Rank 0] step:8821/10000 train_time:718465ms step_avg:81.45ms +[2025-07-06 08:24:15] [Rank 0] step:8841/10000 train_time:720154ms step_avg:81.46ms +[2025-07-06 08:24:15] [Rank 0] step:8841/10000 train_time:720154ms step_avg:81.46ms +[2025-07-06 08:24:17] [Rank 0] step:8861/10000 train_time:721653ms step_avg:81.44ms +[2025-07-06 08:24:17] [Rank 0] step:8861/10000 train_time:721653ms step_avg:81.44ms +[2025-07-06 08:24:18] [Rank 0] step:8881/10000 train_time:723154ms step_avg:81.43ms +[2025-07-06 08:24:18] [Rank 0] step:8881/10000 train_time:723154ms step_avg:81.43ms +[2025-07-06 08:24:20] [Rank 0] step:8901/10000 train_time:724657ms step_avg:81.41ms +[2025-07-06 08:24:20] [Rank 0] step:8901/10000 train_time:724657ms step_avg:81.41ms +[2025-07-06 08:24:22] [Rank 0] step:8921/10000 train_time:726824ms step_avg:81.47ms +[2025-07-06 08:24:22] [Rank 0] step:8921/10000 train_time:726824ms step_avg:81.47ms +[2025-07-06 08:24:23] [Rank 0] step:8941/10000 train_time:728325ms step_avg:81.46ms +[2025-07-06 08:24:23] [Rank 0] step:8941/10000 train_time:728325ms step_avg:81.46ms +[2025-07-06 08:24:25] [Rank 0] step:8961/10000 train_time:729827ms step_avg:81.44ms +[2025-07-06 08:24:25] [Rank 0] step:8961/10000 train_time:729827ms step_avg:81.44ms +[2025-07-06 08:24:26] [Rank 0] step:8981/10000 train_time:731330ms step_avg:81.43ms +[2025-07-06 08:24:26] [Rank 0] step:8981/10000 train_time:731330ms step_avg:81.43ms +[2025-07-06 08:24:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:24:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:24:29] [Rank 0] PRINT: step:9000/10000 train_loss:0.8595 val_loss:0.8617 train_time:732834ms step_avg:81.43ms +[2025-07-06 08:24:29] [Rank 0] PRINT: step:9000/10000 train_loss:0.8595 val_loss:0.8617 train_time:732834ms step_avg:81.43ms +[2025-07-06 08:24:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:24:29] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:24:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:24:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 08:24:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:24:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:29:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:29:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:29:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:29:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:29:56] [Rank 0] Total Loss: 5.5405 +[2025-07-06 08:29:56] [Rank 0] Total Loss: 5.5405 +[2025-07-06 08:29:56] [Rank 0] Total FTA: 0.9354 +[2025-07-06 08:29:56] [Rank 0] Total FTA: 0.9354 +[2025-07-06 08:29:56] [Rank 0] Group 0 Loss: 5.7001 +[2025-07-06 08:29:56] [Rank 0] Group 0 Loss: 5.7001 +[2025-07-06 08:29:56] [Rank 0] Group 1 Loss: 5.6705 +[2025-07-06 08:29:56] [Rank 0] Group 1 Loss: 5.6705 +[2025-07-06 08:29:56] [Rank 0] Group 2 Loss: 5.3545 +[2025-07-06 08:29:56] [Rank 0] Group 2 Loss: 5.3545 +[2025-07-06 08:29:56] [Rank 0] Group 3 Loss: 5.6342 +[2025-07-06 08:29:56] [Rank 0] Group 3 Loss: 5.6342 +[2025-07-06 08:29:56] [Rank 0] Group 4 Loss: 5.6099 +[2025-07-06 08:29:56] [Rank 0] Group 4 Loss: 5.6099 +[2025-07-06 08:29:56] [Rank 0] Group 5 Loss: 5.5155 +[2025-07-06 08:29:56] [Rank 0] Group 5 Loss: 5.5155 +[2025-07-06 08:29:56] [Rank 0] Group 6 Loss: 5.3789 +[2025-07-06 08:29:56] [Rank 0] Group 6 Loss: 5.3789 +[2025-07-06 08:29:56] [Rank 0] Group 7 Loss: 5.4676 +[2025-07-06 08:29:56] [Rank 0] Group 7 Loss: 5.4676 +[2025-07-06 08:29:56] [Rank 0] Group 8 Loss: 5.5118 +[2025-07-06 08:29:56] [Rank 0] Group 8 Loss: 5.5118 +[2025-07-06 08:29:56] [Rank 0] Group 9 Loss: 5.4157 +[2025-07-06 08:29:56] [Rank 0] Group 9 Loss: 5.4157 +[2025-07-06 08:29:56] [Rank 0] Group 10 Loss: 5.5406 +[2025-07-06 08:29:56] [Rank 0] Group 10 Loss: 5.5406 +[2025-07-06 08:29:56] [Rank 0] Group 11 Loss: 5.5200 +[2025-07-06 08:29:56] [Rank 0] Group 11 Loss: 5.5200 +[2025-07-06 08:29:56] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 08:29:56] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 08:29:56] [Rank 0] Group 1 FTA: 0.8151 +[2025-07-06 08:29:56] [Rank 0] Group 1 FTA: 0.8151 +[2025-07-06 08:29:56] [Rank 0] Group 2 FTA: 0.9167 +[2025-07-06 08:29:56] [Rank 0] Group 2 FTA: 0.9167 +[2025-07-06 08:29:56] [Rank 0] Group 3 FTA: 0.9245 +[2025-07-06 08:29:56] [Rank 0] Group 3 FTA: 0.9245 +[2025-07-06 08:29:56] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-06 08:29:56] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-06 08:29:56] [Rank 0] Group 5 FTA: 0.9557 +[2025-07-06 08:29:56] [Rank 0] Group 5 FTA: 0.9557 +[2025-07-06 08:29:56] [Rank 0] Group 6 FTA: 0.9089 +[2025-07-06 08:29:56] [Rank 0] Group 6 FTA: 0.9089 +[2025-07-06 08:29:56] [Rank 0] Group 7 FTA: 0.9245 +[2025-07-06 08:29:56] [Rank 0] Group 7 FTA: 0.9245 +[2025-07-06 08:29:56] [Rank 0] Group 8 FTA: 0.9271 +[2025-07-06 08:29:56] [Rank 0] Group 8 FTA: 0.9271 +[2025-07-06 08:29:56] [Rank 0] Group 9 FTA: 0.9180 +[2025-07-06 08:29:56] [Rank 0] Group 9 FTA: 0.9180 +[2025-07-06 08:29:56] [Rank 0] Group 10 FTA: 0.9297 +[2025-07-06 08:29:56] [Rank 0] Group 10 FTA: 0.9297 +[2025-07-06 08:29:56] [Rank 0] Group 11 FTA: 0.9355 +[2025-07-06 08:29:56] [Rank 0] Group 11 FTA: 0.9355 +[2025-07-06 08:29:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-06 08:29:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-06 08:29:57] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:29:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:29:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:29:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:29:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:29:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:29:58] [Rank 0] step:9001/10000 train_time:732864ms step_avg:81.42ms +[2025-07-06 08:29:58] [Rank 0] step:9001/10000 train_time:732864ms step_avg:81.42ms +[2025-07-06 08:30:00] [Rank 0] step:9021/10000 train_time:735048ms step_avg:81.48ms +[2025-07-06 08:30:00] [Rank 0] step:9021/10000 train_time:735048ms step_avg:81.48ms +[2025-07-06 08:30:01] [Rank 0] step:9041/10000 train_time:736641ms step_avg:81.48ms +[2025-07-06 08:30:01] [Rank 0] step:9041/10000 train_time:736641ms step_avg:81.48ms +[2025-07-06 08:30:03] [Rank 0] step:9061/10000 train_time:738402ms step_avg:81.49ms +[2025-07-06 08:30:03] [Rank 0] step:9061/10000 train_time:738402ms step_avg:81.49ms +[2025-07-06 08:30:05] [Rank 0] step:9081/10000 train_time:739897ms step_avg:81.48ms +[2025-07-06 08:30:05] [Rank 0] step:9081/10000 train_time:739897ms step_avg:81.48ms +[2025-07-06 08:30:06] [Rank 0] step:9101/10000 train_time:741627ms step_avg:81.49ms +[2025-07-06 08:30:06] [Rank 0] step:9101/10000 train_time:741627ms step_avg:81.49ms +[2025-07-06 08:30:08] [Rank 0] step:9121/10000 train_time:743122ms step_avg:81.47ms +[2025-07-06 08:30:08] [Rank 0] step:9121/10000 train_time:743122ms step_avg:81.47ms +[2025-07-06 08:30:09] [Rank 0] step:9141/10000 train_time:744618ms step_avg:81.46ms +[2025-07-06 08:30:09] [Rank 0] step:9141/10000 train_time:744618ms step_avg:81.46ms +[2025-07-06 08:30:11] [Rank 0] step:9161/10000 train_time:746115ms step_avg:81.44ms +[2025-07-06 08:30:11] [Rank 0] step:9161/10000 train_time:746115ms step_avg:81.44ms +[2025-07-06 08:30:13] [Rank 0] step:9181/10000 train_time:747611ms step_avg:81.43ms +[2025-07-06 08:30:13] [Rank 0] step:9181/10000 train_time:747611ms step_avg:81.43ms +[2025-07-06 08:30:15] [Rank 0] step:9201/10000 train_time:749776ms step_avg:81.49ms +[2025-07-06 08:30:15] [Rank 0] step:9201/10000 train_time:749776ms step_avg:81.49ms +[2025-07-06 08:30:16] [Rank 0] step:9221/10000 train_time:751272ms step_avg:81.47ms +[2025-07-06 08:30:16] [Rank 0] step:9221/10000 train_time:751272ms step_avg:81.47ms +[2025-07-06 08:30:18] [Rank 0] step:9241/10000 train_time:752774ms step_avg:81.46ms +[2025-07-06 08:30:18] [Rank 0] step:9241/10000 train_time:752774ms step_avg:81.46ms +[2025-07-06 08:30:19] [Rank 0] step:9261/10000 train_time:754272ms step_avg:81.45ms +[2025-07-06 08:30:19] [Rank 0] step:9261/10000 train_time:754272ms step_avg:81.45ms +[2025-07-06 08:30:21] [Rank 0] step:9281/10000 train_time:756008ms step_avg:81.46ms +[2025-07-06 08:30:21] [Rank 0] step:9281/10000 train_time:756008ms step_avg:81.46ms +[2025-07-06 08:30:22] [Rank 0] step:9301/10000 train_time:757506ms step_avg:81.44ms +[2025-07-06 08:30:22] [Rank 0] 
step:9301/10000 train_time:757506ms step_avg:81.44ms +[2025-07-06 08:30:24] [Rank 0] step:9321/10000 train_time:759006ms step_avg:81.43ms +[2025-07-06 08:30:24] [Rank 0] step:9321/10000 train_time:759006ms step_avg:81.43ms +[2025-07-06 08:30:25] [Rank 0] step:9341/10000 train_time:760505ms step_avg:81.42ms +[2025-07-06 08:30:25] [Rank 0] step:9341/10000 train_time:760505ms step_avg:81.42ms +[2025-07-06 08:30:27] [Rank 0] step:9361/10000 train_time:762261ms step_avg:81.43ms +[2025-07-06 08:30:27] [Rank 0] step:9361/10000 train_time:762261ms step_avg:81.43ms +[2025-07-06 08:30:28] [Rank 0] step:9381/10000 train_time:763743ms step_avg:81.41ms +[2025-07-06 08:30:28] [Rank 0] step:9381/10000 train_time:763743ms step_avg:81.41ms +[2025-07-06 08:30:30] [Rank 0] step:9401/10000 train_time:765249ms step_avg:81.40ms +[2025-07-06 08:30:30] [Rank 0] step:9401/10000 train_time:765249ms step_avg:81.40ms +[2025-07-06 08:30:32] [Rank 0] step:9421/10000 train_time:766750ms step_avg:81.39ms +[2025-07-06 08:30:32] [Rank 0] step:9421/10000 train_time:766750ms step_avg:81.39ms +[2025-07-06 08:30:33] [Rank 0] step:9441/10000 train_time:768251ms step_avg:81.37ms +[2025-07-06 08:30:33] [Rank 0] step:9441/10000 train_time:768251ms step_avg:81.37ms +[2025-07-06 08:30:35] [Rank 0] step:9461/10000 train_time:770403ms step_avg:81.43ms +[2025-07-06 08:30:35] [Rank 0] step:9461/10000 train_time:770403ms step_avg:81.43ms +[2025-07-06 08:30:37] [Rank 0] step:9481/10000 train_time:771903ms step_avg:81.42ms +[2025-07-06 08:30:37] [Rank 0] step:9481/10000 train_time:771903ms step_avg:81.42ms +[2025-07-06 08:30:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:30:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:30:39] [Rank 0] PRINT: step:9500/10000 train_loss:0.8587 val_loss:0.8614 train_time:773404ms step_avg:81.41ms +[2025-07-06 08:30:39] [Rank 0] PRINT: step:9500/10000 train_loss:0.8587 val_loss:0.8614 train_time:773404ms step_avg:81.41ms +[2025-07-06 08:30:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:30:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:30:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:30:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 08:30:39] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:30:39] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:36:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:36:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:36:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:36:07] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:36:07] [Rank 0] Total Loss: 5.6062 +[2025-07-06 08:36:07] [Rank 0] Total Loss: 5.6062 +[2025-07-06 08:36:07] [Rank 0] Total FTA: 0.9581 +[2025-07-06 08:36:07] [Rank 0] Total FTA: 0.9581 +[2025-07-06 08:36:07] [Rank 0] Group 0 Loss: 5.8635 +[2025-07-06 08:36:07] [Rank 0] Group 0 Loss: 5.8635 +[2025-07-06 08:36:07] [Rank 0] Group 1 Loss: 5.6075 +[2025-07-06 08:36:07] [Rank 0] Group 1 Loss: 5.6075 +[2025-07-06 08:36:07] [Rank 0] Group 2 Loss: 5.4958 +[2025-07-06 08:36:07] [Rank 0] Group 2 Loss: 5.4958 +[2025-07-06 08:36:07] [Rank 0] Group 3 Loss: 5.5354 +[2025-07-06 08:36:07] [Rank 0] Group 3 Loss: 5.5354 +[2025-07-06 08:36:07] [Rank 0] Group 4 Loss: 5.6698 +[2025-07-06 08:36:07] [Rank 0] Group 4 Loss: 5.6698 +[2025-07-06 08:36:07] [Rank 0] Group 5 Loss: 5.5782 +[2025-07-06 08:36:07] [Rank 0] Group 5 Loss: 5.5782 +[2025-07-06 08:36:07] [Rank 0] Group 6 Loss: 5.5024 +[2025-07-06 08:36:07] [Rank 0] Group 6 Loss: 5.5024 +[2025-07-06 08:36:07] [Rank 0] Group 7 Loss: 5.5588 +[2025-07-06 08:36:07] [Rank 0] Group 7 Loss: 5.5588 +[2025-07-06 08:36:07] [Rank 0] Group 8 Loss: 5.5596 +[2025-07-06 08:36:07] [Rank 0] Group 8 Loss: 5.5596 +[2025-07-06 08:36:07] [Rank 0] Group 9 Loss: 5.5867 +[2025-07-06 08:36:07] [Rank 0] Group 9 Loss: 5.5867 +[2025-07-06 08:36:07] [Rank 0] Group 10 Loss: 5.5574 +[2025-07-06 08:36:07] [Rank 0] Group 10 Loss: 5.5574 +[2025-07-06 08:36:07] [Rank 0] Group 11 Loss: 5.5707 +[2025-07-06 08:36:07] [Rank 0] Group 11 Loss: 5.5707 +[2025-07-06 08:36:07] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 08:36:07] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 08:36:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 08:36:07] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 08:36:07] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 08:36:07] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 08:36:07] [Rank 0] Group 3 FTA: 0.9010 +[2025-07-06 08:36:07] [Rank 0] Group 3 FTA: 0.9010 +[2025-07-06 08:36:07] [Rank 0] Group 4 FTA: 0.9714 +[2025-07-06 08:36:07] [Rank 0] Group 4 FTA: 0.9714 +[2025-07-06 08:36:07] [Rank 0] Group 5 FTA: 0.9792 +[2025-07-06 08:36:07] [Rank 0] Group 5 FTA: 0.9792 +[2025-07-06 08:36:07] [Rank 0] Group 6 FTA: 0.9479 +[2025-07-06 08:36:07] [Rank 0] Group 6 FTA: 0.9479 +[2025-07-06 08:36:07] [Rank 0] Group 7 FTA: 0.9115 +[2025-07-06 08:36:07] [Rank 0] Group 7 FTA: 0.9115 +[2025-07-06 08:36:07] [Rank 0] Group 8 FTA: 0.9115 +[2025-07-06 08:36:07] [Rank 0] Group 8 FTA: 0.9115 +[2025-07-06 08:36:07] [Rank 0] Group 9 FTA: 0.9531 +[2025-07-06 08:36:07] [Rank 0] Group 9 FTA: 0.9531 +[2025-07-06 08:36:07] [Rank 0] Group 10 FTA: 0.9512 +[2025-07-06 08:36:07] [Rank 0] Group 10 FTA: 0.9512 +[2025-07-06 08:36:07] [Rank 0] Group 11 FTA: 0.9473 +[2025-07-06 08:36:07] [Rank 0] Group 11 FTA: 0.9473 +[2025-07-06 08:36:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-06 08:36:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-06 08:36:08] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:36:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:36:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:36:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:36:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:36:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:36:08] [Rank 0] step:9501/10000 train_time:773426ms step_avg:81.40ms +[2025-07-06 08:36:08] [Rank 0] step:9501/10000 train_time:773426ms step_avg:81.40ms +[2025-07-06 08:36:10] [Rank 0] step:9521/10000 train_time:774940ms step_avg:81.39ms +[2025-07-06 08:36:10] [Rank 0] step:9521/10000 train_time:774940ms step_avg:81.39ms +[2025-07-06 08:36:12] [Rank 0] step:9541/10000 train_time:777103ms step_avg:81.45ms +[2025-07-06 08:36:12] [Rank 0] step:9541/10000 train_time:777103ms step_avg:81.45ms +[2025-07-06 08:36:13] [Rank 0] step:9561/10000 train_time:778575ms step_avg:81.43ms +[2025-07-06 08:36:13] [Rank 0] step:9561/10000 train_time:778575ms step_avg:81.43ms +[2025-07-06 08:36:15] [Rank 0] step:9581/10000 train_time:780067ms step_avg:81.42ms +[2025-07-06 08:36:15] [Rank 0] step:9581/10000 train_time:780067ms step_avg:81.42ms +[2025-07-06 08:36:16] [Rank 0] step:9601/10000 train_time:781562ms step_avg:81.40ms +[2025-07-06 08:36:16] [Rank 0] step:9601/10000 train_time:781562ms step_avg:81.40ms +[2025-07-06 08:36:18] [Rank 0] step:9621/10000 train_time:783059ms step_avg:81.39ms +[2025-07-06 08:36:18] [Rank 0] step:9621/10000 train_time:783059ms step_avg:81.39ms +[2025-07-06 08:36:20] [Rank 0] step:9641/10000 train_time:784789ms step_avg:81.40ms +[2025-07-06 08:36:20] [Rank 0] step:9641/10000 train_time:784789ms step_avg:81.40ms +[2025-07-06 08:36:21] [Rank 0] step:9661/10000 train_time:786284ms step_avg:81.39ms +[2025-07-06 08:36:21] [Rank 0] step:9661/10000 train_time:786284ms step_avg:81.39ms +[2025-07-06 08:36:23] [Rank 0] step:9681/10000 train_time:787993ms step_avg:81.40ms +[2025-07-06 08:36:23] [Rank 0] step:9681/10000 train_time:787993ms step_avg:81.40ms +[2025-07-06 08:36:24] [Rank 0] step:9701/10000 train_time:789490ms step_avg:81.38ms +[2025-07-06 08:36:24] [Rank 0] step:9701/10000 train_time:789490ms step_avg:81.38ms +[2025-07-06 08:36:26] [Rank 0] step:9721/10000 train_time:791242ms step_avg:81.40ms +[2025-07-06 08:36:26] [Rank 0] step:9721/10000 train_time:791242ms step_avg:81.40ms +[2025-07-06 08:36:28] [Rank 0] step:9741/10000 train_time:793138ms step_avg:81.42ms +[2025-07-06 08:36:28] [Rank 0] step:9741/10000 train_time:793138ms step_avg:81.42ms +[2025-07-06 08:36:29] [Rank 0] step:9761/10000 train_time:794636ms step_avg:81.41ms +[2025-07-06 08:36:29] [Rank 0] step:9761/10000 train_time:794636ms step_avg:81.41ms +[2025-07-06 08:36:31] [Rank 0] step:9781/10000 train_time:796134ms step_avg:81.40ms +[2025-07-06 08:36:31] [Rank 0] step:9781/10000 train_time:796134ms step_avg:81.40ms +[2025-07-06 08:36:32] [Rank 0] step:9801/10000 train_time:797633ms step_avg:81.38ms +[2025-07-06 08:36:32] [Rank 0] 
step:9801/10000 train_time:797633ms step_avg:81.38ms +[2025-07-06 08:36:35] [Rank 0] step:9821/10000 train_time:799784ms step_avg:81.44ms +[2025-07-06 08:36:35] [Rank 0] step:9821/10000 train_time:799784ms step_avg:81.44ms +[2025-07-06 08:36:36] [Rank 0] step:9841/10000 train_time:801282ms step_avg:81.42ms +[2025-07-06 08:36:36] [Rank 0] step:9841/10000 train_time:801282ms step_avg:81.42ms +[2025-07-06 08:36:38] [Rank 0] step:9861/10000 train_time:802783ms step_avg:81.41ms +[2025-07-06 08:36:38] [Rank 0] step:9861/10000 train_time:802783ms step_avg:81.41ms +[2025-07-06 08:36:39] [Rank 0] step:9881/10000 train_time:804284ms step_avg:81.40ms +[2025-07-06 08:36:39] [Rank 0] step:9881/10000 train_time:804284ms step_avg:81.40ms +[2025-07-06 08:36:41] [Rank 0] step:9901/10000 train_time:805835ms step_avg:81.39ms +[2025-07-06 08:36:41] [Rank 0] step:9901/10000 train_time:805835ms step_avg:81.39ms +[2025-07-06 08:36:43] [Rank 0] step:9921/10000 train_time:807927ms step_avg:81.44ms +[2025-07-06 08:36:43] [Rank 0] step:9921/10000 train_time:807927ms step_avg:81.44ms +[2025-07-06 08:36:44] [Rank 0] step:9941/10000 train_time:809428ms step_avg:81.42ms +[2025-07-06 08:36:44] [Rank 0] step:9941/10000 train_time:809428ms step_avg:81.42ms +[2025-07-06 08:36:46] [Rank 0] step:9961/10000 train_time:810928ms step_avg:81.41ms +[2025-07-06 08:36:46] [Rank 0] step:9961/10000 train_time:810928ms step_avg:81.41ms +[2025-07-06 08:36:47] [Rank 0] step:9981/10000 train_time:812429ms step_avg:81.40ms +[2025-07-06 08:36:47] [Rank 0] step:9981/10000 train_time:812429ms step_avg:81.40ms +[2025-07-06 08:36:49] [Rank 0] step:10000/10000 train_time:814090ms step_avg:81.41ms +[2025-07-06 08:36:49] [Rank 0] step:10000/10000 train_time:814090ms step_avg:81.41ms +[2025-07-06 08:36:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:36:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 08:36:50] [Rank 0] PRINT: step:10000/10000 train_loss:0.8579 val_loss:0.8613 train_time:814170ms step_avg:81.42ms +[2025-07-06 08:36:50] [Rank 0] PRINT: step:10000/10000 train_loss:0.8579 val_loss:0.8613 train_time:814170ms step_avg:81.42ms +[2025-07-06 08:36:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:36:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 08:36:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 08:36:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 08:36:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:36:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 08:42:16] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:42:16] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 08:42:16] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:42:16] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 08:42:16] [Rank 0] Total Loss: 5.6439 +[2025-07-06 08:42:16] [Rank 0] Total Loss: 5.6439 +[2025-07-06 08:42:16] [Rank 0] Total FTA: 0.9721 +[2025-07-06 08:42:16] [Rank 0] Total FTA: 0.9721 +[2025-07-06 08:42:16] [Rank 0] Group 0 Loss: 5.8667 +[2025-07-06 08:42:16] [Rank 0] Group 0 Loss: 5.8667 +[2025-07-06 08:42:16] [Rank 0] Group 1 Loss: 5.7210 +[2025-07-06 08:42:16] [Rank 0] Group 1 Loss: 5.7210 +[2025-07-06 08:42:16] [Rank 0] Group 2 Loss: 5.5165 +[2025-07-06 08:42:16] [Rank 0] Group 2 Loss: 5.5165 +[2025-07-06 08:42:16] [Rank 0] Group 3 Loss: 5.6328 +[2025-07-06 08:42:16] [Rank 0] Group 3 Loss: 5.6328 +[2025-07-06 08:42:16] [Rank 0] Group 4 Loss: 5.6879 +[2025-07-06 08:42:16] [Rank 0] Group 4 Loss: 5.6879 +[2025-07-06 08:42:16] [Rank 0] Group 5 Loss: 5.6241 +[2025-07-06 08:42:16] [Rank 0] Group 5 Loss: 5.6241 +[2025-07-06 08:42:16] [Rank 0] Group 6 Loss: 5.4661 +[2025-07-06 08:42:16] [Rank 0] Group 6 Loss: 5.4661 +[2025-07-06 08:42:16] [Rank 0] Group 7 Loss: 5.6191 +[2025-07-06 08:42:16] [Rank 0] Group 7 Loss: 5.6191 +[2025-07-06 08:42:16] [Rank 0] Group 8 Loss: 5.6087 +[2025-07-06 08:42:16] [Rank 0] Group 8 Loss: 5.6087 +[2025-07-06 08:42:16] [Rank 0] Group 9 Loss: 5.5398 +[2025-07-06 08:42:16] [Rank 0] Group 9 Loss: 5.5398 +[2025-07-06 08:42:16] [Rank 0] Group 10 Loss: 5.5864 +[2025-07-06 08:42:16] [Rank 0] Group 10 Loss: 5.5864 +[2025-07-06 08:42:16] [Rank 0] Group 11 Loss: 5.6347 +[2025-07-06 08:42:16] [Rank 0] Group 11 Loss: 5.6347 +[2025-07-06 08:42:16] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 08:42:16] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 08:42:16] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 08:42:16] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 08:42:16] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 08:42:16] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 08:42:16] [Rank 0] Group 3 FTA: 0.9609 +[2025-07-06 08:42:16] [Rank 0] Group 3 FTA: 0.9609 +[2025-07-06 08:42:16] [Rank 0] Group 4 FTA: 0.9792 +[2025-07-06 08:42:16] [Rank 0] Group 4 FTA: 0.9792 +[2025-07-06 08:42:16] [Rank 0] Group 5 FTA: 0.9766 +[2025-07-06 08:42:16] [Rank 0] Group 5 FTA: 0.9766 +[2025-07-06 08:42:16] [Rank 0] Group 6 FTA: 0.9453 +[2025-07-06 08:42:16] [Rank 0] Group 6 FTA: 0.9453 +[2025-07-06 08:42:16] [Rank 0] Group 7 FTA: 0.9453 +[2025-07-06 08:42:16] [Rank 0] Group 7 FTA: 0.9453 +[2025-07-06 08:42:16] [Rank 0] Group 8 FTA: 0.9531 +[2025-07-06 08:42:16] [Rank 0] Group 8 FTA: 0.9531 +[2025-07-06 08:42:16] [Rank 0] Group 9 FTA: 0.9531 +[2025-07-06 08:42:16] [Rank 0] Group 9 FTA: 0.9531 +[2025-07-06 08:42:16] [Rank 0] Group 10 FTA: 0.9727 +[2025-07-06 08:42:16] [Rank 0] Group 10 FTA: 0.9727 +[2025-07-06 08:42:16] [Rank 0] Group 11 FTA: 0.9619 +[2025-07-06 08:42:16] [Rank 0] Group 11 FTA: 0.9619 +[2025-07-06 08:42:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-06 08:42:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-06 08:42:17] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:42:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-06 08:42:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:42:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-06 08:42:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:42:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-06 08:42:18] [Rank 0] step:10001/10000 train_time:814193ms step_avg:81.41ms +[2025-07-06 08:42:18] [Rank 0] step:10001/10000 train_time:814193ms step_avg:81.41ms +[2025-07-06 08:42:18] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 08:42:18 2025 --- +[2025-07-06 08:42:18] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 08:42:18 2025 --- +[2025-07-06 08:42:18] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10356 MiB +[2025-07-06 08:42:18] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10356 MiB diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/config.json b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/config.json new file mode 100644 index 0000000000000000000000000000000000000000..936d94fc725a91d07577e6ac01fcb89e9d054d59 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 43, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "151d6b3c-1904-457e-958e-1d537de20e32", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..28a9d21fc5e304f9cef44215bb42aee36222b3a9 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ba3661af18758ab2b04752dfd3e63521878a1d1a1ba06d76e7d143d635b245b +size 436380 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..aad1ae718be3aca4eac961bc7e0e2fe9e9035eed --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c454672f40601d1500acbbf3d38b50d3a1602823fac89cc62d20df2eaa7dc8 +size 355298 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png 
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..b4cea5d6ef9a5ef6ea929797b332348833d33d65
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a16c4dd41215d48dd1c747f5b07809fd293c9439effa4abddaf1f7189ac2c4b
+size 109847
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..670a1f8683aae278da8d7414ee46ad980077edb4
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b95502bf8c5f0008dd56c003dd15d27c96aab3dfa1b014abbd03a6ffeeccb1d
+size 111858
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/training_log_151d6b3c-1904-457e-958e-1d537de20e32.txt b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/training_log_151d6b3c-1904-457e-958e-1d537de20e32.txt
new file mode 100644
index 0000000000000000000000000000000000000000..eac23ce76986f0262fa98653a4bc11298af0f7ea
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/training_log_151d6b3c-1904-457e-958e-1d537de20e32.txt
@@ -0,0 +1,5144 @@
+[2025-07-07 20:53:56] [Rank 0] PRINT: --- Script Start: Mon Jul 7 20:53:56 2025 ---
+[2025-07-07 20:53:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002)
+[2025-07-07 20:53:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 20:53:56] [Rank 0] PRINT: Using fixed seed: 43
+[2025-07-07 20:53:56] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43
+[2025-07-07 20:53:56] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    # Shard layout: 256 int32 header words (magic, version, token count), then the tokens as uint16.
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so multi-epoch training keeps yielding batches
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
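+# -----------------------------------------------------------------------------
+# Illustration: the .bin shard layout that _load_data_shard above expects.
+# A minimal writer sketch, assuming only what the loader's asserts imply: a
+# 256-word int32 header carrying the magic number 20240520, the format
+# version 1, and the token count, followed by the token stream as uint16.
+# The helper name write_data_shard is hypothetical and is not called
+# anywhere in this script.
+def write_data_shard(path: Path, tokens):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520      # magic number checked by _load_data_shard
+    header[1] = 1             # format version checked by _load_data_shard
+    header[2] = len(tokens)   # number of uint16 tokens following the header
+    with open(path, "wb") as f:
+        f.write(header.tobytes())                               # 256 * 4 header bytes
+        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())  # 2 bytes per token
+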
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the log file exactly once per call
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Plot a total or per-class metric history keyed by training step."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    # Per-class histories map group_id -> {step: value}; totals map step -> value
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(e) for e in step_data.keys()])
+            values = [step_data[str(e)] for e in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on the original QA data for per-class loss.
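+    Classes are grouped by generate_powerlaw_selection_counts(m_val): group 0 is
+    a single class with 2**m_val samples, and each group g >= 1 holds 2**(g-1)
+    classes with 2**(m_val-g) samples per class, so larger group ids correspond
+    to rarer classes. For example, m_val = 3 gives: group 0 -> 1 class x 8
+    samples, group 1 -> 1 class x 4, group 2 -> 2 classes x 2, group 3 -> 4
+    classes x 1. The training loop below reports via run_detailed_evaluation();
+    this loss-only variant is kept as a standalone utility.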
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    # Load the QA data and, if requested, downsample it with per-class stratification
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # Map class_id -> group_id using the power-law class layout
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            # Pad to a multiple of the attention block size, capped at max_eval_len
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            # Shifted targets; padding positions are masked out with -100
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
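+
+# Minimal usage sketch for the two helpers above (values mirror those used in
+# the training loop below; 'tokenizer_for_eval' is a GPT2Tokenizer):
+#
+#     losses = evaluate_per_class_loss(model, tokenizer_for_eval, QA_JSONL_PATH,
+#                                      device, m_val=11, num_samples=5000)
+#     history = {g: {"0": v} for g, v in losses.items()}  # single snapshot
+#     plot_loss_curves(history, run_dir_path / "per_class_loss_curves.png")
+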
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
+            if isinstance(result, tuple) and len(result) == 2:
+                loss, logits = result
+                print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+            else:
+                print0(f"PRINT: Model returns: {type(result)}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test still fails: {e}", console=True)
+
+
+
+    # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    if exp_args.model_parameterization == "qkvo":
+        print0("PRINT: Collecting parameters for optimizers...", console=True)
+        head_params = [model.lm_head.weight]
+        embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+        # Granular collection for attention and MLP parts
+        attn_q_params = []
+        attn_k_params = []
+        attn_v_params = []
+        attn_o_params = []  # W_O from c_proj
+        mlp_fc_params = []
+        mlp_proj_params = []
+
+        for block_module in model.blocks:
+            if block_module.attn is not None:
+                # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+                if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+                else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+                if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+                else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+                else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+                attn_o_params.append(block_module.attn.c_proj.weight)
+            if block_module.mlp is not None:
+                mlp_fc_params.append(block_module.mlp.c_fc.weight)
+                mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+        # Combine into logical groups for experiments (W_1 = c_fc, W_2 = c_proj)
+        attn_qk_group = attn_q_params + attn_k_params
+        attn_vo_group = attn_v_params + attn_o_params
+        all_attn_matrices = attn_qk_group + attn_vo_group
+        mlp_w1_group = mlp_fc_params
+        mlp_w2_group = mlp_proj_params
+        all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+        # Scalar parameters (all others not explicitly grouped as matrices)
+        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+        for p_scalar in scalar_params:  # Sanity check
+            if p_scalar.ndim >= 2:
+                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+        # Determine parameter distribution based on optimizer_mode
+        muon_params_target_list = []
+        adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+        adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+        current_optimizer_mode = exp_args.optimizer_mode
+        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+        if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+            print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+            muon_params_target_list = all_attn_matrices + all_mlp_matrices
+            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+        elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+            print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 20:53:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
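+
+# The run directory populated below ends up holding config.json, one
+# training_log_<uuid>.txt, and the four curve images (per_class_loss_curves.png,
+# per_class_acc_curves.png, total_loss_curve.png, total_acc_curve.png) that are
+# refreshed at every evaluation step.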
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)  # only set up logging/config for a fresh run directory
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Create the run directory (folder name was already built above from config and seed)
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+        print0(code) # Log the code
+        # ... (other initial logs)
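+
+# Note on the metric computed below: "first token accuracy" (FTA) only checks
+# whether the model's argmax prediction at the final prompt position equals the
+# first token of the reference answer. A minimal sketch of the idea (the strings
+# are hypothetical; GPT-2 tokenizer and a logits-returning forward assumed; the
+# real code additionally pads the prompt to BLOCK_SIZE=128 first):
+#
+#     prompt_ids = tokenizer.encode("Question text?", add_special_tokens=False)
+#     expected = tokenizer.encode(" Paris", add_special_tokens=False)[0]
+#     logits = model(torch.tensor(prompt_ids, dtype=torch.int32, device=device),
+#                    None, torch.tensor(1, device=device))
+#     hit = logits[len(prompt_ids) - 1].argmax().item() == expected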
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        if not isinstance(text, str):
+            return ""
+
+        # Matching is case-insensitive (see flags below), so these three
+        # patterns also cover every lowercase "answer" spelling variant.
+        patterns = [
+            r'\s*Answer\s*:\s*',   # "Answer:" with any surrounding whitespace
+            r'\s*Answer\s+',       # "Answer" followed by whitespace
+            r'\bAnswer\b',         # any remaining bare "Answer"
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair (kept for reference;
+    # the cached validation file loaded below already stores its output format)
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    # Dataset context (not used directly below; the cached samples are pre-extracted)
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # cap at num_samples cached samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 20:53:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 20:53:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-07 20:53:57] [Rank 0] PRINT: Constructing model...
+[2025-07-07 20:53:59] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 20:53:59] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 20:53:59] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 20:53:59] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-07 20:53:59] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 20:53:59] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 20:53:59] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 20:54:00] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-07 20:54:00] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 20:54:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-07 20:54:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-07 20:54:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-07 20:54:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-07 20:54:00] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 20:54:00] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 20:54:00] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 20:55:37] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 20:55:37] [Rank 0] PRINT: Starting training...
+[2025-07-07 20:55:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:55:46] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 20:55:48] [Rank 0] step:21/10000 train_time:1754ms step_avg:83.51ms
+[2025-07-07 20:55:49] [Rank 0] step:41/10000 train_time:3213ms step_avg:78.37ms
+[2025-07-07 20:55:51] [Rank 0] step:61/10000 train_time:4677ms step_avg:76.68ms
+[2025-07-07 20:55:52] [Rank 0] step:81/10000 train_time:6139ms step_avg:75.79ms
+[2025-07-07 20:55:54] [Rank 0] step:101/10000 train_time:8315ms step_avg:82.33ms
+[2025-07-07 20:55:56] [Rank 0] step:121/10000 train_time:9780ms step_avg:80.82ms
+[2025-07-07 20:55:57] [Rank 0] step:141/10000 train_time:11250ms step_avg:79.79ms
+[2025-07-07 20:55:59] [Rank 0] step:161/10000 train_time:12722ms step_avg:79.02ms
+[2025-07-07 20:56:01] [Rank 0] step:181/10000 train_time:14244ms step_avg:78.70ms
+[2025-07-07 20:56:02] [Rank 0] step:201/10000 train_time:16320ms step_avg:81.19ms
+[2025-07-07 20:56:04] [Rank 0] step:221/10000 train_time:17790ms step_avg:80.50ms
+[2025-07-07 20:56:05] [Rank 0] step:241/10000 train_time:19263ms step_avg:79.93ms
+[2025-07-07 20:56:07] [Rank 0] step:261/10000 train_time:20733ms step_avg:79.44ms
+[2025-07-07 20:56:08] [Rank 0] step:281/10000 train_time:22440ms step_avg:79.86ms
+[2025-07-07 20:56:10] [Rank 0] step:301/10000 train_time:23910ms step_avg:79.43ms
+[2025-07-07 20:56:11] [Rank 0] step:321/10000 train_time:25380ms step_avg:79.07ms
+[2025-07-07 20:56:13] [Rank 0] step:341/10000 train_time:26854ms step_avg:78.75ms
+[2025-07-07 20:56:14] [Rank 0] step:361/10000 train_time:28532ms step_avg:79.04ms
+[2025-07-07 20:56:16] [Rank 0] step:381/10000 train_time:30010ms step_avg:78.77ms
+[2025-07-07 20:56:17] [Rank 0] step:401/10000 train_time:31482ms step_avg:78.51ms
+[2025-07-07 20:56:19] [Rank 0] step:421/10000 train_time:32954ms step_avg:78.28ms
+[2025-07-07 20:56:20] [Rank 0] step:441/10000 train_time:34428ms step_avg:78.07ms
+[2025-07-07 20:56:22] [Rank 0] step:461/10000 train_time:36138ms step_avg:78.39ms
+[2025-07-07 20:56:23] [Rank 0] step:481/10000 train_time:37612ms step_avg:78.20ms
+[2025-07-07 20:56:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:56:26] [Rank 0] PRINT: step:500/10000 train_loss:3.3873 val_loss:1.6150 train_time:39084ms step_avg:78.17ms
+[2025-07-07 20:56:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:56:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:56:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:01:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:01:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:01:46] [Rank 0] Total Loss: 4.2120
+[2025-07-07 21:01:46] [Rank 0] Total FTA: 0.0861
+[2025-07-07 21:01:46] [Rank 0] Group 0 Loss: 4.5269
+[2025-07-07 21:01:46] [Rank 0] Group 1 Loss: 4.2930
+[2025-07-07 21:01:46] [Rank 0] Group 2 Loss: 4.0276
+[2025-07-07 21:01:46] [Rank 0] Group 3 Loss: 4.1298
+[2025-07-07 21:01:46] [Rank 0] Group 4 Loss: 4.1036
+[2025-07-07 21:01:46] [Rank 0] Group 5 Loss: 4.1347
+[2025-07-07 21:01:46] [Rank 0] Group 6 Loss: 4.1040
+[2025-07-07 21:01:46] [Rank 0] Group 7 Loss: 4.2050
+[2025-07-07 21:01:46] [Rank 0] Group 8 Loss: 4.1700
+[2025-07-07 21:01:46] [Rank 0] Group 9 Loss: 4.1403
+[2025-07-07 21:01:46] [Rank 0] Group 10 Loss: 4.1991
+[2025-07-07 21:01:46] [Rank 0] Group 11 Loss: 4.1982
+[2025-07-07 21:01:46] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 21:01:46] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:01:46] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-07 21:01:46] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 21:01:46] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 21:01:46] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 21:01:47] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-07 21:01:47] [Rank 0] Group 7 FTA: 0.1276
+[2025-07-07 21:01:47] [Rank 0] Group 8 FTA: 0.0833
+[2025-07-07 21:01:47] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 21:01:47] [Rank 0] Group 10 FTA: 0.0840
+[2025-07-07 21:01:47] [Rank 0] Group 11 FTA: 0.0957
+[2025-07-07 21:01:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:01:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:01:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:01:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:01:48] [Rank 0] step:501/10000 train_time:39105ms step_avg:78.05ms
+[2025-07-07 21:01:49] [Rank 0] step:521/10000 train_time:40563ms step_avg:77.86ms
+[2025-07-07 21:01:51] [Rank 0] step:541/10000 train_time:42287ms step_avg:78.16ms
+[2025-07-07 21:01:53] [Rank 0] step:561/10000 train_time:44148ms step_avg:78.69ms
+[2025-07-07 21:01:54] [Rank 0] step:581/10000 train_time:45609ms step_avg:78.50ms
+[2025-07-07 21:01:56] [Rank 0] step:601/10000 train_time:47075ms step_avg:78.33ms
+[2025-07-07 21:01:57] [Rank 0] step:621/10000 train_time:48541ms step_avg:78.17ms
+[2025-07-07 21:01:59] [Rank 0] step:641/10000 train_time:50675ms step_avg:79.06ms
+[2025-07-07 21:02:01] [Rank 0] step:661/10000 train_time:52140ms step_avg:78.88ms
+[2025-07-07 21:02:02] [Rank 0] step:681/10000 train_time:53609ms step_avg:78.72ms
+[2025-07-07 21:02:04] [Rank 0] step:701/10000 train_time:55075ms step_avg:78.57ms
+[2025-07-07 21:02:06] [Rank 0] step:721/10000 train_time:56543ms step_avg:78.42ms
+[2025-07-07 21:02:07] [Rank 0] step:741/10000 train_time:58248ms step_avg:78.61ms
+[2025-07-07 21:02:09] [Rank 0] step:761/10000 train_time:59725ms step_avg:78.48ms
+[2025-07-07 21:02:10] [Rank 0] step:781/10000 train_time:61202ms step_avg:78.36ms
+[2025-07-07 21:02:11] [Rank 0] step:801/10000 train_time:62678ms step_avg:78.25ms
+[2025-07-07 21:02:14] [Rank 0] step:821/10000 train_time:64825ms step_avg:78.96ms
+[2025-07-07 21:02:15] [Rank 0] step:841/10000 train_time:66299ms step_avg:78.83ms
+[2025-07-07 21:02:17] [Rank 0] step:861/10000 train_time:67774ms step_avg:78.72ms
+[2025-07-07 21:02:18] [Rank 0] step:881/10000 train_time:69249ms step_avg:78.60ms
+[2025-07-07 21:02:20] [Rank 0] step:901/10000 train_time:70724ms step_avg:78.50ms
+[2025-07-07 21:02:22] [Rank 0] step:921/10000 train_time:72861ms step_avg:79.11ms
+[2025-07-07 21:02:23] [Rank 0] step:941/10000 train_time:74337ms step_avg:79.00ms
+[2025-07-07 21:02:25] [Rank 0] step:961/10000 train_time:75811ms step_avg:78.89ms
+[2025-07-07 21:02:26] [Rank 0] step:981/10000 train_time:77284ms step_avg:78.78ms
+[2025-07-07 21:02:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:02:29] [Rank 0] PRINT: step:1000/10000 train_loss:1.4614 val_loss:1.3147 train_time:79417ms step_avg:79.42ms
+[2025-07-07 21:02:29] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:02:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:02:29] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:07:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:07:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:07:50] [Rank 0] Total Loss: 4.3859
+[2025-07-07 21:07:50] [Rank 0] Total FTA: 0.1401
+[2025-07-07 21:07:50] [Rank 0] Group 0 Loss: 4.5697
+[2025-07-07 21:07:50] [Rank 0] Group 1 Loss: 4.2877
+[2025-07-07 21:07:50] [Rank 0] Group 2 Loss: 4.2614
+[2025-07-07 21:07:50] [Rank 0] Group 3 Loss: 4.4021
+[2025-07-07 21:07:50] [Rank 0] Group 4 Loss: 4.2732
+[2025-07-07 21:07:50] [Rank 0] Group 5 Loss: 4.3471
+[2025-07-07 21:07:50] [Rank 0] Group 6 Loss: 4.3406
+[2025-07-07 21:07:50] [Rank 0] Group 7 Loss: 4.4136
+[2025-07-07 21:07:50] [Rank 0] Group 8 Loss: 4.4162
+[2025-07-07 21:07:50] [Rank 0] Group 9 Loss: 4.4143
+[2025-07-07 21:07:50] [Rank 0] Group 10 Loss: 4.3680
+[2025-07-07 21:07:50] [Rank 0] Group 11 Loss: 4.3792
+[2025-07-07 21:07:50] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 21:07:50] [Rank 0] Group 1 FTA: 0.3411
+[2025-07-07 21:07:50] [Rank 0] Group 2 FTA: 0.1510
+[2025-07-07 21:07:50] [Rank 0] Group 3 FTA: 0.1432
+[2025-07-07 21:07:50] [Rank 0] Group 4 FTA: 0.0729
+[2025-07-07 21:07:50] [Rank 0] Group 5 FTA: 0.1406
+[2025-07-07 21:07:50] [Rank 0] Group 6 FTA: 0.1849
+[2025-07-07 21:07:50] [Rank 0] Group 7 FTA: 0.1589
+[2025-07-07 21:07:50] [Rank 0] Group 8 FTA: 0.1536
+[2025-07-07 21:07:50] [Rank 0] Group 9 FTA: 0.1953
+[2025-07-07 21:07:50] [Rank 0] Group 10 FTA: 0.1582
+[2025-07-07 21:07:50] [Rank 0] Group 11 FTA: 0.1377
+[2025-07-07 21:07:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:07:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:07:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:07:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:07:51] [Rank 0] step:1001/10000 train_time:79438ms step_avg:79.36ms
+[2025-07-07 21:07:53] [Rank 0] step:1021/10000 train_time:80916ms step_avg:79.25ms
+[2025-07-07 21:07:54] [Rank 0] step:1041/10000 train_time:82390ms step_avg:79.15ms
+[2025-07-07 21:07:56] [Rank 0] step:1061/10000 train_time:83861ms step_avg:79.04ms
+[2025-07-07 21:07:58] [Rank 0] step:1081/10000 train_time:86007ms step_avg:79.56ms
+[2025-07-07 21:07:59] [Rank 0] step:1101/10000 train_time:87459ms step_avg:79.44ms
+[2025-07-07 21:08:01] [Rank 0] step:1121/10000 train_time:88931ms step_avg:79.33ms
+[2025-07-07 21:08:02] [Rank 0] step:1141/10000 train_time:90403ms step_avg:79.23ms
+[2025-07-07 21:08:04] [Rank 0] step:1161/10000 train_time:91873ms step_avg:79.13ms
+[2025-07-07 21:08:06] [Rank 0] step:1181/10000 train_time:94018ms step_avg:79.61ms
+[2025-07-07 21:08:07] [Rank 0] step:1201/10000 train_time:95488ms step_avg:79.51ms
+[2025-07-07 21:08:09] [Rank 0] step:1221/10000 train_time:96963ms step_avg:79.41ms
+[2025-07-07 21:08:10] [Rank 0] step:1241/10000 train_time:98437ms step_avg:79.32ms
+[2025-07-07 21:08:12] [Rank 0] step:1261/10000 train_time:100599ms step_avg:79.78ms
+[2025-07-07 21:08:14] [Rank 0] step:1281/10000 train_time:102057ms step_avg:79.67ms
+[2025-07-07 21:08:15] [Rank 0] step:1301/10000 train_time:103530ms step_avg:79.58ms
+[2025-07-07 21:08:17] [Rank 0] step:1321/10000 train_time:105007ms step_avg:79.49ms
+[2025-07-07 21:08:18] [Rank 0] step:1341/10000 train_time:106485ms step_avg:79.41ms
+[2025-07-07 21:08:20] [Rank 0] step:1361/10000 train_time:108194ms step_avg:79.50ms
+[2025-07-07 21:08:21] [Rank 0] step:1381/10000 train_time:109673ms step_avg:79.42ms
+[2025-07-07 21:08:23] [Rank 0] step:1401/10000 train_time:111149ms step_avg:79.34ms
+[2025-07-07 21:08:24] [Rank 0] step:1421/10000 train_time:112626ms step_avg:79.26ms
+[2025-07-07 21:08:26] [Rank 0] step:1441/10000 train_time:114154ms step_avg:79.22ms
+[2025-07-07 21:08:28] [Rank 0] step:1461/10000 train_time:116234ms step_avg:79.56ms
+[2025-07-07 21:08:29] [Rank 0] step:1481/10000 train_time:117710ms step_avg:79.48ms
+[2025-07-07 21:08:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:08:32] [Rank 0] PRINT: step:1500/10000 train_loss:1.2250 val_loss:1.1960 train_time:119186ms step_avg:79.46ms
+[2025-07-07 21:08:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:08:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:08:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:13:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:13:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:13:52] [Rank 0] Total Loss: 4.7960
+[2025-07-07 21:13:52] [Rank 0] Total FTA: 0.3217
+[2025-07-07 21:13:52] [Rank 0] Group 0 Loss: 4.9580
+[2025-07-07 21:13:52] [Rank 0] Group 1 Loss: 4.7287
+[2025-07-07 21:13:52] [Rank 0] Group 2 Loss: 4.6514
+[2025-07-07 21:13:52] [Rank 0] Group 3 Loss: 4.8259
+[2025-07-07 21:13:52] [Rank 0] Group 4 Loss: 4.7342
+[2025-07-07 21:13:52] [Rank 0] Group 5 Loss: 4.7593
+[2025-07-07 21:13:52] [Rank 0] Group 6 Loss: 4.6661
+[2025-07-07 21:13:52] [Rank 0] Group 7 Loss: 4.8251
+[2025-07-07 21:13:52] [Rank 0] Group 8 Loss: 4.7788
+[2025-07-07 21:13:52] [Rank 0] Group 9 Loss: 4.8346
+[2025-07-07 21:13:52] [Rank 0] Group 10 Loss: 4.7946
+[2025-07-07 21:13:52] [Rank 0] Group 11 Loss: 4.8150
+[2025-07-07 21:13:52] [Rank 0] Group 0 FTA: 0.3134
+[2025-07-07 21:13:52] [Rank 0] Group 1 FTA: 0.5026
+[2025-07-07 21:13:52] [Rank 0] Group 2 FTA: 0.3438
+[2025-07-07 21:13:52] [Rank 0] Group 3 FTA: 0.3047
+[2025-07-07 21:13:52] [Rank 0] Group 4 FTA: 0.2604
+[2025-07-07 21:13:52] [Rank 0] Group 5 FTA: 0.3594
+[2025-07-07 21:13:52] [Rank 0] Group 6 FTA: 0.2786
+[2025-07-07 21:13:52] [Rank 0] Group 7 FTA: 0.3151
+[2025-07-07 21:13:52] [Rank 0] Group 8 FTA: 0.3073
+[2025-07-07 21:13:52] [Rank 0] Group 9 FTA: 0.2773
+[2025-07-07 21:13:52] [Rank 0] Group 10 FTA: 0.3320
+[2025-07-07 21:13:52] [Rank 0] Group 11 FTA: 0.2969
+[2025-07-07 21:13:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:13:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:13:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:13:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:13:54] [Rank 0] step:1501/10000 train_time:119207ms step_avg:79.42ms
+[2025-07-07 21:13:55] [Rank 0] step:1521/10000 train_time:120702ms step_avg:79.36ms
+[2025-07-07 21:13:57] [Rank 0] step:1541/10000 train_time:122826ms step_avg:79.71ms
+[2025-07-07 21:13:59] [Rank 0] step:1561/10000 train_time:124296ms step_avg:79.63ms
+[2025-07-07 21:14:00] [Rank 0] step:1581/10000 train_time:125769ms step_avg:79.55ms
+[2025-07-07 21:14:02] [Rank 0] step:1601/10000 train_time:127243ms step_avg:79.48ms
+[2025-07-07 21:14:04] [Rank 0] step:1621/10000 train_time:128714ms step_avg:79.40ms
+[2025-07-07 21:14:05] [Rank 0] step:1641/10000 train_time:130832ms step_avg:79.73ms
+[2025-07-07 21:14:07] [Rank 0] step:1661/10000 train_time:132304ms step_avg:79.65ms
+[2025-07-07 21:14:08] [Rank 0] step:1681/10000 train_time:133779ms step_avg:79.58ms
+[2025-07-07 21:14:10] [Rank 0] step:1701/10000 train_time:135495ms step_avg:79.66ms
+[2025-07-07 21:14:12] [Rank 0] step:1721/10000 train_time:137642ms step_avg:79.98ms
+[2025-07-07 21:14:14] [Rank 0] step:1741/10000 train_time:139118ms step_avg:79.91ms
+[2025-07-07 21:14:15] [Rank 0] step:1761/10000 train_time:140595ms step_avg:79.84ms
+[2025-07-07 21:14:16] [Rank 0] step:1781/10000 train_time:142069ms step_avg:79.77ms
+[2025-07-07 21:14:18] [Rank 0] step:1801/10000 train_time:143803ms step_avg:79.85ms
+[2025-07-07 21:14:20] [Rank 0] step:1821/10000 train_time:145260ms step_avg:79.77ms
+[2025-07-07 21:14:21] [Rank 0] step:1841/10000 train_time:146738ms step_avg:79.71ms
+[2025-07-07 21:14:23] [Rank 0] step:1861/10000 train_time:148213ms step_avg:79.64ms
+[2025-07-07 21:14:24] [Rank 0] step:1881/10000 train_time:149689ms step_avg:79.58ms
+[2025-07-07 21:14:26] [Rank 0] step:1901/10000 train_time:151817ms step_avg:79.86ms
+[2025-07-07 21:14:28] [Rank 0] step:1921/10000 train_time:153295ms step_avg:79.80ms
+[2025-07-07 21:14:29] [Rank 0] step:1941/10000 train_time:154770ms step_avg:79.74ms
+[2025-07-07 21:14:31] [Rank 0] step:1961/10000 train_time:156246ms step_avg:79.68ms
+[2025-07-07 21:14:33] [Rank 0] step:1981/10000 train_time:158399ms step_avg:79.96ms
+[2025-07-07 21:14:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:14:35] [Rank 0] PRINT: step:2000/10000 train_loss:1.1208 val_loss:1.0952 train_time:159854ms step_avg:79.93ms
+[2025-07-07 21:14:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:14:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:14:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:19:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:19:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:19:56] [Rank 0] Total Loss: 4.9018
+[2025-07-07 21:19:56] [Rank 0] Total FTA: 0.5003
+[2025-07-07 21:19:56] [Rank 0] Group 0 Loss: 5.1187
+[2025-07-07 21:19:56] [Rank 0] Group 1 Loss: 4.7754
+[2025-07-07 21:19:56] [Rank 0] Group 2 Loss: 4.9336
+[2025-07-07 21:19:56] [Rank 0] Group 3 Loss: 4.8616
+[2025-07-07 21:19:56] [Rank 0] Group 4 Loss: 4.8239
+[2025-07-07 21:19:56] [Rank 0] Group 5 Loss: 4.8491
+[2025-07-07 21:19:56] [Rank 0] Group 6 Loss: 4.8216
+[2025-07-07 21:19:56] [Rank 0] Group 7 Loss: 4.8868
+[2025-07-07 21:19:56] [Rank 0] Group 8 Loss: 4.8835
+[2025-07-07 21:19:56] [Rank 0] Group 9 Loss: 4.8874
+[2025-07-07 21:19:56] [Rank 0] Group 10 Loss: 4.8996
+[2025-07-07 21:19:56] [Rank 0] Group 11 Loss: 4.8855
+[2025-07-07 21:19:56] [Rank 0] Group 0 FTA: 0.6788
+[2025-07-07 21:19:56] [Rank 0] Group 1 FTA: 0.1849
+[2025-07-07 21:19:57] [Rank 0] Group 2 FTA: 0.5885
+[2025-07-07 21:19:57] [Rank 0] Group 3 FTA: 0.2734
+[2025-07-07 21:19:57] [Rank 0] Group 4 FTA: 0.4609
+[2025-07-07 21:19:57] [Rank 0] Group 5 FTA: 0.5521
+[2025-07-07 21:19:57] [Rank 0] Group 6 FTA: 0.4948
+[2025-07-07 21:19:57] [Rank 0] Group 7 FTA: 0.5052
+[2025-07-07 21:19:57] [Rank 0] Group 8 FTA: 0.5104
+[2025-07-07 21:19:57] [Rank 0] Group 9 FTA: 0.4883
+[2025-07-07 21:19:57] [Rank 0] Group 10 FTA: 0.5332
+[2025-07-07 21:19:57] [Rank 0] Group 11 FTA: 0.5146
+[2025-07-07 21:19:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:19:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:19:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:19:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:19:58] [Rank 0] step:2001/10000 train_time:159876ms step_avg:79.90ms
+[2025-07-07 21:19:59] [Rank 0] step:2021/10000 train_time:161348ms step_avg:79.84ms
+[2025-07-07 21:20:01] [Rank 0] step:2041/10000 train_time:162820ms step_avg:79.77ms
+[2025-07-07 21:20:02] [Rank 0] step:2061/10000 train_time:164301ms step_avg:79.72ms
+[2025-07-07 21:20:05] [Rank 0] step:2081/10000 train_time:166431ms step_avg:79.98ms
+[2025-07-07 21:20:06] [Rank 0] step:2101/10000 train_time:167907ms step_avg:79.92ms
+[2025-07-07 21:20:07] [Rank 0] step:2121/10000 train_time:169382ms step_avg:79.86ms
+[2025-07-07 21:20:09] [Rank 0] step:2141/10000 train_time:170854ms step_avg:79.80ms
+[2025-07-07 21:20:11] [Rank 0] step:2161/10000 train_time:172381ms step_avg:79.77ms
+[2025-07-07 21:20:13] [Rank 0] step:2181/10000 train_time:174469ms step_avg:80.00ms
+[2025-07-07 21:20:14] [Rank 0] step:2201/10000 train_time:175941ms step_avg:79.94ms
+[2025-07-07 21:20:16] [Rank 0] step:2221/10000 train_time:177416ms step_avg:79.88ms
+[2025-07-07 21:20:17] [Rank 0] step:2241/10000 train_time:178914ms step_avg:79.84ms
+[2025-07-07 21:20:19] [Rank 0] step:2261/10000 train_time:181070ms step_avg:80.08ms
+[2025-07-07 21:20:21] [Rank 0] step:2281/10000 train_time:182569ms step_avg:80.04ms
+[2025-07-07 21:20:22] [Rank 0] step:2301/10000 train_time:184068ms step_avg:79.99ms
+[2025-07-07 21:20:24] [Rank 0] step:2321/10000 train_time:185568ms step_avg:79.95ms
+[2025-07-07 21:20:25] [Rank 0] step:2341/10000 train_time:187067ms step_avg:79.91ms
+[2025-07-07 21:20:27] [Rank 0] step:2361/10000 train_time:188804ms step_avg:79.97ms
+[2025-07-07 21:20:29] [Rank 0] step:2381/10000 train_time:190528ms step_avg:80.02ms
+[2025-07-07 21:20:30] [Rank 0] step:2401/10000 train_time:192026ms step_avg:79.98ms
+[2025-07-07 21:20:32] [Rank 0] step:2421/10000 train_time:193527ms step_avg:79.94ms
+[2025-07-07 21:20:34] [Rank 0] step:2441/10000 train_time:195695ms step_avg:80.17ms
+[2025-07-07 21:20:35] [Rank 0] step:2461/10000 train_time:197193ms step_avg:80.13ms
+[2025-07-07 21:20:37] [Rank 0] step:2481/10000 train_time:198694ms step_avg:80.09ms
+[2025-07-07 21:20:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:20:39] [Rank 0] PRINT: step:2500/10000 train_loss:0.9967 val_loss:0.9395 train_time:200195ms step_avg:80.08ms
+[2025-07-07 21:20:39] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:20:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:20:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:26:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:26:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:26:03] [Rank 0] Total Loss: 5.0202
+[2025-07-07 21:26:03] [Rank 0] Total FTA: 0.7366
+[2025-07-07 21:26:03] [Rank 0] Group 0 Loss: 5.0538
+[2025-07-07 21:26:03] [Rank 0] Group 1 Loss: 4.9633
+[2025-07-07 21:26:03] [Rank 0] Group 2 Loss: 4.8510
+[2025-07-07 21:26:03] [Rank 0] Group 3 Loss: 5.0875
+[2025-07-07 21:26:03] [Rank 0] Group 4 Loss: 4.9859
+[2025-07-07 21:26:03] [Rank 0] Group 5 Loss: 5.0535
+[2025-07-07 21:26:03] [Rank 0] Group 6 Loss: 4.9141
+[2025-07-07 21:26:03] [Rank 0] Group 7 Loss: 5.0566
+[2025-07-07 21:26:03] [Rank 0] Group 8 Loss: 5.0555
+[2025-07-07 21:26:03] [Rank 0] Group 9 Loss: 4.9742
+[2025-07-07 21:26:03] [Rank 0] Group 10 Loss: 5.0354
+[2025-07-07 21:26:03] [Rank 0] Group 11 Loss: 5.0719
+[2025-07-07 21:26:03] [Rank 0] Group 0 FTA: 0.6658
+[2025-07-07 21:26:03] [Rank 0] Group 1 FTA: 0.6484
+[2025-07-07 21:26:03] [Rank 0] Group 2 FTA: 0.6302
+[2025-07-07 21:26:03] [Rank 0] Group 3 FTA: 0.6849
+[2025-07-07 21:26:03] [Rank 0] Group 4 FTA: 0.7708
+[2025-07-07 21:26:03] [Rank 0] Group 5 FTA: 0.7656
+[2025-07-07 21:26:03] [Rank 0] Group 6 FTA: 0.7552
+[2025-07-07 21:26:03] [Rank 0] Group 7 FTA: 0.8047
+[2025-07-07 21:26:03] [Rank 0] Group 8 FTA: 0.7917
+[2025-07-07 21:26:03] [Rank 0] Group 9 FTA: 0.7695
+[2025-07-07 21:26:03] [Rank 0] Group 10 FTA: 0.7676
+[2025-07-07 21:26:03] [Rank 0] Group 11 FTA: 0.7812
+[2025-07-07 21:26:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:26:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:26:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:26:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:26:04] [Rank 0] step:2501/10000 train_time:200215ms step_avg:80.05ms
+[2025-07-07 21:26:06] [Rank 0] step:2521/10000 train_time:201983ms step_avg:80.12ms
+[2025-07-07 21:26:08] [Rank 0] step:2541/10000 train_time:203872ms step_avg:80.23ms
+[2025-07-07 21:26:09] [Rank 0] step:2561/10000 train_time:205365ms step_avg:80.19ms
+[2025-07-07 21:26:11] [Rank 0] step:2581/10000 train_time:206861ms step_avg:80.15ms
+[2025-07-07 21:26:12] [Rank 0] step:2601/10000 train_time:208358ms step_avg:80.11ms
+[2025-07-07 21:26:14] [Rank 0] step:2621/10000 train_time:210088ms step_avg:80.16ms
+[2025-07-07 21:26:16] [Rank 0] step:2641/10000 train_time:211587ms step_avg:80.12ms
+[2025-07-07 21:26:17] [Rank 0] step:2661/10000 train_time:213084ms step_avg:80.08ms
+[2025-07-07 21:26:19] [Rank 0] step:2681/10000 train_time:214582ms step_avg:80.04ms
+[2025-07-07 21:26:20] [Rank 0] step:2701/10000 train_time:216135ms step_avg:80.02ms
+[2025-07-07 21:26:22] [Rank 0] step:2721/10000 train_time:217819ms step_avg:80.05ms
+[2025-07-07 21:26:23] [Rank 0] step:2741/10000 train_time:219317ms step_avg:80.01ms
+[2025-07-07 21:26:25] [Rank 0] step:2761/10000 train_time:220815ms step_avg:79.98ms
+[2025-07-07 21:26:26] [Rank 0] step:2781/10000 train_time:222316ms step_avg:79.94ms
+[2025-07-07 21:26:28] [Rank 0] step:2801/10000 train_time:224470ms step_avg:80.14ms
+[2025-07-07 21:26:30] [Rank 0] step:2821/10000 train_time:225967ms step_avg:80.10ms
+[2025-07-07 21:26:31] [Rank 0] step:2841/10000 train_time:227465ms step_avg:80.07ms
+[2025-07-07 21:26:33] [Rank 0] step:2861/10000 train_time:228967ms step_avg:80.03ms
+[2025-07-07 21:26:35] [Rank 0] step:2881/10000 train_time:230723ms step_avg:80.08ms
+[2025-07-07 21:26:37] [Rank 0] step:2901/10000 train_time:232613ms step_avg:80.18ms
+[2025-07-07 21:26:38] [Rank 0] step:2921/10000 train_time:234113ms step_avg:80.15ms
+[2025-07-07 21:26:40] [Rank 0] step:2941/10000 train_time:235613ms step_avg:80.11ms
+[2025-07-07 21:26:41] [Rank 0] step:2961/10000 train_time:237114ms step_avg:80.08ms
+[2025-07-07 21:26:43] [Rank 0] step:2981/10000 train_time:239285ms step_avg:80.27ms
+[2025-07-07 21:26:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:26:46] [Rank 0] PRINT: step:3000/10000 train_loss:0.9168 val_loss:0.8981 train_time:240786ms step_avg:80.26ms
+[2025-07-07 21:26:46] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:26:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:26:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:32:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:32:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:32:09] [Rank 0] Total Loss: 5.1840
+[2025-07-07 21:32:09] [Rank 0] Total FTA: 0.8095
+[2025-07-07 21:32:09] [Rank 0] Group 0 Loss: 5.2685
+[2025-07-07 21:32:09] [Rank 0] Group 1 Loss: 5.0162
+[2025-07-07 21:32:09] [Rank 0] Group 2 Loss: 5.2408
+[2025-07-07 21:32:09] [Rank 0] Group 3 Loss: 5.0853
+[2025-07-07 21:32:09] [Rank 0] Group 4 Loss: 5.1544
+[2025-07-07 21:32:09] [Rank 0] Group 5 Loss: 5.1954
+[2025-07-07 21:32:09] [Rank 0] Group 6 Loss: 5.1399
+[2025-07-07 21:32:09] [Rank 0] Group 7 Loss: 5.2232
+[2025-07-07 21:32:09] [Rank 0] Group 8 Loss: 5.1699
+[2025-07-07 21:32:09] [Rank 0] Group 9 Loss: 5.2426
+[2025-07-07 21:32:09] [Rank 0] Group 10 Loss: 5.2179
+[2025-07-07 21:32:09] [Rank 0] Group 11 Loss: 5.1818
+[2025-07-07 21:32:09] [Rank 0] Group 0 FTA: 0.8231
+[2025-07-07 21:32:09] [Rank 0] Group 1 FTA: 0.8281
+[2025-07-07 21:32:09] [Rank 0] Group 2 FTA: 0.6328
+[2025-07-07 21:32:09] [Rank 0] Group 3 FTA: 0.7344
+[2025-07-07 21:32:09] [Rank 0] Group 4 FTA: 0.6953
+[2025-07-07 21:32:09] [Rank 0] Group 5 FTA: 0.8776
+[2025-07-07 21:32:09] [Rank 0] Group 6 FTA: 0.8073
+[2025-07-07 21:32:09] [Rank 0] Group 7 FTA: 0.8490
+[2025-07-07 21:32:09] [Rank 0] Group 8 FTA: 0.8359
+[2025-07-07 21:32:09] [Rank 0] Group 9 FTA: 0.8203
+[2025-07-07 21:32:09] [Rank 0] Group 10 FTA: 0.8418
+[2025-07-07 21:32:09] [Rank 0] Group 11 FTA: 0.8613
+[2025-07-07 21:32:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:32:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:32:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:32:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:32:11] [Rank 0] step:3001/10000 train_time:240806ms step_avg:80.24ms
+[2025-07-07 21:32:12] [Rank 0] step:3021/10000 train_time:242298ms step_avg:80.20ms
+[2025-07-07 21:32:14] [Rank 0] step:3041/10000 train_time:243793ms step_avg:80.17ms
+[2025-07-07 21:32:16] [Rank 0] step:3061/10000 train_time:245544ms step_avg:80.22ms
+[2025-07-07 21:32:17] [Rank 0] step:3081/10000 train_time:247428ms step_avg:80.31ms
+[2025-07-07 21:32:19] [Rank 0] step:3101/10000 train_time:248926ms step_avg:80.27ms
+[2025-07-07 21:32:20] [Rank 0] step:3121/10000 train_time:250422ms step_avg:80.24ms
+[2025-07-07 21:32:22] [Rank 0] step:3141/10000 train_time:251919ms step_avg:80.20ms
+[2025-07-07 21:32:24] [Rank 0] step:3161/10000 train_time:254073ms step_avg:80.38ms
+[2025-07-07 21:32:25] [Rank 0] step:3181/10000 train_time:255569ms step_avg:80.34ms
+[2025-07-07 21:32:27] [Rank 0] step:3201/10000 train_time:257067ms step_avg:80.31ms
+[2025-07-07 21:32:28] [Rank 0] step:3221/10000 train_time:258565ms step_avg:80.27ms
+[2025-07-07 21:32:31] [Rank 0] step:3241/10000 train_time:260117ms step_avg:80.26ms
+[2025-07-07 21:32:32] [Rank 0] step:3261/10000 train_time:262218ms step_avg:80.41ms
+[2025-07-07 21:32:34] [Rank 0] step:3281/10000 train_time:263718ms step_avg:80.38ms
+[2025-07-07 21:32:35] [Rank 0] step:3301/10000 train_time:265217ms step_avg:80.34ms
+[2025-07-07 21:32:37] [Rank 0] step:3321/10000 train_time:266720ms step_avg:80.31ms
+[2025-07-07 21:32:39] [Rank 0] step:3341/10000 train_time:268890ms step_avg:80.48ms
+[2025-07-07 21:32:40] [Rank 0] step:3361/10000 train_time:270386ms step_avg:80.45ms
+[2025-07-07 21:32:42] [Rank 0] step:3381/10000 train_time:271886ms step_avg:80.42ms
+[2025-07-07 21:32:43] [Rank 0] step:3401/10000 train_time:273386ms step_avg:80.38ms
+[2025-07-07 21:32:45] [Rank 0] step:3421/10000 train_time:274937ms step_avg:80.37ms
+[2025-07-07 21:32:47] [Rank 0] step:3441/10000 train_time:277032ms step_avg:80.51ms
+[2025-07-07 21:32:48] [Rank 0] step:3461/10000 train_time:278530ms step_avg:80.48ms
+[2025-07-07 21:32:50] [Rank 0] step:3481/10000 train_time:280029ms step_avg:80.44ms
+[2025-07-07 21:32:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:32:52] [Rank 0] PRINT: step:3500/10000 train_loss:0.8894 val_loss:0.8844 train_time:281529ms step_avg:80.44ms
+[2025-07-07 21:32:52] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:32:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:32:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:38:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:38:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:38:13] [Rank 0] Total Loss: 5.2310
+[2025-07-07 21:38:13] [Rank 0] Total FTA: 0.8218
+[2025-07-07 21:38:13] [Rank 0] Group 0 Loss: 5.2977
+[2025-07-07 21:38:13] [Rank 0] Group 1 Loss: 5.2410
+[2025-07-07 21:38:13] [Rank 0] Group 2 Loss: 5.0750
+[2025-07-07 21:38:13] [Rank 0] Group 3 Loss: 5.1890
+[2025-07-07 21:38:13] [Rank 0] Group 4 Loss: 5.2310
+[2025-07-07 21:38:13] [Rank 0] Group 5 Loss: 5.2086
+[2025-07-07 21:38:13] [Rank 0] Group 6 Loss: 5.1583
+[2025-07-07 21:38:13] [Rank 0] Group 7 Loss: 5.2566
+[2025-07-07 21:38:13] [Rank 0] Group 8 Loss: 5.2291
+[2025-07-07 21:38:13] [Rank 0] Group 9 Loss: 5.2138
+[2025-07-07 21:38:13] [Rank 0] Group 10 Loss: 5.2538
+[2025-07-07 21:38:13] [Rank 0] Group 11 Loss: 5.2713
+[2025-07-07 21:38:13] [Rank 0] Group 0 FTA: 0.5085
+[2025-07-07 21:38:13] [Rank 0] Group 1 FTA: 0.8594
+[2025-07-07 21:38:13] [Rank 0] Group 2 FTA: 0.7500
+[2025-07-07 21:38:13] [Rank 0] Group 3 FTA: 0.9531
+[2025-07-07 21:38:13] [Rank 0] Group 4 FTA: 0.8854
+[2025-07-07 21:38:13] [Rank 0] Group 5 FTA: 0.8490
+[2025-07-07 21:38:13] [Rank 0] Group 6 FTA: 0.8099
+[2025-07-07 21:38:13] [Rank 0] Group 7 FTA: 0.9010
+[2025-07-07 21:38:13] [Rank 0] Group 8 FTA: 0.9062
+[2025-07-07 21:38:13] [Rank 0] Group 9 FTA: 0.8867
+[2025-07-07 21:38:13] [Rank 0] Group 10 FTA: 0.8906
+[2025-07-07 21:38:13] [Rank 0] Group 11 FTA: 0.8789
+[2025-07-07 21:38:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:38:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:38:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:38:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:38:15] [Rank 0] step:3501/10000 train_time:281550ms step_avg:80.42ms
+[2025-07-07 21:38:17] [Rank 0] step:3521/10000 train_time:283303ms step_avg:80.46ms
+[2025-07-07 21:38:18] [Rank 0] step:3541/10000 train_time:284798ms step_avg:80.43ms
+[2025-07-07 21:38:20] [Rank 0] step:3561/10000 train_time:286292ms step_avg:80.40ms
+[2025-07-07 21:38:21] [Rank 0] step:3581/10000 train_time:287789ms step_avg:80.37ms
+[2025-07-07 21:38:23] [Rank 0] step:3601/10000 train_time:289400ms step_avg:80.37ms
+[2025-07-07 21:38:24] [Rank 0] step:3621/10000 train_time:291138ms step_avg:80.40ms
+[2025-07-07 21:38:26] [Rank 0] step:3641/10000 train_time:292633ms step_avg:80.37ms
+[2025-07-07 21:38:27] [Rank 0] step:3661/10000 train_time:294131ms step_avg:80.34ms
+[2025-07-07 21:38:29] [Rank 0] step:3681/10000 train_time:295630ms step_avg:80.31ms
+[2025-07-07 21:38:31] [Rank 0] step:3701/10000 train_time:297794ms step_avg:80.46ms
+[2025-07-07 21:38:33] [Rank 0] step:3721/10000 train_time:299290ms step_avg:80.43ms
+[2025-07-07 21:38:34] [Rank 0] step:3741/10000 train_time:300788ms step_avg:80.40ms
+[2025-07-07 21:38:36] [Rank 0] step:3761/10000 train_time:302286ms step_avg:80.37ms
+[2025-07-07 21:38:38] [Rank 0] step:3781/10000 train_time:303785ms step_avg:80.35ms
+[2025-07-07 21:38:39] [Rank 0] step:3801/10000 train_time:305941ms step_avg:80.49ms
+[2025-07-07 21:38:41] [Rank 0] step:3821/10000 train_time:307439ms step_avg:80.46ms
+[2025-07-07 21:38:42] [Rank 0] step:3841/10000 train_time:308938ms step_avg:80.43ms
+[2025-07-07 21:38:44] [Rank 0] step:3861/10000 train_time:310438ms step_avg:80.40ms
+[2025-07-07 21:38:45] [Rank 0] step:3881/10000 train_time:312173ms step_avg:80.44ms
+[2025-07-07 21:38:47] [Rank 0] step:3901/10000 train_time:313671ms step_avg:80.41ms
+[2025-07-07 21:38:48] [Rank 0] step:3921/10000 train_time:315170ms step_avg:80.38ms
+[2025-07-07 21:38:50] [Rank 0] step:3941/10000 train_time:316671ms step_avg:80.35ms
+[2025-07-07 21:38:52] [Rank 0] step:3961/10000 train_time:318172ms step_avg:80.33ms
+[2025-07-07 21:38:54] [Rank 0] step:3981/10000 train_time:320342ms step_avg:80.47ms
+[2025-07-07 21:38:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:38:56] [Rank 0] PRINT: step:4000/10000 train_loss:0.8805 val_loss:0.8773 train_time:321839ms step_avg:80.46ms
+[2025-07-07 21:38:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:38:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:38:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:44:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:44:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:44:18] [Rank 0] Total Loss: 5.2817
+[2025-07-07 21:44:18] [Rank 0] Total FTA: 0.8818
+[2025-07-07 21:44:18] [Rank 0] Group 0 Loss: 5.3423
+[2025-07-07 21:44:18] [Rank 0] Group 1 Loss: 5.1253
+[2025-07-07 21:44:18] [Rank 0] Group 2 Loss: 5.2164
+[2025-07-07 21:44:18] [Rank 0] Group 3 Loss: 5.2874
+[2025-07-07 21:44:18] [Rank 0] Group 4 Loss: 5.3127
+[2025-07-07 21:44:18] [Rank 0] Group 5 Loss: 5.2650
+[2025-07-07 21:44:18] [Rank 0] Group 6 Loss: 5.2327
+[2025-07-07 21:44:18] [Rank 0] Group 7 Loss: 5.3390
+[2025-07-07 21:44:18] [Rank 0] Group 8 Loss: 5.3028
+[2025-07-07 21:44:18] [Rank 0] Group 9 Loss: 5.2590
+[2025-07-07 21:44:18] [Rank 0] Group 10 Loss: 5.3014
+[2025-07-07 21:44:18] [Rank 0] Group 11 Loss: 5.2969
+[2025-07-07 21:44:18] [Rank 0] Group 0 FTA: 0.6840
+[2025-07-07 21:44:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 21:44:18] [Rank 0] Group 2 FTA: 0.9245
+[2025-07-07 21:44:18] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-07 21:44:18] [Rank 0] Group 4 FTA: 0.8568
+[2025-07-07 21:44:18] [Rank 0] Group 5 FTA: 0.8724
+[2025-07-07 21:44:18] [Rank 0] Group 6 FTA: 0.9010
+[2025-07-07 21:44:18] [Rank 0] Group 7 FTA: 0.8880
+[2025-07-07 21:44:18] [Rank 0] Group 8 FTA: 0.8984
+[2025-07-07 21:44:18] [Rank 0] Group 9 FTA: 0.9180
+[2025-07-07 21:44:18] [Rank 0] Group 10 FTA: 0.9102
+[2025-07-07 21:44:18] [Rank 0] Group 11 FTA: 0.9141
+[2025-07-07 21:44:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:44:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:44:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:44:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:44:20] [Rank 0] step:4001/10000 train_time:321860ms step_avg:80.44ms
+[2025-07-07 21:44:21] [Rank 0] step:4021/10000 train_time:323356ms step_avg:80.42ms
+[2025-07-07 21:44:23] [Rank 0] step:4041/10000 train_time:324850ms step_avg:80.39ms
+[2025-07-07 21:44:24] [Rank 0] step:4061/10000 train_time:326582ms step_avg:80.42ms
+[2025-07-07 21:44:26] [Rank 0] step:4081/10000 train_time:328075ms step_avg:80.39ms
+[2025-07-07 21:44:27] [Rank 0] step:4101/10000 train_time:329569ms step_avg:80.36ms
+[2025-07-07 21:44:29] [Rank 0] step:4121/10000 train_time:331067ms step_avg:80.34ms
+[2025-07-07 21:44:31] [Rank 0] step:4141/10000 train_time:332565ms step_avg:80.31ms
+[2025-07-07 21:44:32] [Rank 0] step:4161/10000 train_time:334729ms step_avg:80.44ms
+[2025-07-07 21:44:34] [Rank 0] step:4181/10000 train_time:336227ms step_avg:80.42ms
+[2025-07-07 21:44:35] [Rank 0] step:4201/10000 train_time:337725ms step_avg:80.39ms
+[2025-07-07 21:44:37] [Rank 0] step:4221/10000 train_time:339223ms step_avg:80.37ms
+[2025-07-07 21:44:39] [Rank 0] step:4241/10000 train_time:341370ms step_avg:80.49ms
+[2025-07-07 21:44:41] [Rank 0] step:4261/10000 train_time:342870ms step_avg:80.47ms
+[2025-07-07 21:44:42] [Rank 0] step:4281/10000 train_time:344474ms step_avg:80.47ms
+[2025-07-07 21:44:44] [Rank 0] step:4301/10000 train_time:346081ms step_avg:80.47ms
+[2025-07-07 21:44:46] [Rank 0] step:4321/10000 train_time:347633ms step_avg:80.45ms
+[2025-07-07 21:44:47] [Rank 0] step:4341/10000 train_time:349740ms step_avg:80.57ms
+[2025-07-07 21:44:49] [Rank 0] step:4361/10000 train_time:351241ms step_avg:80.54ms
+[2025-07-07 21:44:50] [Rank 0] step:4381/10000 train_time:352742ms step_avg:80.52ms
+[2025-07-07 21:44:52] [Rank 0] step:4401/10000 train_time:354242ms step_avg:80.49ms
+[2025-07-07 21:44:54] [Rank 0] step:4421/10000 train_time:356411ms step_avg:80.62ms
+[2025-07-07 21:44:56] [Rank 0] step:4441/10000 train_time:357909ms step_avg:80.59ms
+[2025-07-07 21:44:57] [Rank 0] step:4461/10000 train_time:359409ms step_avg:80.57ms
+[2025-07-07 21:44:59] [Rank 0] step:4481/10000 train_time:360910ms step_avg:80.54ms
+[2025-07-07 21:45:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:45:01] [Rank 0] PRINT: step:4500/10000 train_loss:0.8749 val_loss:0.8721 train_time:362410ms step_avg:80.54ms
+[2025-07-07 21:45:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:45:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:45:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:45:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:45:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:50:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:50:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:50:24] [Rank 0] Total Loss: 5.3799
+[2025-07-07 21:50:24] [Rank 0] Total FTA: 0.8885
+[2025-07-07 21:50:24] [Rank 0] Group 0 Loss: 5.4178
+[2025-07-07 21:50:24] [Rank 0] Group 1 Loss: 5.5088
+[2025-07-07 21:50:24] [Rank 0] Group 2 Loss: 5.3606
+[2025-07-07 21:50:24] [Rank 0] Group 3 Loss: 5.4518
+[2025-07-07 21:50:24] [Rank 0] Group 4 Loss: 5.3334
+[2025-07-07 21:50:24] [Rank 0] Group 5 Loss: 5.3195
+[2025-07-07 21:50:24] [Rank 0] Group 6 Loss: 5.2903
+[2025-07-07 21:50:24] [Rank 0] Group 7 Loss: 5.3936
+[2025-07-07 21:50:24] [Rank 0] Group 8 Loss: 5.3657
+[2025-07-07 21:50:24] [Rank 0] Group 9 Loss: 5.2805
+[2025-07-07 21:50:24] [Rank 0] Group 10 Loss: 5.3534
+[2025-07-07 21:50:24] [Rank 0] Group 11 Loss: 5.3956
+[2025-07-07 21:50:24] [Rank 0] Group 0 FTA: 0.8205
+[2025-07-07 21:50:24] [Rank 0] Group 1 FTA: 0.8464
+[2025-07-07 21:50:24] [Rank 0] Group 2 FTA: 0.9271
+[2025-07-07 21:50:24] [Rank 0] Group 3 FTA: 0.8724
+[2025-07-07 21:50:24] [Rank 0] Group 4 FTA: 0.9167
+[2025-07-07 21:50:24] [Rank 0] Group 5 FTA: 0.9036
+[2025-07-07 21:50:24] [Rank 0] Group 6 FTA: 0.9245
+[2025-07-07 21:50:24] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-07 21:50:24] [Rank 0] Group 8 FTA: 0.8776
+[2025-07-07 21:50:24] [Rank 0] Group 9 FTA: 0.9219
+[2025-07-07 21:50:24] [Rank 0] Group 10 FTA: 0.8984
+[2025-07-07 21:50:24] [Rank 0] Group 11 FTA: 0.8955
+[2025-07-07 21:50:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:50:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:50:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:50:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
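[Editor's note: the eval set consistently comes out at 5633 rather than the ~5000 target, so the sampler's quota rule must round up or enforce per-class floors; the exact rule is not in the log. A minimal sketch of one plausible mechanism, all names hypothetical:]

import math, random

def stratified_sample(pools, target=5000, min_per_class=50, seed=0):
    # pools: {class_id: [sample, ...]}. Proportional quota per class with a
    # floor so rare classes stay represented; the union can land above the
    # target (e.g. 5633 > 5000), matching the "~5000" wording in the log.
    rng = random.Random(seed)
    total = sum(len(p) for p in pools.values())
    picked = []
    for cid, pool in pools.items():
        quota = max(min_per_class, math.ceil(target * len(pool) / total))
        picked.extend(rng.sample(pool, min(quota, len(pool))))
    return picked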
+[2025-07-07 21:50:26] [Rank 0] step:4501/10000 train_time:362436ms step_avg:80.52ms
+[2025-07-07 21:50:28] [Rank 0] step:4521/10000 train_time:364619ms step_avg:80.65ms
+[2025-07-07 21:50:29] [Rank 0] step:4541/10000 train_time:366111ms step_avg:80.62ms
+[2025-07-07 21:50:31] [Rank 0] step:4561/10000 train_time:367606ms step_avg:80.60ms
+[2025-07-07 21:50:32] [Rank 0] step:4581/10000 train_time:369102ms step_avg:80.57ms
+[2025-07-07 21:50:35] [Rank 0] step:4601/10000 train_time:371261ms step_avg:80.69ms
+[2025-07-07 21:50:36] [Rank 0] step:4621/10000 train_time:372756ms step_avg:80.67ms
+[2025-07-07 21:50:38] [Rank 0] step:4641/10000 train_time:374251ms step_avg:80.64ms
+[2025-07-07 21:50:39] [Rank 0] step:4661/10000 train_time:375746ms step_avg:80.61ms
+[2025-07-07 21:50:41] [Rank 0] step:4681/10000 train_time:377244ms step_avg:80.59ms
+[2025-07-07 21:50:43] [Rank 0] step:4701/10000 train_time:379405ms step_avg:80.71ms
+[2025-07-07 21:50:44] [Rank 0] step:4721/10000 train_time:380901ms step_avg:80.68ms
+[2025-07-07 21:50:46] [Rank 0] step:4741/10000 train_time:382400ms step_avg:80.66ms
+[2025-07-07 21:50:47] [Rank 0] step:4761/10000 train_time:383898ms step_avg:80.63ms
+[2025-07-07 21:50:49] [Rank 0] step:4781/10000 train_time:386046ms step_avg:80.75ms
+[2025-07-07 21:50:51] [Rank 0] step:4801/10000 train_time:387544ms step_avg:80.72ms
+[2025-07-07 21:50:52] [Rank 0] step:4821/10000 train_time:389042ms step_avg:80.70ms
+[2025-07-07 21:50:54] [Rank 0] step:4841/10000 train_time:390541ms step_avg:80.67ms
+[2025-07-07 21:50:56] [Rank 0] step:4861/10000 train_time:392091ms step_avg:80.66ms
+[2025-07-07 21:50:58] [Rank 0] step:4881/10000 train_time:394191ms step_avg:80.76ms
+[2025-07-07 21:50:59] [Rank 0] step:4901/10000 train_time:395692ms step_avg:80.74ms
+[2025-07-07 21:51:01] [Rank 0] step:4921/10000 train_time:397192ms step_avg:80.71ms
+[2025-07-07 21:51:02] [Rank 0] step:4941/10000 train_time:398962ms step_avg:80.75ms
+[2025-07-07 21:51:04] [Rank 0] step:4961/10000 train_time:401118ms step_avg:80.85ms
+[2025-07-07 21:51:06] [Rank 0] step:4981/10000 train_time:402619ms step_avg:80.83ms
+[2025-07-07 21:51:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:51:08] [Rank 0] PRINT: step:5000/10000 train_loss:0.8711 val_loss:0.8700 train_time:404118ms step_avg:80.82ms
+[2025-07-07 21:51:08] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:51:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:51:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:56:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:56:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:56:32] [Rank 0] Total Loss: 5.3347
+[2025-07-07 21:56:32] [Rank 0] Total FTA: 0.8933
+[2025-07-07 21:56:32] [Rank 0] Group 0 Loss: 5.2843
+[2025-07-07 21:56:32] [Rank 0] Group 1 Loss: 5.5419
+[2025-07-07 21:56:32] [Rank 0] Group 2 Loss: 5.2314
+[2025-07-07 21:56:32] [Rank 0] Group 3 Loss: 5.2624
+[2025-07-07 21:56:32] [Rank 0] Group 4 Loss: 5.2730
+[2025-07-07 21:56:32] [Rank 0] Group 5 Loss: 5.3391
+[2025-07-07 21:56:32] [Rank 0] Group 6 Loss: 5.2651
+[2025-07-07 21:56:32] [Rank 0] Group 7 Loss: 5.3625
+[2025-07-07 21:56:32] [Rank 0] Group 8 Loss: 5.4116
+[2025-07-07 21:56:32] [Rank 0] Group 9 Loss: 5.2990
+[2025-07-07 21:56:32] [Rank 0] Group 10 Loss: 5.4020
+[2025-07-07 21:56:32] [Rank 0] Group 11 Loss: 5.3445
+[2025-07-07 21:56:32] [Rank 0] Group 0 FTA: 0.6905
+[2025-07-07 21:56:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 21:56:32] [Rank 0] Group 2 FTA: 0.9062
+[2025-07-07 21:56:32] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-07 21:56:32] [Rank 0] Group 4 FTA: 0.8932
+[2025-07-07 21:56:32] [Rank 0] Group 5 FTA: 0.9661
+[2025-07-07 21:56:32] [Rank 0] Group 6 FTA: 0.9401
+[2025-07-07 21:56:32] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-07 21:56:32] [Rank 0] Group 8 FTA: 0.9297
+[2025-07-07 21:56:32] [Rank 0] Group 9 FTA: 0.8867
+[2025-07-07 21:56:32] [Rank 0] Group 10 FTA: 0.9414
+[2025-07-07 21:56:32] [Rank 0] Group 11 FTA: 0.9199
+[2025-07-07 21:56:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 21:56:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 21:56:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 21:56:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 21:56:34] [Rank 0] step:5001/10000 train_time:404139ms step_avg:80.81ms
+[2025-07-07 21:56:35] [Rank 0] step:5021/10000 train_time:405641ms step_avg:80.79ms
+[2025-07-07 21:56:37] [Rank 0] step:5041/10000 train_time:407799ms step_avg:80.90ms
+[2025-07-07 21:56:39] [Rank 0] step:5061/10000 train_time:409274ms step_avg:80.87ms
+[2025-07-07 21:56:40] [Rank 0] step:5081/10000 train_time:410769ms step_avg:80.84ms
+[2025-07-07 21:56:42] [Rank 0] step:5101/10000 train_time:412266ms step_avg:80.82ms
+[2025-07-07 21:56:43] [Rank 0] step:5121/10000 train_time:413763ms step_avg:80.80ms
+[2025-07-07 21:56:45] [Rank 0] step:5141/10000 train_time:415918ms step_avg:80.90ms
+[2025-07-07 21:56:47] [Rank 0] step:5161/10000 train_time:417414ms step_avg:80.88ms
+[2025-07-07 21:56:48] [Rank 0] step:5181/10000 train_time:418910ms step_avg:80.85ms
+[2025-07-07 21:56:50] [Rank 0] step:5201/10000 train_time:420408ms step_avg:80.83ms
+[2025-07-07 21:56:52] [Rank 0] step:5221/10000 train_time:421908ms step_avg:80.81ms
+[2025-07-07 21:56:53] [Rank 0] step:5241/10000 train_time:424063ms step_avg:80.91ms
+[2025-07-07 21:56:55] [Rank 0] step:5261/10000 train_time:425562ms step_avg:80.89ms
+[2025-07-07 21:56:56] [Rank 0] step:5281/10000 train_time:427060ms step_avg:80.87ms
+[2025-07-07 21:56:58] [Rank 0] step:5301/10000 train_time:428559ms step_avg:80.84ms
+[2025-07-07 21:57:00] [Rank 0] step:5321/10000 train_time:430703ms step_avg:80.94ms
+[2025-07-07 21:57:02] [Rank 0] step:5341/10000 train_time:432200ms step_avg:80.92ms
+[2025-07-07 21:57:03] [Rank 0] step:5361/10000 train_time:433699ms step_avg:80.90ms
+[2025-07-07 21:57:05] [Rank 0] step:5381/10000 train_time:435199ms step_avg:80.88ms
+[2025-07-07 21:57:07] [Rank 0] step:5401/10000 train_time:436956ms step_avg:80.90ms
+[2025-07-07 21:57:08] [Rank 0] step:5421/10000 train_time:438848ms step_avg:80.95ms
+[2025-07-07 21:57:10] [Rank 0] step:5441/10000 train_time:440348ms step_avg:80.93ms
+[2025-07-07 21:57:11] [Rank 0] step:5461/10000 train_time:441849ms step_avg:80.91ms
+[2025-07-07 21:57:13] [Rank 0] step:5481/10000 train_time:443348ms step_avg:80.89ms
+[2025-07-07 21:57:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:57:16] [Rank 0] PRINT: step:5500/10000 train_loss:0.8681 val_loss:0.8699 train_time:445485ms step_avg:81.00ms
+[2025-07-07 21:57:16] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:57:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:57:16] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:02:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:02:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:02:40] [Rank 0] Total Loss: 5.3951
+[2025-07-07 22:02:40] [Rank 0] Total FTA: 0.9544
+[2025-07-07 22:02:40] [Rank 0] Group 0 Loss: 5.4979
+[2025-07-07 22:02:40] [Rank 0] Group 1 Loss: 5.4360
+[2025-07-07 22:02:40] [Rank 0] Group 2 Loss: 5.4760
+[2025-07-07 22:02:40] [Rank 0] Group 3 Loss: 5.1820
+[2025-07-07 22:02:40] [Rank 0] Group 4 Loss: 5.3593
+[2025-07-07 22:02:40] [Rank 0] Group 5 Loss: 5.2859
+[2025-07-07 22:02:40] [Rank 0] Group 6 Loss: 5.2923
+[2025-07-07 22:02:40] [Rank 0] Group 7 Loss: 5.4158
+[2025-07-07 22:02:40] [Rank 0] Group 8 Loss: 5.4254
+[2025-07-07 22:02:40] [Rank 0] Group 9 Loss: 5.3984
+[2025-07-07 22:02:40] [Rank 0] Group 10 Loss: 5.4086
+[2025-07-07 22:02:40] [Rank 0] Group 11 Loss: 5.4182
+[2025-07-07 22:02:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 22:02:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:02:40] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 22:02:40] [Rank 0] Group 3 FTA: 0.9010
+[2025-07-07 22:02:40] [Rank 0] Group 4 FTA: 0.9453
+[2025-07-07 22:02:40] [Rank 0] Group 5 FTA: 0.9766
+[2025-07-07 22:02:40] [Rank 0] Group 6 FTA: 0.9479
+[2025-07-07 22:02:40] [Rank 0] Group 7 FTA: 0.9297
+[2025-07-07 22:02:40] [Rank 0] Group 8 FTA: 0.9271
+[2025-07-07 22:02:40] [Rank 0] Group 9 FTA: 0.9531
+[2025-07-07 22:02:40] [Rank 0] Group 10 FTA: 0.9238
+[2025-07-07 22:02:40] [Rank 0] Group 11 FTA: 0.9385
+[2025-07-07 22:02:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:02:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:02:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:02:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
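[Editor's note: each evaluation re-renders the four PNGs at fixed paths, so the files on disk always show the full history up to the current step. A minimal sketch of that update-and-overwrite pattern, assumed behavior with hypothetical names:]

import matplotlib
matplotlib.use("Agg")  # headless rendering on the training node
import matplotlib.pyplot as plt

def save_curves(history, out_path):
    # history: {group_id: list of (step, value)} accumulated across evals.
    fig, ax = plt.subplots()
    for gid in sorted(history):
        steps, vals = zip(*history[gid])
        ax.plot(steps, vals, label=f"Group {gid}")
    ax.set_xlabel("step")
    ax.legend(fontsize=6)
    fig.savefig(out_path)  # overwrites the previous PNG at the same path
    plt.close(fig)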
+[2025-07-07 22:02:42] [Rank 0] step:5501/10000 train_time:445506ms step_avg:80.99ms
+[2025-07-07 22:02:43] [Rank 0] step:5521/10000 train_time:447009ms step_avg:80.97ms
+[2025-07-07 22:02:45] [Rank 0] step:5541/10000 train_time:448501ms step_avg:80.94ms
+[2025-07-07 22:02:46] [Rank 0] step:5561/10000 train_time:449995ms step_avg:80.92ms
+[2025-07-07 22:02:48] [Rank 0] step:5581/10000 train_time:451489ms step_avg:80.90ms
+[2025-07-07 22:02:50] [Rank 0] step:5601/10000 train_time:453649ms step_avg:80.99ms
+[2025-07-07 22:02:51] [Rank 0] step:5621/10000 train_time:455144ms step_avg:80.97ms
+[2025-07-07 22:02:53] [Rank 0] step:5641/10000 train_time:456640ms step_avg:80.95ms
+[2025-07-07 22:02:54] [Rank 0] step:5661/10000 train_time:458139ms step_avg:80.93ms
+[2025-07-07 22:02:56] [Rank 0] step:5681/10000 train_time:460291ms step_avg:81.02ms
+[2025-07-07 22:02:58] [Rank 0] step:5701/10000 train_time:461789ms step_avg:81.00ms
+[2025-07-07 22:02:59] [Rank 0] step:5721/10000 train_time:463287ms step_avg:80.98ms
+[2025-07-07 22:03:01] [Rank 0] step:5741/10000 train_time:464784ms step_avg:80.96ms
+[2025-07-07 22:03:03] [Rank 0] step:5761/10000 train_time:466540ms step_avg:80.98ms
+[2025-07-07 22:03:04] [Rank 0] step:5781/10000 train_time:468019ms step_avg:80.96ms
+[2025-07-07 22:03:06] [Rank 0] step:5801/10000 train_time:469518ms step_avg:80.94ms
+[2025-07-07 22:03:07] [Rank 0] step:5821/10000 train_time:471016ms step_avg:80.92ms
+[2025-07-07 22:03:09] [Rank 0] step:5841/10000 train_time:472518ms step_avg:80.90ms
+[2025-07-07 22:03:11] [Rank 0] step:5861/10000 train_time:474661ms step_avg:80.99ms
+[2025-07-07 22:03:12] [Rank 0] step:5881/10000 train_time:476160ms step_avg:80.97ms
+[2025-07-07 22:03:14] [Rank 0] step:5901/10000 train_time:477660ms step_avg:80.95ms
+[2025-07-07 22:03:15] [Rank 0] step:5921/10000 train_time:479159ms step_avg:80.93ms
+[2025-07-07 22:03:17] [Rank 0] step:5941/10000 train_time:480657ms step_avg:80.91ms
+[2025-07-07 22:03:19] [Rank 0] step:5961/10000 train_time:482809ms step_avg:80.99ms
+[2025-07-07 22:03:20] [Rank 0] step:5981/10000 train_time:484308ms step_avg:80.97ms
+[2025-07-07 22:03:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:03:23] [Rank 0] PRINT: step:6000/10000 train_loss:0.8669 val_loss:0.8676 train_time:485807ms step_avg:80.97ms
+[2025-07-07 22:03:23] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:03:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:03:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:08:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:08:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:08:43] [Rank 0] Total Loss: 5.3657
+[2025-07-07 22:08:43] [Rank 0] Total FTA: 0.9581
+[2025-07-07 22:08:43] [Rank 0] Group 0 Loss: 5.4148
+[2025-07-07 22:08:43] [Rank 0] Group 1 Loss: 5.3573
+[2025-07-07 22:08:43] [Rank 0] Group 2 Loss: 5.4222
+[2025-07-07 22:08:43] [Rank 0] Group 3 Loss: 5.2705
+[2025-07-07 22:08:43] [Rank 0] Group 4 Loss: 5.3406
+[2025-07-07 22:08:43] [Rank 0] Group 5 Loss: 5.2651
+[2025-07-07 22:08:43] [Rank 0] Group 6 Loss: 5.2380
+[2025-07-07 22:08:43] [Rank 0] Group 7 Loss: 5.4360
+[2025-07-07 22:08:43] [Rank 0] Group 8 Loss: 5.4047
+[2025-07-07 22:08:43] [Rank 0] Group 9 Loss: 5.3608
+[2025-07-07 22:08:43] [Rank 0] Group 10 Loss: 5.4077
+[2025-07-07 22:08:43] [Rank 0] Group 11 Loss: 5.3806
+[2025-07-07 22:08:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 22:08:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:08:43] [Rank 0] Group 2 FTA: 0.9271
+[2025-07-07 22:08:43] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 22:08:43] [Rank 0] Group 4 FTA: 0.9818
+[2025-07-07 22:08:43] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-07 22:08:43] [Rank 0] Group 6 FTA: 0.9089
+[2025-07-07 22:08:43] [Rank 0] Group 7 FTA: 0.9453
+[2025-07-07 22:08:43] [Rank 0] Group 8 FTA: 0.9193
+[2025-07-07 22:08:43] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-07 22:08:43] [Rank 0] Group 10 FTA: 0.9609
+[2025-07-07 22:08:43] [Rank 0] Group 11 FTA: 0.9482
+[2025-07-07 22:08:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:08:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:08:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:08:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 22:08:45] [Rank 0] step:6001/10000 train_time:485828ms step_avg:80.96ms
+[2025-07-07 22:08:46] [Rank 0] step:6021/10000 train_time:487323ms step_avg:80.94ms
+[2025-07-07 22:08:48] [Rank 0] step:6041/10000 train_time:489477ms step_avg:81.03ms
+[2025-07-07 22:08:50] [Rank 0] step:6061/10000 train_time:490971ms step_avg:81.00ms
+[2025-07-07 22:08:51] [Rank 0] step:6081/10000 train_time:492465ms step_avg:80.98ms
+[2025-07-07 22:08:53] [Rank 0] step:6101/10000 train_time:493962ms step_avg:80.96ms
+[2025-07-07 22:08:55] [Rank 0] step:6121/10000 train_time:496145ms step_avg:81.06ms
+[2025-07-07 22:08:57] [Rank 0] step:6141/10000 train_time:497752ms step_avg:81.05ms
+[2025-07-07 22:08:58] [Rank 0] step:6161/10000 train_time:499337ms step_avg:81.05ms
+[2025-07-07 22:09:00] [Rank 0] step:6181/10000 train_time:500834ms step_avg:81.03ms
+[2025-07-07 22:09:01] [Rank 0] step:6201/10000 train_time:502331ms step_avg:81.01ms
+[2025-07-07 22:09:03] [Rank 0] step:6221/10000 train_time:504066ms step_avg:81.03ms
+[2025-07-07 22:09:04] [Rank 0] step:6241/10000 train_time:505563ms step_avg:81.01ms
+[2025-07-07 22:09:06] [Rank 0] step:6261/10000 train_time:507062ms step_avg:80.99ms
+[2025-07-07 22:09:07] [Rank 0] step:6281/10000 train_time:508562ms step_avg:80.97ms
+[2025-07-07 22:09:10] [Rank 0] step:6301/10000 train_time:510061ms step_avg:80.95ms
+[2025-07-07 22:09:11] [Rank 0] step:6321/10000 train_time:512215ms step_avg:81.03ms
+[2025-07-07 22:09:13] [Rank 0] step:6341/10000 train_time:513713ms step_avg:81.01ms
+[2025-07-07 22:09:14] [Rank 0] step:6361/10000 train_time:515211ms step_avg:81.00ms
+[2025-07-07 22:09:16] [Rank 0] step:6381/10000 train_time:516710ms step_avg:80.98ms
+[2025-07-07 22:09:18] [Rank 0] step:6401/10000 train_time:518856ms step_avg:81.06ms
+[2025-07-07 22:09:19] [Rank 0] step:6421/10000 train_time:520353ms step_avg:81.04ms
+[2025-07-07 22:09:21] [Rank 0] step:6441/10000 train_time:521853ms step_avg:81.02ms
+[2025-07-07 22:09:22] [Rank 0] step:6461/10000 train_time:523352ms step_avg:81.00ms
+[2025-07-07 22:09:24] [Rank 0] step:6481/10000 train_time:525106ms step_avg:81.02ms
+[2025-07-07 22:09:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:09:26] [Rank 0] PRINT: step:6500/10000 train_loss:0.8653 val_loss:0.8653 train_time:526585ms step_avg:81.01ms
+[2025-07-07 22:09:26] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:09:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:09:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:14:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:14:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:14:48] [Rank 0] Total Loss: 5.4350
+[2025-07-07 22:14:48] [Rank 0] Total FTA: 0.9217
+[2025-07-07 22:14:48] [Rank 0] Group 0 Loss: 5.3133
+[2025-07-07 22:14:48] [Rank 0] Group 1 Loss: 5.5296
+[2025-07-07 22:14:48] [Rank 0] Group 2 Loss: 5.4345
+[2025-07-07 22:14:48] [Rank 0] Group 3 Loss: 5.3583
+[2025-07-07 22:14:48] [Rank 0] Group 4 Loss: 5.4161
+[2025-07-07 22:14:48] [Rank 0] Group 5 Loss: 5.5263
+[2025-07-07 22:14:48] [Rank 0] Group 6 Loss: 5.3741
+[2025-07-07 22:14:48] [Rank 0] Group 7 Loss: 5.5285
+[2025-07-07 22:14:48] [Rank 0] Group 8 Loss: 5.4446
+[2025-07-07 22:14:48] [Rank 0] Group 9 Loss: 5.4600
+[2025-07-07 22:14:48] [Rank 0] Group 10 Loss: 5.4317
+[2025-07-07 22:14:48] [Rank 0] Group 11 Loss: 5.4722
+[2025-07-07 22:14:48] [Rank 0] Group 0 FTA: 0.8440
+[2025-07-07 22:14:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:14:48] [Rank 0] Group 2 FTA: 0.8698
+[2025-07-07 22:14:48] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-07 22:14:48] [Rank 0] Group 4 FTA: 0.9740
+[2025-07-07 22:14:48] [Rank 0] Group 5 FTA: 0.9349
+[2025-07-07 22:14:48] [Rank 0] Group 6 FTA: 0.8958
+[2025-07-07 22:14:48] [Rank 0] Group 7 FTA: 0.9115
+[2025-07-07 22:14:48] [Rank 0] Group 8 FTA: 0.9115
+[2025-07-07 22:14:48] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-07 22:14:48] [Rank 0] Group 10 FTA: 0.9316
+[2025-07-07 22:14:48] [Rank 0] Group 11 FTA: 0.9365
+[2025-07-07 22:14:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:14:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:14:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:14:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
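[Editor's note: the log never expands "FTA"; in QA evaluations like this it is commonly first-token accuracy, i.e. whether the model's top-1 prediction for the first answer token is correct. Under that assumption, the per-group breakdown above could be computed as in this hypothetical sketch, not taken from the script:]

import torch

def first_token_accuracy(logits, first_answer_pos, targets, groups, n_groups=12):
    # logits: (B, T, V); first_answer_pos, targets, groups: (B,) long tensors.
    pred = logits[torch.arange(logits.size(0)), first_answer_pos].argmax(-1)
    hit = (pred == targets).float()
    per_group = [hit[groups == g].mean().item() for g in range(n_groups)]
    return hit.mean().item(), per_group  # total FTA, per-group FTA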
+[2025-07-07 22:14:50] [Rank 0] step:6501/10000 train_time:526606ms step_avg:81.00ms
+[2025-07-07 22:14:51] [Rank 0] step:6521/10000 train_time:528110ms step_avg:80.99ms
+[2025-07-07 22:14:53] [Rank 0] step:6541/10000 train_time:529605ms step_avg:80.97ms
+[2025-07-07 22:14:54] [Rank 0] step:6561/10000 train_time:531102ms step_avg:80.95ms
+[2025-07-07 22:14:56] [Rank 0] step:6581/10000 train_time:532833ms step_avg:80.97ms
+[2025-07-07 22:14:57] [Rank 0] step:6601/10000 train_time:534325ms step_avg:80.95ms
+[2025-07-07 22:14:59] [Rank 0] step:6621/10000 train_time:535820ms step_avg:80.93ms
+[2025-07-07 22:15:00] [Rank 0] step:6641/10000 train_time:537316ms step_avg:80.91ms
+[2025-07-07 22:15:03] [Rank 0] step:6661/10000 train_time:539478ms step_avg:80.99ms
+[2025-07-07 22:15:04] [Rank 0] step:6681/10000 train_time:540956ms step_avg:80.97ms
+[2025-07-07 22:15:05] [Rank 0] step:6701/10000 train_time:542451ms step_avg:80.95ms
+[2025-07-07 22:15:07] [Rank 0] step:6721/10000 train_time:543948ms step_avg:80.93ms
+[2025-07-07 22:15:08] [Rank 0] step:6741/10000 train_time:545445ms step_avg:80.91ms
+[2025-07-07 22:15:10] [Rank 0] step:6761/10000 train_time:547283ms step_avg:80.95ms
+[2025-07-07 22:15:12] [Rank 0] step:6781/10000 train_time:548780ms step_avg:80.93ms
+[2025-07-07 22:15:13] [Rank 0] step:6801/10000 train_time:550276ms step_avg:80.91ms
+[2025-07-07 22:15:15] [Rank 0] step:6821/10000 train_time:551774ms step_avg:80.89ms
+[2025-07-07 22:15:17] [Rank 0] step:6841/10000 train_time:553566ms step_avg:80.92ms
+[2025-07-07 22:15:19] [Rank 0] step:6861/10000 train_time:555675ms step_avg:80.99ms
+[2025-07-07 22:15:20] [Rank 0] step:6881/10000 train_time:557174ms step_avg:80.97ms
+[2025-07-07 22:15:22] [Rank 0] step:6901/10000 train_time:558675ms step_avg:80.96ms
+[2025-07-07 22:15:23] [Rank 0] step:6921/10000 train_time:560172ms step_avg:80.94ms
+[2025-07-07 22:15:25] [Rank 0] step:6941/10000 train_time:561906ms step_avg:80.95ms
+[2025-07-07 22:15:26] [Rank 0] step:6961/10000 train_time:563405ms step_avg:80.94ms
+[2025-07-07 22:15:28] [Rank 0] step:6981/10000 train_time:564909ms step_avg:80.92ms
+[2025-07-07 22:15:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:15:30] [Rank 0] PRINT: step:7000/10000 train_loss:0.8639 val_loss:0.8641 train_time:566410ms step_avg:80.92ms
+[2025-07-07 22:15:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:15:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:15:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:20:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:20:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:20:53] [Rank 0] Total Loss: 5.4344
+[2025-07-07 22:20:53] [Rank 0] Total FTA: 0.9265
+[2025-07-07 22:20:53] [Rank 0] Group 0 Loss: 5.4686
+[2025-07-07 22:20:53] [Rank 0] Group 1 Loss: 5.4357
+[2025-07-07 22:20:53] [Rank 0] Group 2 Loss: 5.4913
+[2025-07-07 22:20:53] [Rank 0] Group 3 Loss: 5.3430
+[2025-07-07 22:20:53] [Rank 0] Group 4 Loss: 5.4118
+[2025-07-07 22:20:53] [Rank 0] Group 5 Loss: 5.3832
+[2025-07-07 22:20:53] [Rank 0] Group 6 Loss: 5.3522
+[2025-07-07 22:20:53] [Rank 0] Group 7 Loss: 5.4880
+[2025-07-07 22:20:53] [Rank 0] Group 8 Loss: 5.4538
+[2025-07-07 22:20:53] [Rank 0] Group 9 Loss: 5.4176
+[2025-07-07 22:20:53] [Rank 0] Group 10 Loss: 5.4358
+[2025-07-07 22:20:53] [Rank 0] Group 11 Loss: 5.4555
+[2025-07-07 22:20:53] [Rank 0] Group 0 FTA: 0.6905
+[2025-07-07 22:20:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:20:53] [Rank 0] Group 2 FTA: 0.9349
+[2025-07-07 22:20:53] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 22:20:53] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 22:20:53] [Rank 0] Group 5 FTA: 0.9688
+[2025-07-07 22:20:53] [Rank 0] Group 6 FTA: 0.9427
+[2025-07-07 22:20:53] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-07 22:20:53] [Rank 0] Group 8 FTA: 0.9688
+[2025-07-07 22:20:53] [Rank 0] Group 9 FTA: 0.9609
+[2025-07-07 22:20:53] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-07 22:20:53] [Rank 0] Group 11 FTA: 0.9521
+[2025-07-07 22:20:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:20:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:20:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:20:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 22:20:55] [Rank 0] step:7001/10000 train_time:566430ms step_avg:80.91ms
+[2025-07-07 22:20:57] [Rank 0] step:7021/10000 train_time:568581ms step_avg:80.98ms
+[2025-07-07 22:20:58] [Rank 0] step:7041/10000 train_time:570058ms step_avg:80.96ms
+[2025-07-07 22:21:00] [Rank 0] step:7061/10000 train_time:571549ms step_avg:80.94ms
+[2025-07-07 22:21:01] [Rank 0] step:7081/10000 train_time:573044ms step_avg:80.93ms
+[2025-07-07 22:21:03] [Rank 0] step:7101/10000 train_time:574538ms step_avg:80.91ms
+[2025-07-07 22:21:05] [Rank 0] step:7121/10000 train_time:576269ms step_avg:80.93ms
+[2025-07-07 22:21:06] [Rank 0] step:7141/10000 train_time:577764ms step_avg:80.91ms
+[2025-07-07 22:21:08] [Rank 0] step:7161/10000 train_time:579260ms step_avg:80.89ms
+[2025-07-07 22:21:09] [Rank 0] step:7181/10000 train_time:580758ms step_avg:80.87ms
+[2025-07-07 22:21:11] [Rank 0] step:7201/10000 train_time:582917ms step_avg:80.95ms
+[2025-07-07 22:21:13] [Rank 0] step:7221/10000 train_time:584394ms step_avg:80.93ms
+[2025-07-07 22:21:14] [Rank 0] step:7241/10000 train_time:585892ms step_avg:80.91ms
+[2025-07-07 22:21:16] [Rank 0] step:7261/10000 train_time:587391ms step_avg:80.90ms
+[2025-07-07 22:21:17] [Rank 0] step:7281/10000 train_time:588887ms step_avg:80.88ms
+[2025-07-07 22:21:19] [Rank 0] step:7301/10000 train_time:591035ms step_avg:80.95ms
+[2025-07-07 22:21:21] [Rank 0] step:7321/10000 train_time:592532ms step_avg:80.94ms
+[2025-07-07 22:21:22] [Rank 0] step:7341/10000 train_time:594030ms step_avg:80.92ms
+[2025-07-07 22:21:24] [Rank 0] step:7361/10000 train_time:595529ms step_avg:80.90ms
+[2025-07-07 22:21:26] [Rank 0] step:7381/10000 train_time:597700ms step_avg:80.98ms
+[2025-07-07 22:21:27] [Rank 0] step:7401/10000 train_time:599176ms step_avg:80.96ms
+[2025-07-07 22:21:29] [Rank 0] step:7421/10000 train_time:600677ms step_avg:80.94ms
+[2025-07-07 22:21:30] [Rank 0] step:7441/10000 train_time:602177ms step_avg:80.93ms
+[2025-07-07 22:21:32] [Rank 0] step:7461/10000 train_time:603675ms step_avg:80.91ms
+[2025-07-07 22:21:34] [Rank 0] step:7481/10000 train_time:605817ms step_avg:80.98ms
+[2025-07-07 22:21:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:21:36] [Rank 0] PRINT: step:7500/10000 train_loss:0.8625 val_loss:0.8631 train_time:607559ms step_avg:81.01ms
+[2025-07-07 22:21:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:27:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:27:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:27:02] [Rank 0] Total Loss: 5.3903
+[2025-07-07 22:27:02] [Rank 0] Total FTA: 0.9178
+[2025-07-07 22:27:02] [Rank 0] Group 0 Loss: 5.4310
+[2025-07-07 22:27:02] [Rank 0] Group 1 Loss: 5.3149
+[2025-07-07 22:27:02] [Rank 0] Group 2 Loss: 5.4201
+[2025-07-07 22:27:02] [Rank 0] Group 3 Loss: 5.2576
+[2025-07-07 22:27:02] [Rank 0] Group 4 Loss: 5.3916
+[2025-07-07 22:27:02] [Rank 0] Group 5 Loss: 5.3571
+[2025-07-07 22:27:02] [Rank 0] Group 6 Loss: 5.3263
+[2025-07-07 22:27:02] [Rank 0] Group 7 Loss: 5.4339
+[2025-07-07 22:27:02] [Rank 0] Group 8 Loss: 5.4655
+[2025-07-07 22:27:02] [Rank 0] Group 9 Loss: 5.3774
+[2025-07-07 22:27:02] [Rank 0] Group 10 Loss: 5.4140
+[2025-07-07 22:27:02] [Rank 0] Group 11 Loss: 5.4094
+[2025-07-07 22:27:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 22:27:02] [Rank 0] Group 1 FTA: 0.8620
+[2025-07-07 22:27:02] [Rank 0] Group 2 FTA: 0.5677
+[2025-07-07 22:27:02] [Rank 0] Group 3 FTA: 0.9349
+[2025-07-07 22:27:02] [Rank 0] Group 4 FTA: 0.8984
+[2025-07-07 22:27:02] [Rank 0] Group 5 FTA: 0.9375
+[2025-07-07 22:27:02] [Rank 0] Group 6 FTA: 0.9062
+[2025-07-07 22:27:02] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-07 22:27:02] [Rank 0] Group 8 FTA: 0.9740
+[2025-07-07 22:27:02] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-07 22:27:02] [Rank 0] Group 10 FTA: 0.9453
+[2025-07-07 22:27:02] [Rank 0] Group 11 FTA: 0.9502
+[2025-07-07 22:27:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:27:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:27:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:27:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 22:27:03] [Rank 0] step:7501/10000 train_time:607580ms step_avg:81.00ms
+[2025-07-07 22:27:05] [Rank 0] step:7521/10000 train_time:609078ms step_avg:80.98ms
+[2025-07-07 22:27:06] [Rank 0] step:7541/10000 train_time:610574ms step_avg:80.97ms
+[2025-07-07 22:27:08] [Rank 0] step:7561/10000 train_time:612120ms step_avg:80.96ms
+[2025-07-07 22:27:10] [Rank 0] step:7581/10000 train_time:614225ms step_avg:81.02ms
+[2025-07-07 22:27:11] [Rank 0] step:7601/10000 train_time:615717ms step_avg:81.00ms
+[2025-07-07 22:27:13] [Rank 0] step:7621/10000 train_time:617211ms step_avg:80.99ms
+[2025-07-07 22:27:14] [Rank 0] step:7641/10000 train_time:618707ms step_avg:80.97ms
+[2025-07-07 22:27:17] [Rank 0] step:7661/10000 train_time:620865ms step_avg:81.04ms
+[2025-07-07 22:27:18] [Rank 0] step:7681/10000 train_time:622360ms step_avg:81.03ms
+[2025-07-07 22:27:20] [Rank 0] step:7701/10000 train_time:623856ms step_avg:81.01ms
+[2025-07-07 22:27:21] [Rank 0] step:7721/10000 train_time:625353ms step_avg:80.99ms
+[2025-07-07 22:27:23] [Rank 0] step:7741/10000 train_time:626900ms step_avg:80.98ms
+[2025-07-07 22:27:24] [Rank 0] step:7761/10000 train_time:628581ms step_avg:80.99ms
+[2025-07-07 22:27:26] [Rank 0] step:7781/10000 train_time:630080ms step_avg:80.98ms
+[2025-07-07 22:27:27] [Rank 0] step:7801/10000 train_time:631580ms step_avg:80.96ms
+[2025-07-07 22:27:29] [Rank 0] step:7821/10000 train_time:633078ms step_avg:80.95ms
+[2025-07-07 22:27:31] [Rank 0] step:7841/10000 train_time:635237ms step_avg:81.01ms
+[2025-07-07 22:27:32] [Rank 0] step:7861/10000 train_time:636733ms step_avg:81.00ms
+[2025-07-07 22:27:34] [Rank 0] step:7881/10000 train_time:638232ms step_avg:80.98ms
+[2025-07-07 22:27:35] [Rank 0] step:7901/10000 train_time:639733ms step_avg:80.97ms
+[2025-07-07 22:27:37] [Rank 0] step:7921/10000 train_time:641285ms step_avg:80.96ms
+[2025-07-07 22:27:38] [Rank 0] step:7941/10000 train_time:642768ms step_avg:80.94ms
+[2025-07-07 22:27:40] [Rank 0] step:7961/10000 train_time:644267ms step_avg:80.93ms
+[2025-07-07 22:27:41] [Rank 0] step:7981/10000 train_time:645768ms step_avg:80.91ms
+[2025-07-07 22:27:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:27:44] [Rank 0] PRINT: step:8000/10000 train_loss:0.8614 val_loss:0.8625 train_time:647268ms step_avg:80.91ms
+[2025-07-07 22:27:44] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:27:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:27:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:33:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:33:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:33:09] [Rank 0] Total Loss: 5.4601
+[2025-07-07 22:33:09] [Rank 0] Total FTA: 0.9194
+[2025-07-07 22:33:09] [Rank 0] Group 0 Loss: 5.4616
+[2025-07-07 22:33:09] [Rank 0] Group 1 Loss: 5.5193
+[2025-07-07 22:33:09] [Rank 0] Group 2 Loss: 5.5050
+[2025-07-07 22:33:09] [Rank 0] Group 3 Loss: 5.3314
+[2025-07-07 22:33:09] [Rank 0] Group 4 Loss: 5.4655
+[2025-07-07 22:33:09] [Rank 0] Group 5 Loss: 5.3784
+[2025-07-07 22:33:09] [Rank 0] Group 6 Loss: 5.4229
+[2025-07-07 22:33:09] [Rank 0] Group 7 Loss: 5.4967
+[2025-07-07 22:33:09] [Rank 0] Group 8 Loss: 5.5188
+[2025-07-07 22:33:09] [Rank 0] Group 9 Loss: 5.4035
+[2025-07-07 22:33:09] [Rank 0] Group 10 Loss: 5.4740
+[2025-07-07 22:33:09] [Rank 0] Group 11 Loss: 5.4822
+[2025-07-07 22:33:09] [Rank 0] Group 0 FTA: 0.8257
+[2025-07-07 22:33:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:33:09] [Rank 0] Group 2 FTA: 0.7734
+[2025-07-07 22:33:09] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-07 22:33:09] [Rank 0] Group 4 FTA: 0.9089
+[2025-07-07 22:33:09] [Rank 0] Group 5 FTA: 0.9115
+[2025-07-07 22:33:09] [Rank 0] Group 6 FTA: 0.9453
+[2025-07-07 22:33:09] [Rank 0] Group 7 FTA: 0.9505
+[2025-07-07 22:33:09] [Rank 0] Group 8 FTA: 0.9375
+[2025-07-07 22:33:09] [Rank 0] Group 9 FTA: 0.9336
+[2025-07-07 22:33:09] [Rank 0] Group 10 FTA: 0.9453
+[2025-07-07 22:33:09] [Rank 0] Group 11 FTA: 0.9609
+[2025-07-07 22:33:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:33:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:33:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:33:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 22:33:10] [Rank 0] step:8001/10000 train_time:647289ms step_avg:80.90ms
+[2025-07-07 22:33:12] [Rank 0] step:8021/10000 train_time:649005ms step_avg:80.91ms
+[2025-07-07 22:33:13] [Rank 0] step:8041/10000 train_time:650496ms step_avg:80.90ms
+[2025-07-07 22:33:15] [Rank 0] step:8061/10000 train_time:651991ms step_avg:80.88ms
+[2025-07-07 22:33:16] [Rank 0] step:8081/10000 train_time:653484ms step_avg:80.87ms
+[2025-07-07 22:33:19] [Rank 0] step:8101/10000 train_time:655036ms step_avg:80.86ms
+[2025-07-07 22:33:20] [Rank 0] step:8121/10000 train_time:657119ms step_avg:80.92ms
+[2025-07-07 22:33:22] [Rank 0] step:8141/10000 train_time:658612ms step_avg:80.90ms
+[2025-07-07 22:33:23] [Rank 0] step:8161/10000 train_time:660107ms step_avg:80.89ms
+[2025-07-07 22:33:25] [Rank 0] step:8181/10000 train_time:661602ms step_avg:80.87ms
+[2025-07-07 22:33:27] [Rank 0] step:8201/10000 train_time:663766ms step_avg:80.94ms
+[2025-07-07 22:33:28] [Rank 0] step:8221/10000 train_time:665260ms step_avg:80.92ms
+[2025-07-07 22:33:30] [Rank 0] step:8241/10000 train_time:666757ms step_avg:80.91ms
+[2025-07-07 22:33:31] [Rank 0] step:8261/10000 train_time:668253ms step_avg:80.89ms
+[2025-07-07 22:33:33] [Rank 0] step:8281/10000 train_time:669801ms step_avg:80.88ms
+[2025-07-07 22:33:34] [Rank 0] step:8301/10000 train_time:671484ms step_avg:80.89ms
+[2025-07-07 22:33:36] [Rank 0] step:8321/10000 train_time:672983ms step_avg:80.88ms
+[2025-07-07 22:33:37] [Rank 0] step:8341/10000 train_time:674480ms step_avg:80.86ms
+[2025-07-07 22:33:39] [Rank 0] step:8361/10000 train_time:675978ms step_avg:80.85ms
+[2025-07-07 22:33:41] [Rank 0] step:8381/10000 train_time:677714ms step_avg:80.86ms
+[2025-07-07 22:33:42] [Rank 0] step:8401/10000 train_time:679213ms step_avg:80.85ms
+[2025-07-07 22:33:44] [Rank 0] step:8421/10000 train_time:680710ms step_avg:80.83ms
+[2025-07-07 22:33:45] [Rank 0] step:8441/10000 train_time:682209ms step_avg:80.82ms
+[2025-07-07 22:33:47] [Rank 0] step:8461/10000 train_time:683964ms step_avg:80.84ms
+[2025-07-07 22:33:49] [Rank 0] step:8481/10000 train_time:685860ms step_avg:80.87ms
+[2025-07-07 22:33:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:33:51] [Rank 0] PRINT: step:8500/10000 train_loss:0.8604 val_loss:0.8620 train_time:687359ms step_avg:80.87ms
+[2025-07-07 22:33:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:33:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:33:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:39:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:39:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:39:20] [Rank 0] Total Loss: 5.4862
+[2025-07-07 22:39:20] [Rank 0] Total FTA: 0.9327
+[2025-07-07 22:39:20] [Rank 0] Group 0 Loss: 5.5013
+[2025-07-07 22:39:20] [Rank 0] Group 1 Loss: 5.4567
+[2025-07-07 22:39:20] [Rank 0] Group 2 Loss: 5.4967
+[2025-07-07 22:39:20] [Rank 0] Group 3 Loss: 5.3572
+[2025-07-07 22:39:20] [Rank 0] Group 4 Loss: 5.4479
+[2025-07-07 22:39:20] [Rank 0] Group 5 Loss: 5.4858
+[2025-07-07 22:39:20] [Rank 0] Group 6 Loss: 5.4546
+[2025-07-07 22:39:20] [Rank 0] Group 7 Loss: 5.4903
+[2025-07-07 22:39:20] [Rank 0] Group 8 Loss: 5.4954
+[2025-07-07 22:39:20] [Rank 0] Group 9 Loss: 5.4572
+[2025-07-07 22:39:20] [Rank 0] Group 10 Loss: 5.5119
+[2025-07-07 22:39:20] [Rank 0] Group 11 Loss: 5.5463
+[2025-07-07 22:39:20] [Rank 0] Group 0 FTA: 0.8283
+[2025-07-07 22:39:20] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:39:20] [Rank 0] Group 2 FTA: 0.9193
+[2025-07-07 22:39:20] [Rank 0] Group 3 FTA: 0.9219
+[2025-07-07 22:39:20] [Rank 0] Group 4 FTA: 0.9115
+[2025-07-07 22:39:20] [Rank 0] Group 5 FTA: 0.9635
+[2025-07-07 22:39:20] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-07 22:39:20] [Rank 0] Group 7 FTA: 0.9609
+[2025-07-07 22:39:20] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-07 22:39:20] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-07 22:39:20] [Rank 0] Group 10 FTA: 0.9570
+[2025-07-07 22:39:20] [Rank 0] Group 11 FTA: 0.9570
+[2025-07-07 22:39:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:39:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:39:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:39:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 22:39:22] [Rank 0] step:8501/10000 train_time:687381ms step_avg:80.86ms
+[2025-07-07 22:39:23] [Rank 0] step:8521/10000 train_time:688878ms step_avg:80.84ms
+[2025-07-07 22:39:25] [Rank 0] step:8541/10000 train_time:690371ms step_avg:80.83ms
+[2025-07-07 22:39:27] [Rank 0] step:8561/10000 train_time:692513ms step_avg:80.89ms
+[2025-07-07 22:39:28] [Rank 0] step:8581/10000 train_time:694006ms step_avg:80.88ms
+[2025-07-07 22:39:30] [Rank 0] step:8601/10000 train_time:695743ms step_avg:80.89ms
+[2025-07-07 22:39:32] [Rank 0] step:8621/10000 train_time:697266ms step_avg:80.88ms
+[2025-07-07 22:39:34] [Rank 0] step:8641/10000 train_time:698812ms step_avg:80.87ms
+[2025-07-07 22:39:35] [Rank 0] step:8661/10000 train_time:700917ms step_avg:80.93ms
+[2025-07-07 22:39:37] [Rank 0] step:8681/10000 train_time:702411ms step_avg:80.91ms
+[2025-07-07 22:39:38] [Rank 0] step:8701/10000 train_time:703906ms step_avg:80.90ms
+[2025-07-07 22:39:40] [Rank 0] step:8721/10000 train_time:705402ms step_avg:80.89ms
+[2025-07-07 22:39:42] [Rank 0] step:8741/10000 train_time:707555ms step_avg:80.95ms
+[2025-07-07 22:39:43] [Rank 0] step:8761/10000 train_time:709049ms step_avg:80.93ms
+[2025-07-07 22:39:45] [Rank 0] step:8781/10000 train_time:710546ms step_avg:80.92ms
+[2025-07-07 22:39:46] [Rank 0] step:8801/10000 train_time:712044ms step_avg:80.90ms
+[2025-07-07 22:39:48] [Rank 0] step:8821/10000 train_time:713541ms step_avg:80.89ms
+[2025-07-07 22:39:49] [Rank 0] step:8841/10000 train_time:715074ms step_avg:80.88ms
+[2025-07-07 22:39:51] [Rank 0] step:8861/10000 train_time:716570ms step_avg:80.87ms
+[2025-07-07 22:39:52] [Rank 0] step:8881/10000 train_time:718070ms step_avg:80.85ms
+[2025-07-07 22:39:54] [Rank 0] step:8901/10000 train_time:719570ms step_avg:80.84ms
+[2025-07-07 22:39:56] [Rank 0] step:8921/10000 train_time:721304ms step_avg:80.85ms
+[2025-07-07 22:39:57] [Rank 0] step:8941/10000 train_time:722804ms step_avg:80.84ms
+[2025-07-07 22:39:59] [Rank 0] step:8961/10000 train_time:724303ms step_avg:80.83ms
+[2025-07-07 22:40:00] [Rank 0] step:8981/10000 train_time:725804ms step_avg:80.82ms
+[2025-07-07 22:40:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:40:03] [Rank 0] PRINT: step:9000/10000 train_loss:0.8595 val_loss:0.8617 train_time:727303ms step_avg:80.81ms
+[2025-07-07 22:40:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:40:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:40:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:45:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:45:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:45:28] [Rank 0] Total Loss: 5.4773
+[2025-07-07 22:45:28] [Rank 0] Total FTA: 0.9441
+[2025-07-07 22:45:28] [Rank 0] Group 0 Loss: 5.4507
+[2025-07-07 22:45:28] [Rank 0] Group 1 Loss: 5.3986
+[2025-07-07 22:45:28] [Rank 0] Group 2 Loss: 5.5790
+[2025-07-07 22:45:28] [Rank 0] Group 3 Loss: 5.2834
+[2025-07-07 22:45:28] [Rank 0] Group 4 Loss: 5.4468
+[2025-07-07 22:45:28] [Rank 0] Group 5 Loss: 5.4442
+[2025-07-07 22:45:28] [Rank 0] Group 6 Loss: 5.4478
+[2025-07-07 22:45:28] [Rank 0] Group 7 Loss: 5.5177
+[2025-07-07 22:45:28] [Rank 0] Group 8 Loss: 5.5594
+[2025-07-07 22:45:28] [Rank 0] Group 9 Loss: 5.5229
+[2025-07-07 22:45:28] [Rank 0] Group 10 Loss: 5.5143
+[2025-07-07 22:45:28] [Rank 0] Group 11 Loss: 5.5204
+[2025-07-07 22:45:28] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-07 22:45:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:45:28] [Rank 0] Group 2 FTA: 0.8958
+[2025-07-07 22:45:28] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 22:45:28] [Rank 0] Group 4 FTA: 0.9375
+[2025-07-07 22:45:28] [Rank 0] Group 5 FTA: 0.9844
+[2025-07-07 22:45:28] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-07 22:45:28] [Rank 0] Group 7 FTA: 0.9505
+[2025-07-07 22:45:28] [Rank 0] Group 8 FTA: 0.9635
+[2025-07-07 22:45:28] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-07 22:45:28] [Rank 0] Group 10 FTA: 0.9590
+[2025-07-07 22:45:28] [Rank 0] Group 11 FTA: 0.9600
+[2025-07-07 22:45:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:45:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:45:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:45:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 22:45:30] [Rank 0] step:9001/10000 train_time:727433ms step_avg:80.82ms
+[2025-07-07 22:45:32] [Rank 0] step:9021/10000 train_time:729539ms step_avg:80.87ms
+[2025-07-07 22:45:33] [Rank 0] step:9041/10000 train_time:731031ms step_avg:80.86ms
+[2025-07-07 22:45:35] [Rank 0] step:9061/10000 train_time:732525ms step_avg:80.84ms
+[2025-07-07 22:45:36] [Rank 0] step:9081/10000 train_time:734021ms step_avg:80.83ms
+[2025-07-07 22:45:38] [Rank 0] step:9101/10000 train_time:735756ms step_avg:80.84ms
+[2025-07-07 22:45:39] [Rank 0] step:9121/10000 train_time:737251ms step_avg:80.83ms
+[2025-07-07 22:45:41] [Rank 0] step:9141/10000 train_time:738747ms step_avg:80.82ms
+[2025-07-07 22:45:42] [Rank 0] step:9161/10000 train_time:740244ms step_avg:80.80ms
+[2025-07-07 22:45:44] [Rank 0] step:9181/10000 train_time:741793ms step_avg:80.80ms
+[2025-07-07 22:45:45] [Rank 0] step:9201/10000 train_time:743272ms step_avg:80.78ms
+[2025-07-07 22:45:47] [Rank 0] step:9221/10000 train_time:744769ms step_avg:80.77ms
+[2025-07-07 22:45:48] [Rank 0] step:9241/10000 train_time:746266ms step_avg:80.76ms
+[2025-07-07 22:45:50] [Rank 0] step:9261/10000 train_time:748016ms step_avg:80.77ms
+[2025-07-07 22:45:52] [Rank 0] step:9281/10000 train_time:750164ms step_avg:80.83ms
+[2025-07-07 22:45:54] [Rank 0] step:9301/10000 train_time:751662ms step_avg:80.82ms
+[2025-07-07 22:45:55] [Rank 0] step:9321/10000 train_time:753168ms step_avg:80.80ms
+[2025-07-07 22:45:57] [Rank 0] step:9341/10000 train_time:754666ms step_avg:80.79ms
+[2025-07-07 22:45:59] [Rank 0] step:9361/10000 train_time:756423ms step_avg:80.81ms
+[2025-07-07 22:46:00] [Rank 0] step:9381/10000 train_time:758309ms step_avg:80.83ms
+[2025-07-07 22:46:02] [Rank 0] step:9401/10000 train_time:759808ms step_avg:80.82ms
+[2025-07-07 22:46:03] [Rank 0] step:9421/10000 train_time:761309ms step_avg:80.81ms
+[2025-07-07 22:46:05] [Rank 0] step:9441/10000 train_time:762807ms step_avg:80.80ms
+[2025-07-07 22:46:07] [Rank 0] step:9461/10000 train_time:764950ms step_avg:80.85ms
+[2025-07-07 22:46:09] [Rank 0] step:9481/10000 train_time:766451ms step_avg:80.84ms
+[2025-07-07 22:46:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:46:11] [Rank 0] PRINT: step:9500/10000 train_loss:0.8587 val_loss:0.8614 train_time:767948ms step_avg:80.84ms
+[2025-07-07 22:46:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:46:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:46:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:51:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:51:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:51:35] [Rank 0] Total Loss: 5.4653
+[2025-07-07 22:51:35] [Rank 0] Total FTA: 0.9222
+[2025-07-07 22:51:35] [Rank 0] Group 0 Loss: 5.4036
+[2025-07-07 22:51:35] [Rank 0] Group 1 Loss: 5.4075
+[2025-07-07 22:51:35] [Rank 0] Group 2 Loss: 5.5476
+[2025-07-07 22:51:35] [Rank 0] Group 3 Loss: 5.2820
+[2025-07-07 22:51:35] [Rank 0] Group 4 Loss: 5.4145
+[2025-07-07 22:51:35] [Rank 0] Group 5 Loss: 5.4096
+[2025-07-07 22:51:35] [Rank 0] Group 6 Loss: 5.4576
+[2025-07-07 22:51:35] [Rank 0] Group 7 Loss: 5.5146
+[2025-07-07 22:51:35] [Rank 0] Group 8 Loss: 5.5200
+[2025-07-07 22:51:35] [Rank 0] Group 9 Loss: 5.5123
+[2025-07-07 22:51:35] [Rank 0] Group 10 Loss: 5.5067
+[2025-07-07 22:51:35] [Rank 0] Group 11 Loss: 5.5427
+[2025-07-07 22:51:35] [Rank 0] Group 0 FTA: 0.6528
+[2025-07-07 22:51:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:51:35] [Rank 0] Group 2 FTA: 0.9323
+[2025-07-07 22:51:35] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-07 22:51:35] [Rank 0] Group 4 FTA: 0.9479
+[2025-07-07 22:51:35] [Rank 0] Group 5 FTA: 0.9740
+[2025-07-07 22:51:35] [Rank 0] Group 6 FTA: 0.9583
+[2025-07-07 22:51:35] [Rank 0] Group 7 FTA: 0.9609
+[2025-07-07 22:51:35] [Rank 0] Group 8 FTA: 0.9661
+[2025-07-07 22:51:35] [Rank 0] Group 9 FTA: 0.9883
+[2025-07-07 22:51:35] [Rank 0] Group 10 FTA: 0.9746
+[2025-07-07 22:51:35] [Rank 0] Group 11 FTA: 0.9629
+[2025-07-07 22:51:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:51:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:51:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:51:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 22:51:36] [Rank 0] step:9501/10000 train_time:767970ms step_avg:80.83ms
+[2025-07-07 22:51:38] [Rank 0] step:9521/10000 train_time:769478ms step_avg:80.82ms
+[2025-07-07 22:51:40] [Rank 0] step:9541/10000 train_time:771659ms step_avg:80.88ms
+[2025-07-07 22:51:42] [Rank 0] step:9561/10000 train_time:773135ms step_avg:80.86ms
+[2025-07-07 22:51:43] [Rank 0] step:9581/10000 train_time:774628ms step_avg:80.85ms
+[2025-07-07 22:51:45] [Rank 0] step:9601/10000 train_time:776123ms step_avg:80.84ms
+[2025-07-07 22:51:46] [Rank 0] step:9621/10000 train_time:777620ms step_avg:80.83ms
+[2025-07-07 22:51:48] [Rank 0] step:9641/10000 train_time:779777ms step_avg:80.88ms
+[2025-07-07 22:51:50] [Rank 0] step:9661/10000 train_time:781271ms step_avg:80.87ms
+[2025-07-07 22:51:51] [Rank 0] step:9681/10000 train_time:782767ms step_avg:80.86ms
+[2025-07-07 22:51:53] [Rank 0] step:9701/10000 train_time:784264ms step_avg:80.84ms
+[2025-07-07 22:51:55] [Rank 0] step:9721/10000 train_time:786442ms step_avg:80.90ms
+[2025-07-07 22:51:56] [Rank 0] step:9741/10000 train_time:787920ms step_avg:80.89ms
+[2025-07-07 22:51:58] [Rank 0] step:9761/10000 train_time:789416ms step_avg:80.87ms
+[2025-07-07 22:51:59] [Rank 0] step:9781/10000 train_time:790914ms step_avg:80.86ms
+[2025-07-07 22:52:01] [Rank 0] step:9801/10000 train_time:792414ms step_avg:80.85ms
+[2025-07-07 22:52:03] [Rank 0] step:9821/10000 train_time:794562ms step_avg:80.90ms
+[2025-07-07 22:52:05] [Rank 0] step:9841/10000 train_time:796060ms step_avg:80.89ms
+[2025-07-07 22:52:06] [Rank 0] step:9861/10000 train_time:797558ms step_avg:80.88ms
+[2025-07-07 22:52:08] [Rank 0] step:9881/10000 train_time:799057ms step_avg:80.87ms
+[2025-07-07 22:52:09] [Rank 0] step:9901/10000 train_time:800775ms step_avg:80.88ms
+[2025-07-07 22:52:11] [Rank 0] step:9921/10000 train_time:802315ms step_avg:80.87ms
+[2025-07-07 22:52:12] [Rank 0] step:9941/10000 train_time:803814ms step_avg:80.86ms
+[2025-07-07 22:52:14] [Rank 0] step:9961/10000 train_time:805313ms step_avg:80.85ms
+[2025-07-07 22:52:15] [Rank 0] step:9981/10000 train_time:806813ms step_avg:80.83ms
+[2025-07-07 22:52:17] [Rank 0] step:10000/10000 train_time:808471ms step_avg:80.85ms
+[2025-07-07 22:52:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:52:18] [Rank 0] PRINT: step:10000/10000 train_loss:0.8579 val_loss:0.8613 train_time:808552ms step_avg:80.86ms
+[2025-07-07 22:52:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:52:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:52:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:57:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:57:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:57:46] [Rank 0] Total Loss: 5.5082
+[2025-07-07 22:57:46] [Rank 0] Total FTA: 0.9199
+[2025-07-07 22:57:46] [Rank 0] Group 0 Loss: 5.3977
+[2025-07-07 22:57:47] [Rank 0] Group 1 Loss: 5.5131
+[2025-07-07 22:57:47] [Rank 0] Group 2 Loss: 5.6361
+[2025-07-07 22:57:47] [Rank 0] Group 3 Loss: 5.3119
+[2025-07-07 22:57:47] [Rank 0] Group 4 Loss: 5.4749
+[2025-07-07 22:57:47] [Rank 0] Group 5 Loss: 5.4464
+[2025-07-07 22:57:47] [Rank 0] Group 6 Loss: 5.4313
+[2025-07-07 22:57:47] [Rank 0] Group 7 Loss: 5.5754
+[2025-07-07 22:57:47] [Rank 0] Group 8 Loss: 5.5621
+[2025-07-07 22:57:47] [Rank 0] Group 9 Loss: 5.5397
+[2025-07-07 22:57:47] [Rank 0] Group 10 Loss: 5.5510
+[2025-07-07 22:57:47] [Rank 0] Group 11 Loss: 5.6049
+[2025-07-07 22:57:47] [Rank 0] Group 0 FTA: 0.6502
+[2025-07-07 22:57:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 22:57:47] [Rank 0] Group 2 FTA: 0.8464
+[2025-07-07 22:57:47] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-07 22:57:47] [Rank 0] Group 4 FTA: 0.9661
+[2025-07-07 22:57:47] [Rank 0] Group 5 FTA: 0.9922
+[2025-07-07 22:57:47] [Rank 0] Group 6 FTA: 0.9531
+[2025-07-07 22:57:47] [Rank 0] Group 7 FTA: 0.9583
+[2025-07-07 22:57:47] [Rank 0] Group 8 FTA: 0.9818
+[2025-07-07 22:57:47] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-07 22:57:47] [Rank 0] Group 10 FTA: 0.9707
+[2025-07-07 22:57:47] [Rank 0] Group 11 FTA: 0.9727
+[2025-07-07 22:57:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_loss_curves.png
+[2025-07-07 22:57:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/per_class_acc_curves.png
+[2025-07-07 22:57:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_loss_curve.png
+[2025-07-07 22:57:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_43/total_acc_curve.png
+[2025-07-07 22:57:48] [Rank 0] step:10001/10000 train_time:808573ms step_avg:80.85ms
+[2025-07-07 22:57:48] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 22:57:48 2025 ---
+[2025-07-07 22:57:48] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9936 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/config.json b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ed2ad5a2f4e3115f333d823c97294895412b0b26
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/config.json
@@ -0,0 +1,23 @@
+{
+  "cli_args": {
+    "unet": false,
+    "seed": 45,
+    "optimizer_mode": 0,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.002
+  },
+  "hyperparameters": {
+    "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+    "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+    "val_tokens": 1966080,
+    "train_seq_len": 12288,
+    "val_seq_len": 65536,
+    "num_iterations": 10000,
+    "cooldown_frac": 0.8,
+    "vocab_size": 50257,
+    "val_loss_every": 500,
+    "save_checkpoint": false
+  },
+  "run_uuid_for_log": "47559efd-23e3-4338-8881-04fd8c8d56ed",
+  "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..87ba786df45b206dca42006df1ce41c574d542db
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c08e1be6fa7cdcafcaa341b96754213c46017f815b4b0c224607290abcb4c076
+size 378188
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..3d6335fb5546c9a2c2fb77ead2a4ad57252c3eb6
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2930e3c51be87c4489c1009533e5cadc924d75115f3f13ec2f2f9885d8c73acd
+size 392943
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..ce8b811ffcfef4e35606350893ff870430e9dd0a
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8de3e38b6928911a3737a3502bb18ba08813220794a324e2b4df13bc3a97bfe7
+size 106453
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..e70b4c1f590e490facf3af0e31fa9cd346740ab4
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2d565f6a723acc3a934875415b5ca5f82f5b21abed008f6bce5819717dc7bc9e
+size 106835
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/training_log_47559efd-23e3-4338-8881-04fd8c8d56ed.txt b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/training_log_47559efd-23e3-4338-8881-04fd8c8d56ed.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9912218f2304e03d1db109e5f0a89c073d56c72d
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/training_log_47559efd-23e3-4338-8881-04fd8c8d56ed.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 19:47:01] [Rank 0] PRINT: --- Script Start: Sun Jul 6 19:47:01 2025 ---
+[2025-07-06 19:47:01] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002)
+[2025-07-06 19:47:01] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 19:47:01] [Rank 0] PRINT: Using fixed seed: 45
+[2025-07-06 19:47:01] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45
+[2025-07-06 19:47:01] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
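+# Note: the register_autograd call above looks like the torch.library custom-op API
+# (an assumption from its signature): it attaches the backward/setup_context pair
+# imported from utils/float_compute to mm_op, so autograd uses that hand-written
+# gradient rather than tracing through the op's forward implementation.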
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # shards are cycled, so multi-epoch training keeps yielding batches
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
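+# For the run logged here (optimizer_mode=0, model_parameterization='qkvo',
+# adam_lr=0.002, seed=45) this resolves to the folder name
+# "mode_0_param_qkvo_lr_0.002_seed_45", so every (mode, parameterization, lr, seed)
+# combination gets its own run directory.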
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
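+        # Illustrative example (hypothetical record, not from the dataset):
+        # a cleaned item such as "What university did Alice Smith attend? MIT"
+        # splits into prompt "What university did Alice Smith attend?" and
+        # answer "MIT"; the expected first token is then the first GPT-2 token
+        # of " MIT" (leading space included).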
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # cap the cached set at num_samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(
+                f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f" Expected : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n" +
+                f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" +
+                f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
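+    # (Illustrative) class_to_group_map comes from generate_powerlaw_selection_counts;
+    # e.g. m=3 gives per-class sample counts {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+    # and class groups [0, 1, 2, 2, 3, 3, 3, 3], so higher group ids correspond to
+    # rarer classes.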
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
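+    # (FTA = first-token accuracy: the argmax prediction at the last prompt position
+    # must equal the first GPT-2 token of " <answer>", mirroring the training context.)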
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
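+# Usage sketch (mirroring the calls made from the training loop below):
+#   plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+#   plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])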
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    # =================================================================
+    # <<<<< Restored Missing Code >>>>>
+    # =================================================================
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+
+########################################
+#   Construct model and optimizer      #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks) # target_seq=None
+        model.train()
+
+        if isinstance(result, tuple) and len(result) == 2:
+            loss, logits = result
+            print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+        else:
+            print0(f"PRINT: Model returns: {type(result)}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test still fails: {e}", console=True)
+
+
+
+# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+if exp_args.model_parameterization == "qkvo":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = [] # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params: # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = [] # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+    elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        #dict(params=head_params, lr=0.22),
+        #dict(params=embed_params, lr=0.6),
+        #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices: # Only add group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam if desired
+    optimizers = [optimizer1] # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params: # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
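+# (train_loss_sum, together with train_step_count on the next line, accumulates the
+# average training loss reported at each validation step; both are reset after every
+# validation pass.)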
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        #if world_size > 1:
+        #    ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #    ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #    ft_total_tensor = torch.tensor(ft_total, device=device)
+        #    dist.broadcast(ft_acc_tensor, 0)
+        #    dist.broadcast(ft_correct_tensor, 0)
+        #    dist.broadcast(ft_total_tensor, 0)
+        #    first_token_acc = ft_acc_tensor.item()
+        #    ft_correct = int(ft_correct_tensor.item())
+        #    ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 19:47:01] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
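+        # For example (hypothetical record, assuming the cleaning above):
+        #   cleaned_text = "What is the birth city of Alice? Paris"
+        #   prompt -> "What is the birth city of Alice?", answer -> "Paris"
+        # The expected first token is then tokenizer.encode(" Paris")[0], with a
+        # leading space, matching how the answer follows the question in training text.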
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
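+# Worked example of the get_lr schedule above, assuming this run's settings
+# (num_iterations=10000, cooldown_frac=0.8):
+#   step  1000 -> x = 0.1 < 0.2, multiplier 1.0            (stable phase)
+#   step  6000 -> x = 0.6, w = (1 - 0.6) / 0.8 = 0.5,
+#                 multiplier 0.5 * 1.0 + 0.5 * 0.1 = 0.55  (linear cooldown)
+#   step 10000 -> x = 1.0, w = 0.0, multiplier 0.1         (end of cooldown)
+# The training loop scales each param group's initial_lr by this multiplier.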
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 19:47:02] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 19:47:02] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-06 19:47:02] [Rank 0] PRINT: Constructing model... +[2025-07-06 19:47:02] [Rank 0] PRINT: Constructing model... +[2025-07-06 19:47:04] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 19:47:04] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 19:47:04] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 19:47:04] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 19:47:04] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 19:47:04] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 19:47:04] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 19:47:04] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 19:47:04] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 19:47:04] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 19:47:05] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 19:47:05] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 19:47:05] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 19:47:05] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 19:47:05] [Rank 0] PRINT: Model returns: +[2025-07-06 19:47:05] [Rank 0] PRINT: Model returns: +[2025-07-06 19:47:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 19:47:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 19:47:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 19:47:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 19:47:05] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 19:47:05] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 19:47:05] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 19:47:05] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 19:47:05] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 19:47:05] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 19:47:05] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 19:47:05] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 19:47:05] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 19:47:05] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 19:47:05] [Rank 0] PRINT: Starting warmup... +[2025-07-06 19:47:05] [Rank 0] PRINT: Starting warmup... +[2025-07-06 19:48:26] [Rank 0] PRINT: Warmup complete. +[2025-07-06 19:48:26] [Rank 0] PRINT: Warmup complete. +[2025-07-06 19:48:26] [Rank 0] PRINT: Starting training... +[2025-07-06 19:48:26] [Rank 0] PRINT: Starting training... +[2025-07-06 19:48:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 19:48:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 19:48:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 19:48:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 19:48:35] [Rank 0] step:21/10000 train_time:1552ms step_avg:73.90ms +[2025-07-06 19:48:35] [Rank 0] step:21/10000 train_time:1552ms step_avg:73.90ms +[2025-07-06 19:48:36] [Rank 0] step:41/10000 train_time:3013ms step_avg:73.48ms +[2025-07-06 19:48:36] [Rank 0] step:41/10000 train_time:3013ms step_avg:73.48ms +[2025-07-06 19:48:38] [Rank 0] step:61/10000 train_time:4477ms step_avg:73.39ms +[2025-07-06 19:48:38] [Rank 0] step:61/10000 train_time:4477ms step_avg:73.39ms +[2025-07-06 19:48:39] [Rank 0] step:81/10000 train_time:5939ms step_avg:73.32ms +[2025-07-06 19:48:39] [Rank 0] step:81/10000 train_time:5939ms step_avg:73.32ms +[2025-07-06 19:48:41] [Rank 0] step:101/10000 train_time:8055ms step_avg:79.76ms +[2025-07-06 19:48:41] [Rank 0] step:101/10000 train_time:8055ms step_avg:79.76ms +[2025-07-06 19:48:43] [Rank 0] step:121/10000 train_time:9522ms step_avg:78.69ms +[2025-07-06 19:48:43] [Rank 0] step:121/10000 train_time:9522ms step_avg:78.69ms +[2025-07-06 19:48:44] [Rank 0] step:141/10000 train_time:10991ms step_avg:77.95ms +[2025-07-06 19:48:44] [Rank 0] step:141/10000 train_time:10991ms step_avg:77.95ms +[2025-07-06 19:48:46] [Rank 0] step:161/10000 train_time:12461ms step_avg:77.40ms +[2025-07-06 19:48:46] [Rank 0] step:161/10000 train_time:12461ms step_avg:77.40ms +[2025-07-06 19:48:48] [Rank 0] step:181/10000 train_time:13930ms step_avg:76.96ms +[2025-07-06 19:48:48] [Rank 0] step:181/10000 train_time:13930ms step_avg:76.96ms +[2025-07-06 19:48:49] [Rank 0] step:201/10000 train_time:16055ms step_avg:79.88ms +[2025-07-06 19:48:49] [Rank 0] step:201/10000 train_time:16055ms step_avg:79.88ms +[2025-07-06 19:48:51] [Rank 0] step:221/10000 train_time:17617ms step_avg:79.72ms +[2025-07-06 19:48:51] [Rank 0] step:221/10000 train_time:17617ms step_avg:79.72ms +[2025-07-06 19:48:52] [Rank 0] step:241/10000 train_time:19124ms step_avg:79.35ms +[2025-07-06 19:48:52] [Rank 0] step:241/10000 train_time:19124ms step_avg:79.35ms +[2025-07-06 19:48:54] [Rank 0] step:261/10000 train_time:20595ms step_avg:78.91ms +[2025-07-06 19:48:54] [Rank 0] step:261/10000 train_time:20595ms step_avg:78.91ms +[2025-07-06 19:48:56] [Rank 0] step:281/10000 train_time:22709ms step_avg:80.82ms +[2025-07-06 19:48:56] [Rank 0] step:281/10000 train_time:22709ms step_avg:80.82ms +[2025-07-06 19:48:57] [Rank 0] step:301/10000 train_time:24181ms step_avg:80.33ms +[2025-07-06 19:48:57] [Rank 0] step:301/10000 train_time:24181ms step_avg:80.33ms +[2025-07-06 19:48:59] [Rank 0] step:321/10000 train_time:25653ms step_avg:79.92ms +[2025-07-06 19:48:59] [Rank 0] step:321/10000 train_time:25653ms step_avg:79.92ms +[2025-07-06 19:49:00] [Rank 0] step:341/10000 train_time:27127ms step_avg:79.55ms +[2025-07-06 19:49:00] [Rank 0] step:341/10000 train_time:27127ms step_avg:79.55ms +[2025-07-06 19:49:02] [Rank 0] step:361/10000 train_time:28598ms step_avg:79.22ms +[2025-07-06 19:49:02] [Rank 0] step:361/10000 train_time:28598ms step_avg:79.22ms +[2025-07-06 19:49:04] [Rank 0] step:381/10000 train_time:30713ms step_avg:80.61ms +[2025-07-06 19:49:04] [Rank 0] step:381/10000 train_time:30713ms step_avg:80.61ms +[2025-07-06 19:49:05] [Rank 0] step:401/10000 train_time:32184ms step_avg:80.26ms +[2025-07-06 19:49:05] [Rank 0] step:401/10000 train_time:32184ms step_avg:80.26ms +[2025-07-06 19:49:07] [Rank 0] step:421/10000 train_time:33657ms step_avg:79.95ms 
+[2025-07-06 19:49:07] [Rank 0] step:421/10000 train_time:33657ms step_avg:79.95ms +[2025-07-06 19:49:08] [Rank 0] step:441/10000 train_time:35132ms step_avg:79.66ms +[2025-07-06 19:49:08] [Rank 0] step:441/10000 train_time:35132ms step_avg:79.66ms +[2025-07-06 19:49:11] [Rank 0] step:461/10000 train_time:37268ms step_avg:80.84ms +[2025-07-06 19:49:11] [Rank 0] step:461/10000 train_time:37268ms step_avg:80.84ms +[2025-07-06 19:49:12] [Rank 0] step:481/10000 train_time:38741ms step_avg:80.54ms +[2025-07-06 19:49:12] [Rank 0] step:481/10000 train_time:38741ms step_avg:80.54ms +[2025-07-06 19:49:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 19:49:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 19:49:14] [Rank 0] PRINT: step:500/10000 train_loss:3.3877 val_loss:1.6159 train_time:40215ms step_avg:80.43ms +[2025-07-06 19:49:14] [Rank 0] PRINT: step:500/10000 train_loss:3.3877 val_loss:1.6159 train_time:40215ms step_avg:80.43ms +[2025-07-06 19:49:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 19:49:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 19:49:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 19:49:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 19:49:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 19:49:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 19:54:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 19:54:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 19:54:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 19:54:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 19:54:33] [Rank 0] Total Loss: 4.2375 +[2025-07-06 19:54:33] [Rank 0] Total Loss: 4.2375 +[2025-07-06 19:54:33] [Rank 0] Total FTA: 0.0902 +[2025-07-06 19:54:33] [Rank 0] Total FTA: 0.0902 +[2025-07-06 19:54:33] [Rank 0] Group 0 Loss: 4.5232 +[2025-07-06 19:54:33] [Rank 0] Group 0 Loss: 4.5232 +[2025-07-06 19:54:33] [Rank 0] Group 1 Loss: 4.1416 +[2025-07-06 19:54:33] [Rank 0] Group 1 Loss: 4.1416 +[2025-07-06 19:54:33] [Rank 0] Group 2 Loss: 4.1499 +[2025-07-06 19:54:33] [Rank 0] Group 2 Loss: 4.1499 +[2025-07-06 19:54:33] [Rank 0] Group 3 Loss: 4.1124 +[2025-07-06 19:54:33] [Rank 0] Group 3 Loss: 4.1124 +[2025-07-06 19:54:33] [Rank 0] Group 4 Loss: 4.2215 +[2025-07-06 19:54:33] [Rank 0] Group 4 Loss: 4.2215 +[2025-07-06 19:54:33] [Rank 0] Group 5 Loss: 4.1569 +[2025-07-06 19:54:33] [Rank 0] Group 5 Loss: 4.1569 +[2025-07-06 19:54:33] [Rank 0] Group 6 Loss: 4.1717 +[2025-07-06 19:54:33] [Rank 0] Group 6 Loss: 4.1717 +[2025-07-06 19:54:33] [Rank 0] Group 7 Loss: 4.2143 +[2025-07-06 19:54:33] [Rank 0] Group 7 Loss: 4.2143 +[2025-07-06 19:54:33] [Rank 0] Group 8 Loss: 4.1953 +[2025-07-06 19:54:33] [Rank 0] Group 8 Loss: 4.1953 +[2025-07-06 19:54:33] [Rank 0] Group 9 Loss: 4.2160 +[2025-07-06 19:54:33] [Rank 0] Group 9 Loss: 4.2160 +[2025-07-06 19:54:33] [Rank 0] Group 10 Loss: 4.2466 +[2025-07-06 19:54:33] [Rank 0] Group 10 Loss: 4.2466 +[2025-07-06 19:54:33] [Rank 0] Group 11 Loss: 4.2246 +[2025-07-06 19:54:33] [Rank 0] Group 11 Loss: 4.2246 +[2025-07-06 19:54:33] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-06 19:54:33] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-06 
19:54:33] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 19:54:33] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 19:54:33] [Rank 0] Group 2 FTA: 0.0729 +[2025-07-06 19:54:33] [Rank 0] Group 2 FTA: 0.0729 +[2025-07-06 19:54:33] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-06 19:54:33] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-06 19:54:33] [Rank 0] Group 4 FTA: 0.0130 +[2025-07-06 19:54:33] [Rank 0] Group 4 FTA: 0.0130 +[2025-07-06 19:54:33] [Rank 0] Group 5 FTA: 0.0911 +[2025-07-06 19:54:33] [Rank 0] Group 5 FTA: 0.0911 +[2025-07-06 19:54:33] [Rank 0] Group 6 FTA: 0.1198 +[2025-07-06 19:54:33] [Rank 0] Group 6 FTA: 0.1198 +[2025-07-06 19:54:33] [Rank 0] Group 7 FTA: 0.1250 +[2025-07-06 19:54:33] [Rank 0] Group 7 FTA: 0.1250 +[2025-07-06 19:54:33] [Rank 0] Group 8 FTA: 0.0755 +[2025-07-06 19:54:33] [Rank 0] Group 8 FTA: 0.0755 +[2025-07-06 19:54:33] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-06 19:54:33] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-06 19:54:33] [Rank 0] Group 10 FTA: 0.0918 +[2025-07-06 19:54:33] [Rank 0] Group 10 FTA: 0.0918 +[2025-07-06 19:54:33] [Rank 0] Group 11 FTA: 0.0879 +[2025-07-06 19:54:33] [Rank 0] Group 11 FTA: 0.0879 +[2025-07-06 19:54:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png +[2025-07-06 19:54:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png +[2025-07-06 19:54:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png +[2025-07-06 19:54:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png +[2025-07-06 19:54:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png +[2025-07-06 19:54:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png +[2025-07-06 19:54:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png +[2025-07-06 19:54:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png +[2025-07-06 19:54:35] [Rank 0] step:501/10000 train_time:40235ms step_avg:80.31ms +[2025-07-06 19:54:35] [Rank 0] step:501/10000 train_time:40235ms step_avg:80.31ms +[2025-07-06 19:54:36] [Rank 0] step:521/10000 train_time:41711ms step_avg:80.06ms +[2025-07-06 19:54:36] [Rank 0] step:521/10000 train_time:41711ms step_avg:80.06ms +[2025-07-06 19:54:38] [Rank 0] step:541/10000 train_time:43435ms step_avg:80.29ms +[2025-07-06 19:54:38] [Rank 0] step:541/10000 train_time:43435ms step_avg:80.29ms +[2025-07-06 19:54:40] [Rank 0] step:561/10000 train_time:45281ms step_avg:80.71ms +[2025-07-06 19:54:40] [Rank 0] step:561/10000 train_time:45281ms step_avg:80.71ms +[2025-07-06 19:54:41] [Rank 0] step:581/10000 train_time:46744ms step_avg:80.46ms +[2025-07-06 19:54:41] [Rank 0] step:581/10000 train_time:46744ms step_avg:80.46ms +[2025-07-06 19:54:43] [Rank 0] step:601/10000 train_time:48209ms step_avg:80.22ms +[2025-07-06 19:54:43] [Rank 0] step:601/10000 train_time:48209ms step_avg:80.22ms +[2025-07-06 19:54:44] [Rank 0] step:621/10000 train_time:49676ms step_avg:79.99ms +[2025-07-06 19:54:44] [Rank 0] step:621/10000 train_time:49676ms step_avg:79.99ms 
+[2025-07-06 19:54:46] [Rank 0] step:641/10000 train_time:51779ms step_avg:80.78ms
+[2025-07-06 19:54:48] [Rank 0] step:661/10000 train_time:53245ms step_avg:80.55ms
+[2025-07-06 19:54:49] [Rank 0] step:681/10000 train_time:54711ms step_avg:80.34ms
+[2025-07-06 19:54:51] [Rank 0] step:701/10000 train_time:56176ms step_avg:80.14ms
+[2025-07-06 19:54:53] [Rank 0] step:721/10000 train_time:57644ms step_avg:79.95ms
+[2025-07-06 19:54:54] [Rank 0] step:741/10000 train_time:59781ms step_avg:80.68ms
+[2025-07-06 19:54:56] [Rank 0] step:761/10000 train_time:61257ms step_avg:80.50ms
+[2025-07-06 19:54:57] [Rank 0] step:781/10000 train_time:62736ms step_avg:80.33ms
+[2025-07-06 19:54:59] [Rank 0] step:801/10000 train_time:64216ms step_avg:80.17ms
+[2025-07-06 19:55:01] [Rank 0] step:821/10000 train_time:66347ms step_avg:80.81ms
+[2025-07-06 19:55:02] [Rank 0] step:841/10000 train_time:67837ms step_avg:80.66ms
+[2025-07-06 19:55:04] [Rank 0] step:861/10000 train_time:69316ms step_avg:80.51ms
+[2025-07-06 19:55:05] [Rank 0] step:881/10000 train_time:70796ms step_avg:80.36ms
+[2025-07-06 19:55:07] [Rank 0] step:901/10000 train_time:72535ms step_avg:80.50ms
+[2025-07-06 19:55:09] [Rank 0] step:921/10000 train_time:74416ms step_avg:80.80ms
+[2025-07-06 19:55:10] [Rank 0] step:941/10000 train_time:75893ms step_avg:80.65ms
+[2025-07-06 19:55:12] [Rank 0] step:961/10000 train_time:77508ms step_avg:80.65ms
+[2025-07-06 19:55:13] [Rank 0] step:981/10000 train_time:78987ms step_avg:80.52ms
+[2025-07-06 19:55:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
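The divisibility warning repeated before every validation pass is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches fit and the trailing 131072 tokens are skipped. A minimal sketch of the kind of check that would emit it (the function and names are illustrative, not necessarily the script's):

    def plan_val_batches(val_tokens: int, val_batch_size: int) -> int:
        # 1966080 // 262144 == 7 full batches; 1966080 % 262144 == 131072 tokens skipped
        if val_tokens % val_batch_size != 0:
            print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
                  f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
        return val_tokens // val_batch_size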
+[2025-07-06 19:55:16] [Rank 0] PRINT: step:1000/10000 train_loss:1.4657 val_loss:1.3184 train_time:81130ms step_avg:81.13ms
+[2025-07-06 19:55:16] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:55:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:55:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:00:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:00:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:00:34] [Rank 0] Total Loss: 4.4145
+[2025-07-06 20:00:34] [Rank 0] Total FTA: 0.1580
+[2025-07-06 20:00:34] [Rank 0] Group 0 Loss: 4.5643
+[2025-07-06 20:00:34] [Rank 0] Group 1 Loss: 4.2873
+[2025-07-06 20:00:34] [Rank 0] Group 2 Loss: 4.2093
+[2025-07-06 20:00:34] [Rank 0] Group 3 Loss: 4.4868
+[2025-07-06 20:00:34] [Rank 0] Group 4 Loss: 4.4612
+[2025-07-06 20:00:34] [Rank 0] Group 5 Loss: 4.3634
+[2025-07-06 20:00:34] [Rank 0] Group 6 Loss: 4.3212
+[2025-07-06 20:00:34] [Rank 0] Group 7 Loss: 4.4414
+[2025-07-06 20:00:34] [Rank 0] Group 8 Loss: 4.3973
+[2025-07-06 20:00:34] [Rank 0] Group 9 Loss: 4.3763
+[2025-07-06 20:00:34] [Rank 0] Group 10 Loss: 4.4078
+[2025-07-06 20:00:34] [Rank 0] Group 11 Loss: 4.4455
+[2025-07-06 20:00:34] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-06 20:00:34] [Rank 0] Group 1 FTA: 0.4948
+[2025-07-06 20:00:34] [Rank 0] Group 2 FTA: 0.3333
+[2025-07-06 20:00:34] [Rank 0] Group 3 FTA: 0.1224
+[2025-07-06 20:00:34] [Rank 0] Group 4 FTA: 0.0599
+[2025-07-06 20:00:34] [Rank 0] Group 5 FTA: 0.0938
+[2025-07-06 20:00:34] [Rank 0] Group 6 FTA: 0.1719
+[2025-07-06 20:00:34] [Rank 0] Group 7 FTA: 0.1380
+[2025-07-06 20:00:34] [Rank 0] Group 8 FTA: 0.1328
+[2025-07-06 20:00:34] [Rank 0] Group 9 FTA: 0.1797
+[2025-07-06 20:00:34] [Rank 0] Group 10 FTA: 0.1641
+[2025-07-06 20:00:34] [Rank 0] Group 11 FTA: 0.1621
+[2025-07-06 20:00:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:00:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:00:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:00:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:00:35] [Rank 0] step:1001/10000 train_time:81151ms step_avg:81.07ms
+[2025-07-06 20:00:37] [Rank 0] step:1021/10000 train_time:82626ms step_avg:80.93ms
+[2025-07-06 20:00:38] [Rank 0] step:1041/10000 train_time:84094ms step_avg:80.78ms
+[2025-07-06 20:00:40] [Rank 0] step:1061/10000 train_time:85562ms step_avg:80.64ms
+[2025-07-06 20:00:42] [Rank 0] step:1081/10000 train_time:87031ms step_avg:80.51ms
+[2025-07-06 20:00:43] [Rank 0] step:1101/10000 train_time:88739ms step_avg:80.60ms
+[2025-07-06 20:00:44] [Rank 0] step:1121/10000 train_time:90212ms step_avg:80.47ms
+[2025-07-06 20:00:46] [Rank 0] step:1141/10000 train_time:91682ms step_avg:80.35ms
+[2025-07-06 20:00:47] [Rank 0] step:1161/10000 train_time:93152ms step_avg:80.23ms
+[2025-07-06 20:00:49] [Rank 0] step:1181/10000 train_time:94862ms step_avg:80.32ms
+[2025-07-06 20:00:51] [Rank 0] step:1201/10000 train_time:96334ms step_avg:80.21ms
+[2025-07-06 20:00:52] [Rank 0] step:1221/10000 train_time:97805ms step_avg:80.10ms
+[2025-07-06 20:00:54] [Rank 0] step:1241/10000 train_time:99278ms step_avg:80.00ms
+[2025-07-06 20:00:56] [Rank 0] step:1261/10000 train_time:101439ms step_avg:80.44ms
+[2025-07-06 20:00:57] [Rank 0] step:1281/10000 train_time:102893ms step_avg:80.32ms
+[2025-07-06 20:00:59] [Rank 0] step:1301/10000 train_time:104369ms step_avg:80.22ms
+[2025-07-06 20:01:00] [Rank 0] step:1321/10000 train_time:105845ms step_avg:80.12ms
+[2025-07-06 20:01:02] [Rank 0] step:1341/10000 train_time:107325ms step_avg:80.03ms
+[2025-07-06 20:01:04] [Rank 0] step:1361/10000 train_time:109451ms step_avg:80.42ms
+[2025-07-06 20:01:05] [Rank 0] step:1381/10000 train_time:110928ms step_avg:80.32ms
+[2025-07-06 20:01:07] [Rank 0] step:1401/10000 train_time:112407ms step_avg:80.23ms
+[2025-07-06 20:01:08] [Rank 0] step:1421/10000 train_time:113888ms step_avg:80.15ms
+[2025-07-06 20:01:10] [Rank 0] step:1441/10000 train_time:115368ms step_avg:80.06ms
+[2025-07-06 20:01:12] [Rank 0] step:1461/10000 train_time:117489ms step_avg:80.42ms
+[2025-07-06 20:01:13] [Rank 0] step:1481/10000 train_time:118969ms step_avg:80.33ms
+[2025-07-06 20:01:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:01:16] [Rank 0] PRINT: step:1500/10000 train_loss:1.2232 val_loss:1.1876 train_time:120448ms step_avg:80.30ms
+[2025-07-06 20:01:16] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:01:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
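Every detailed evaluation reports the same post-sampling size, 5633, against a ~5000 target, which points at a deterministic per-group quota rather than a global cap: rules such as per-group minimums or rounding the quota up can overshoot the target. A hedged sketch of such a sampler; the quota rule and the group_of helper are assumptions, not the script's actual code:

    import math
    import random
    from collections import defaultdict

    def stratified_sample(examples, group_of, target=5000, seed=0):
        # Bucket by group, then take a proportional quota from each bucket.
        # Rounding the quota up (or enforcing per-group minimums) is one way
        # the result can exceed the target, as 5633 does here.
        rng = random.Random(seed)
        buckets = defaultdict(list)
        for ex in examples:
            buckets[group_of(ex)].append(ex)
        frac = target / len(examples)
        picked = []
        for members in buckets.values():
            k = min(len(members), math.ceil(len(members) * frac))
            picked.extend(rng.sample(members, k))
        return picked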
+[2025-07-06 20:01:16] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:06:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:06:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:06:34] [Rank 0] Total Loss: 4.8045
+[2025-07-06 20:06:34] [Rank 0] Total FTA: 0.3723
+[2025-07-06 20:06:34] [Rank 0] Group 0 Loss: 5.1157
+[2025-07-06 20:06:34] [Rank 0] Group 1 Loss: 4.6600
+[2025-07-06 20:06:34] [Rank 0] Group 2 Loss: 4.6229
+[2025-07-06 20:06:34] [Rank 0] Group 3 Loss: 4.8693
+[2025-07-06 20:06:34] [Rank 0] Group 4 Loss: 4.7609
+[2025-07-06 20:06:34] [Rank 0] Group 5 Loss: 4.7627
+[2025-07-06 20:06:34] [Rank 0] Group 6 Loss: 4.6685
+[2025-07-06 20:06:34] [Rank 0] Group 7 Loss: 4.8294
+[2025-07-06 20:06:34] [Rank 0] Group 8 Loss: 4.7428
+[2025-07-06 20:06:34] [Rank 0] Group 9 Loss: 4.7268
+[2025-07-06 20:06:34] [Rank 0] Group 10 Loss: 4.7912
+[2025-07-06 20:06:34] [Rank 0] Group 11 Loss: 4.7915
+[2025-07-06 20:06:34] [Rank 0] Group 0 FTA: 0.5098
+[2025-07-06 20:06:34] [Rank 0] Group 1 FTA: 0.4948
+[2025-07-06 20:06:34] [Rank 0] Group 2 FTA: 0.4245
+[2025-07-06 20:06:34] [Rank 0] Group 3 FTA: 0.2839
+[2025-07-06 20:06:34] [Rank 0] Group 4 FTA: 0.2474
+[2025-07-06 20:06:34] [Rank 0] Group 5 FTA: 0.4089
+[2025-07-06 20:06:34] [Rank 0] Group 6 FTA: 0.3620
+[2025-07-06 20:06:34] [Rank 0] Group 7 FTA: 0.3151
+[2025-07-06 20:06:34] [Rank 0] Group 8 FTA: 0.3359
+[2025-07-06 20:06:34] [Rank 0] Group 9 FTA: 0.3398
+[2025-07-06 20:06:34] [Rank 0] Group 10 FTA: 0.3652
+[2025-07-06 20:06:34] [Rank 0] Group 11 FTA: 0.3203
+[2025-07-06 20:06:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:06:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:06:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:06:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:06:35] [Rank 0] step:1501/10000 train_time:120469ms step_avg:80.26ms
+[2025-07-06 20:06:37] [Rank 0] step:1521/10000 train_time:121937ms step_avg:80.17ms
+[2025-07-06 20:06:39] [Rank 0] step:1541/10000 train_time:124083ms step_avg:80.52ms
+[2025-07-06 20:06:40] [Rank 0] step:1561/10000 train_time:125552ms step_avg:80.43ms
+[2025-07-06 20:06:42] [Rank 0] step:1581/10000 train_time:127023ms step_avg:80.34ms
+[2025-07-06 20:06:43] [Rank 0] step:1601/10000 train_time:128493ms step_avg:80.26ms
+[2025-07-06 20:06:45] [Rank 0] step:1621/10000 train_time:130223ms step_avg:80.33ms
+[2025-07-06 20:06:47] [Rank 0] step:1641/10000 train_time:132241ms step_avg:80.59ms
+[2025-07-06 20:06:48] [Rank 0] step:1661/10000 train_time:133713ms step_avg:80.50ms
+[2025-07-06 20:06:50] [Rank 0] step:1681/10000 train_time:135187ms step_avg:80.42ms
+[2025-07-06 20:06:51] [Rank 0] step:1701/10000 train_time:136661ms step_avg:80.34ms
+[2025-07-06 20:06:53] [Rank 0] step:1721/10000 train_time:138802ms step_avg:80.65ms
+[2025-07-06 20:06:55] [Rank 0] step:1741/10000 train_time:140276ms step_avg:80.57ms
+[2025-07-06 20:06:56] [Rank 0] step:1761/10000 train_time:141751ms step_avg:80.49ms
+[2025-07-06 20:06:58] [Rank 0] step:1781/10000 train_time:143232ms step_avg:80.42ms
+[2025-07-06 20:07:00] [Rank 0] step:1801/10000 train_time:144765ms step_avg:80.38ms
+[2025-07-06 20:07:01] [Rank 0] step:1821/10000 train_time:146419ms step_avg:80.41ms
+[2025-07-06 20:07:03] [Rank 0] step:1841/10000 train_time:147896ms step_avg:80.33ms
+[2025-07-06 20:07:04] [Rank 0] step:1861/10000 train_time:149375ms step_avg:80.27ms
+[2025-07-06 20:07:06] [Rank 0] step:1881/10000 train_time:150851ms step_avg:80.20ms
+[2025-07-06 20:07:08] [Rank 0] step:1901/10000 train_time:152979ms step_avg:80.47ms
+[2025-07-06 20:07:09] [Rank 0] step:1921/10000 train_time:154456ms step_avg:80.40ms
+[2025-07-06 20:07:11] [Rank 0] step:1941/10000 train_time:155935ms step_avg:80.34ms
+[2025-07-06 20:07:12] [Rank 0] step:1961/10000 train_time:157418ms step_avg:80.27ms
+[2025-07-06 20:07:14] [Rank 0] step:1981/10000 train_time:158897ms step_avg:80.21ms
+[2025-07-06 20:07:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:07:17] [Rank 0] PRINT: step:2000/10000 train_loss:1.1152 val_loss:1.0913 train_time:161018ms step_avg:80.51ms
+[2025-07-06 20:07:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:07:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
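Each evaluation block prints one loss and one FTA value per group (12 groups) plus totals. The log never expands "FTA"; in QA probes of this kind it usually denotes first-token accuracy, i.e. whether the first answer token is predicted correctly, and the sketch below assumes that reading. The aggregation itself is then straightforward (micro-averaged totals shown; the script may weight groups differently):

    from collections import defaultdict

    def aggregate(records):
        # records: iterable of (group_id, loss, first_token_correct)
        loss, hits, n = defaultdict(float), defaultdict(int), defaultdict(int)
        for g, l, ok in records:
            loss[g] += l
            hits[g] += int(ok)
            n[g] += 1
        per_group = {g: (loss[g] / n[g], hits[g] / n[g]) for g in n}
        total_n = sum(n.values())
        total_loss = sum(loss.values()) / total_n
        total_fta = sum(hits.values()) / total_n
        return per_group, (total_loss, total_fta)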
+[2025-07-06 20:07:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:12:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:12:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:12:35] [Rank 0] Total Loss: 5.0736
+[2025-07-06 20:12:35] [Rank 0] Total FTA: 0.5223
+[2025-07-06 20:12:35] [Rank 0] Group 0 Loss: 5.4131
+[2025-07-06 20:12:35] [Rank 0] Group 1 Loss: 4.8757
+[2025-07-06 20:12:35] [Rank 0] Group 2 Loss: 4.8236
+[2025-07-06 20:12:35] [Rank 0] Group 3 Loss: 5.1575
+[2025-07-06 20:12:35] [Rank 0] Group 4 Loss: 4.9936
+[2025-07-06 20:12:35] [Rank 0] Group 5 Loss: 4.9876
+[2025-07-06 20:12:35] [Rank 0] Group 6 Loss: 4.9663
+[2025-07-06 20:12:35] [Rank 0] Group 7 Loss: 5.1018
+[2025-07-06 20:12:35] [Rank 0] Group 8 Loss: 5.0322
+[2025-07-06 20:12:35] [Rank 0] Group 9 Loss: 5.1028
+[2025-07-06 20:12:35] [Rank 0] Group 10 Loss: 5.0888
+[2025-07-06 20:12:35] [Rank 0] Group 11 Loss: 5.0479
+[2025-07-06 20:12:35] [Rank 0] Group 0 FTA: 0.6567
+[2025-07-06 20:12:35] [Rank 0] Group 1 FTA: 0.1693
+[2025-07-06 20:12:35] [Rank 0] Group 2 FTA: 0.6302
+[2025-07-06 20:12:35] [Rank 0] Group 3 FTA: 0.4922
+[2025-07-06 20:12:35] [Rank 0] Group 4 FTA: 0.3880
+[2025-07-06 20:12:35] [Rank 0] Group 5 FTA: 0.5417
+[2025-07-06 20:12:35] [Rank 0] Group 6 FTA: 0.5625
+[2025-07-06 20:12:35] [Rank 0] Group 7 FTA: 0.4792
+[2025-07-06 20:12:35] [Rank 0] Group 8 FTA: 0.5286
+[2025-07-06 20:12:35] [Rank 0] Group 9 FTA: 0.5312
+[2025-07-06 20:12:35] [Rank 0] Group 10 FTA: 0.5508
+[2025-07-06 20:12:35] [Rank 0] Group 11 FTA: 0.5498
+[2025-07-06 20:12:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:12:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:12:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:12:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:12:36] [Rank 0] step:2001/10000 train_time:161038ms step_avg:80.48ms
+[2025-07-06 20:12:38] [Rank 0] step:2021/10000 train_time:162514ms step_avg:80.41ms
+[2025-07-06 20:12:39] [Rank 0] step:2041/10000 train_time:163981ms step_avg:80.34ms
+[2025-07-06 20:12:41] [Rank 0] step:2061/10000 train_time:165451ms step_avg:80.28ms
+[2025-07-06 20:12:43] [Rank 0] step:2081/10000 train_time:167572ms step_avg:80.52ms
+[2025-07-06 20:12:44] [Rank 0] step:2101/10000 train_time:169042ms step_avg:80.46ms
+[2025-07-06 20:12:46] [Rank 0] step:2121/10000 train_time:170514ms step_avg:80.39ms
+[2025-07-06 20:12:47] [Rank 0] step:2141/10000 train_time:171987ms step_avg:80.33ms
+[2025-07-06 20:12:49] [Rank 0] step:2161/10000 train_time:173460ms step_avg:80.27ms
+[2025-07-06 20:12:51] [Rank 0] step:2181/10000 train_time:175575ms step_avg:80.50ms
+[2025-07-06 20:12:52] [Rank 0] step:2201/10000 train_time:177047ms step_avg:80.44ms
+[2025-07-06 20:12:54] [Rank 0] step:2221/10000 train_time:178520ms step_avg:80.38ms
+[2025-07-06 20:12:55] [Rank 0] step:2241/10000 train_time:180016ms step_avg:80.33ms
+[2025-07-06 20:12:57] [Rank 0] step:2261/10000 train_time:181748ms step_avg:80.38ms
+[2025-07-06 20:12:59] [Rank 0] step:2281/10000 train_time:183245ms step_avg:80.34ms
+[2025-07-06 20:13:00] [Rank 0] step:2301/10000 train_time:184745ms step_avg:80.29ms
+[2025-07-06 20:13:02] [Rank 0] step:2321/10000 train_time:186245ms step_avg:80.24ms
+[2025-07-06 20:13:04] [Rank 0] step:2341/10000 train_time:187745ms step_avg:80.20ms
+[2025-07-06 20:13:05] [Rank 0] step:2361/10000 train_time:189996ms step_avg:80.47ms
+[2025-07-06 20:13:07] [Rank 0] step:2381/10000 train_time:191555ms step_avg:80.45ms
+[2025-07-06 20:13:08] [Rank 0] step:2401/10000 train_time:193054ms step_avg:80.41ms
+[2025-07-06 20:13:10] [Rank 0] step:2421/10000 train_time:194555ms step_avg:80.36ms
+[2025-07-06 20:13:12] [Rank 0] step:2441/10000 train_time:196294ms step_avg:80.42ms
+[2025-07-06 20:13:13] [Rank 0] step:2461/10000 train_time:197793ms step_avg:80.37ms
+[2025-07-06 20:13:15] [Rank 0] step:2481/10000 train_time:199294ms step_avg:80.33ms
+[2025-07-06 20:13:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:13:17] [Rank 0] PRINT: step:2500/10000 train_loss:0.9958 val_loss:0.9375 train_time:200794ms step_avg:80.32ms
+[2025-07-06 20:13:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:13:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:13:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:18:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:18:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:18:35] [Rank 0] Total Loss: 5.0953
+[2025-07-06 20:18:35] [Rank 0] Total FTA: 0.8422
+[2025-07-06 20:18:35] [Rank 0] Group 0 Loss: 5.1846
+[2025-07-06 20:18:35] [Rank 0] Group 1 Loss: 4.9717
+[2025-07-06 20:18:35] [Rank 0] Group 2 Loss: 4.8863
+[2025-07-06 20:18:35] [Rank 0] Group 3 Loss: 5.1896
+[2025-07-06 20:18:35] [Rank 0] Group 4 Loss: 5.1119
+[2025-07-06 20:18:35] [Rank 0] Group 5 Loss: 5.0384
+[2025-07-06 20:18:35] [Rank 0] Group 6 Loss: 4.9682
+[2025-07-06 20:18:35] [Rank 0] Group 7 Loss: 5.0904
+[2025-07-06 20:18:35] [Rank 0] Group 8 Loss: 5.1156
+[2025-07-06 20:18:35] [Rank 0] Group 9 Loss: 5.0764
+[2025-07-06 20:18:35] [Rank 0] Group 10 Loss: 5.1660
+[2025-07-06 20:18:35] [Rank 0] Group 11 Loss: 5.1440
+[2025-07-06 20:18:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 20:18:35] [Rank 0] Group 1 FTA: 0.6979
+[2025-07-06 20:18:35] [Rank 0] Group 2 FTA: 0.7500
+[2025-07-06 20:18:35] [Rank 0] Group 3 FTA: 0.9036
+[2025-07-06 20:18:35] [Rank 0] Group 4 FTA: 0.8542
+[2025-07-06 20:18:35] [Rank 0] Group 5 FTA: 0.8151
+[2025-07-06 20:18:35] [Rank 0] Group 6 FTA: 0.8047
+[2025-07-06 20:18:35] [Rank 0] Group 7 FTA: 0.8125
+[2025-07-06 20:18:35] [Rank 0] Group 8 FTA: 0.8568
+[2025-07-06 20:18:35] [Rank 0] Group 9 FTA: 0.7930
+[2025-07-06 20:18:35] [Rank 0] Group 10 FTA: 0.8535
+[2025-07-06 20:18:35] [Rank 0] Group 11 FTA: 0.8213
+[2025-07-06 20:18:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:18:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:18:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:18:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:18:36] [Rank 0] step:2501/10000 train_time:200815ms step_avg:80.29ms
+[2025-07-06 20:18:39] [Rank 0] step:2521/10000 train_time:202320ms step_avg:80.25ms
+[2025-07-06 20:18:40] [Rank 0] step:2541/10000 train_time:204476ms step_avg:80.47ms
+[2025-07-06 20:18:42] [Rank 0] step:2561/10000 train_time:205970ms step_avg:80.43ms
+[2025-07-06 20:18:43] [Rank 0] step:2581/10000 train_time:207464ms step_avg:80.38ms
+[2025-07-06 20:18:45] [Rank 0] step:2601/10000 train_time:208957ms step_avg:80.34ms
+[2025-07-06 20:18:47] [Rank 0] step:2621/10000 train_time:211120ms step_avg:80.55ms
+[2025-07-06 20:18:48] [Rank 0] step:2641/10000 train_time:212616ms step_avg:80.51ms
+[2025-07-06 20:18:50] [Rank 0] step:2661/10000 train_time:214113ms step_avg:80.46ms
+[2025-07-06 20:18:51] [Rank 0] step:2681/10000 train_time:215610ms step_avg:80.42ms
+[2025-07-06 20:18:53] [Rank 0] step:2701/10000 train_time:217107ms step_avg:80.38ms
+[2025-07-06 20:18:55] [Rank 0] step:2721/10000 train_time:219264ms step_avg:80.58ms
+[2025-07-06 20:18:56] [Rank 0] step:2741/10000 train_time:220761ms step_avg:80.54ms
+[2025-07-06 20:18:58] [Rank 0] step:2761/10000 train_time:222258ms step_avg:80.50ms
+[2025-07-06 20:18:59] [Rank 0] step:2781/10000 train_time:223758ms step_avg:80.46ms
+[2025-07-06 20:19:01] [Rank 0] step:2801/10000 train_time:225491ms step_avg:80.50ms
+[2025-07-06 20:19:03] [Rank 0] step:2821/10000 train_time:226990ms step_avg:80.46ms
+[2025-07-06 20:19:04] [Rank 0] step:2841/10000 train_time:228490ms step_avg:80.43ms
+[2025-07-06 20:19:06] [Rank 0] step:2861/10000 train_time:229990ms step_avg:80.39ms
+[2025-07-06 20:19:08] [Rank 0] step:2881/10000 train_time:231543ms step_avg:80.37ms
+[2025-07-06 20:19:09] [Rank 0] step:2901/10000 train_time:233636ms step_avg:80.54ms
+[2025-07-06 20:19:11] [Rank 0] step:2921/10000 train_time:235139ms step_avg:80.50ms
+[2025-07-06 20:19:12] [Rank 0] step:2941/10000 train_time:236638ms step_avg:80.46ms
+[2025-07-06 20:19:14] [Rank 0] step:2961/10000 train_time:238138ms step_avg:80.42ms
+[2025-07-06 20:19:16] [Rank 0] step:2981/10000 train_time:240277ms step_avg:80.60ms
+[2025-07-06 20:19:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:19:18] [Rank 0] PRINT: step:3000/10000 train_loss:0.9167 val_loss:0.8918 train_time:241774ms step_avg:80.59ms
+[2025-07-06 20:19:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:19:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
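The four "[✓] ... curve updated and saved to" lines recur at every evaluation with the same filenames: the plots are evidently redrawn from the accumulated history and overwrite the same PNGs each time, so the files on disk always show the full run so far. A minimal matplotlib sketch of that pattern; the history layout is an assumption:

    import matplotlib
    matplotlib.use("Agg")  # headless backend, as on a training node
    import matplotlib.pyplot as plt

    def update_per_class_curves(history, out_png):
        # history: {"steps": [500, 1000, ...], "per_group": {g: [loss at each step]}}
        fig, ax = plt.subplots()
        for g, ys in sorted(history["per_group"].items()):
            ax.plot(history["steps"], ys, label=f"Group {g}")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval loss")
        ax.legend(fontsize=6)
        fig.savefig(out_png, dpi=150)  # fixed path, overwritten every eval
        plt.close(fig)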
+[2025-07-06 20:19:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:24:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:24:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:24:38] [Rank 0] Total Loss: 5.1878
+[2025-07-06 20:24:38] [Rank 0] Total FTA: 0.8571
+[2025-07-06 20:24:38] [Rank 0] Group 0 Loss: 5.3860
+[2025-07-06 20:24:38] [Rank 0] Group 1 Loss: 4.9326
+[2025-07-06 20:24:38] [Rank 0] Group 2 Loss: 4.9761
+[2025-07-06 20:24:38] [Rank 0] Group 3 Loss: 5.4540
+[2025-07-06 20:24:38] [Rank 0] Group 4 Loss: 5.1528
+[2025-07-06 20:24:38] [Rank 0] Group 5 Loss: 5.1207
+[2025-07-06 20:24:38] [Rank 0] Group 6 Loss: 5.0821
+[2025-07-06 20:24:38] [Rank 0] Group 7 Loss: 5.2349
+[2025-07-06 20:24:38] [Rank 0] Group 8 Loss: 5.1956
+[2025-07-06 20:24:38] [Rank 0] Group 9 Loss: 5.1587
+[2025-07-06 20:24:38] [Rank 0] Group 10 Loss: 5.1995
+[2025-07-06 20:24:38] [Rank 0] Group 11 Loss: 5.1732
+[2025-07-06 20:24:38] [Rank 0] Group 0 FTA: 0.8466
+[2025-07-06 20:24:38] [Rank 0] Group 1 FTA: 0.5234
+[2025-07-06 20:24:38] [Rank 0] Group 2 FTA: 0.8490
+[2025-07-06 20:24:38] [Rank 0] Group 3 FTA: 0.8490
+[2025-07-06 20:24:38] [Rank 0] Group 4 FTA: 0.8958
+[2025-07-06 20:24:38] [Rank 0] Group 5 FTA: 0.9453
+[2025-07-06 20:24:38] [Rank 0] Group 6 FTA: 0.8932
+[2025-07-06 20:24:38] [Rank 0] Group 7 FTA: 0.8776
+[2025-07-06 20:24:38] [Rank 0] Group 8 FTA: 0.9036
+[2025-07-06 20:24:38] [Rank 0] Group 9 FTA: 0.8828
+[2025-07-06 20:24:38] [Rank 0] Group 10 FTA: 0.8867
+[2025-07-06 20:24:38] [Rank 0] Group 11 FTA: 0.8887
+[2025-07-06 20:24:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:24:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:24:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:24:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:24:39] [Rank 0] step:3001/10000 train_time:241796ms step_avg:80.57ms
+[2025-07-06 20:24:41] [Rank 0] step:3021/10000 train_time:243415ms step_avg:80.57ms
+[2025-07-06 20:24:43] [Rank 0] step:3041/10000 train_time:244909ms step_avg:80.54ms
+[2025-07-06 20:24:45] [Rank 0] step:3061/10000 train_time:246401ms step_avg:80.50ms
+[2025-07-06 20:24:46] [Rank 0] step:3081/10000 train_time:248552ms step_avg:80.67ms
+[2025-07-06 20:24:48] [Rank 0] step:3101/10000 train_time:250044ms step_avg:80.63ms
+[2025-07-06 20:24:49] [Rank 0] step:3121/10000 train_time:251540ms step_avg:80.60ms
+[2025-07-06 20:24:51] [Rank 0] step:3141/10000 train_time:253035ms step_avg:80.56ms
+[2025-07-06 20:24:53] [Rank 0] step:3161/10000 train_time:255196ms step_avg:80.73ms
+[2025-07-06 20:24:54] [Rank 0] step:3181/10000 train_time:256690ms step_avg:80.69ms
+[2025-07-06 20:24:56] [Rank 0] step:3201/10000 train_time:258187ms step_avg:80.66ms
+[2025-07-06 20:24:57] [Rank 0] step:3221/10000 train_time:259686ms step_avg:80.62ms
+[2025-07-06 20:25:00] [Rank 0] step:3241/10000 train_time:261185ms step_avg:80.59ms
+[2025-07-06 20:25:01] [Rank 0] step:3261/10000 train_time:263340ms step_avg:80.75ms
+[2025-07-06 20:25:03] [Rank 0] step:3281/10000 train_time:264839ms step_avg:80.72ms
+[2025-07-06 20:25:04] [Rank 0] step:3301/10000 train_time:266337ms step_avg:80.68ms
+[2025-07-06 20:25:06] [Rank 0] step:3321/10000 train_time:267837ms step_avg:80.65ms
+[2025-07-06 20:25:08] [Rank 0] step:3341/10000 train_time:269980ms step_avg:80.81ms
+[2025-07-06 20:25:09] [Rank 0] step:3361/10000 train_time:271480ms step_avg:80.77ms
+[2025-07-06 20:25:11] [Rank 0] step:3381/10000 train_time:272981ms step_avg:80.74ms
+[2025-07-06 20:25:12] [Rank 0] step:3401/10000 train_time:274482ms step_avg:80.71ms
+[2025-07-06 20:25:14] [Rank 0] step:3421/10000 train_time:275983ms step_avg:80.67ms
+[2025-07-06 20:25:15] [Rank 0] step:3441/10000 train_time:277721ms step_avg:80.71ms
+[2025-07-06 20:25:17] [Rank 0] step:3461/10000 train_time:279220ms step_avg:80.68ms
+[2025-07-06 20:25:18] [Rank 0] step:3481/10000 train_time:280720ms step_avg:80.64ms
+[2025-07-06 20:25:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:25:21] [Rank 0] PRINT: step:3500/10000 train_loss:0.8889 val_loss:0.8815 train_time:282223ms step_avg:80.64ms
+[2025-07-06 20:25:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:25:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
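One caveat when reading these timings: train_time excludes the detailed evaluations. Between the step-3500 report (20:25:21) and step 3501 (20:30:42) more than five minutes of wall clock elapse, yet train_time advances only from 282223 ms to 282244 ms, so the roughly five-minute FTA pass does not inflate the per-step averages.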
+[2025-07-06 20:25:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:30:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:30:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:30:41] [Rank 0] Total Loss: 5.2754
+[2025-07-06 20:30:41] [Rank 0] Total FTA: 0.9320
+[2025-07-06 20:30:41] [Rank 0] Group 0 Loss: 5.3134
+[2025-07-06 20:30:41] [Rank 0] Group 1 Loss: 5.2628
+[2025-07-06 20:30:41] [Rank 0] Group 2 Loss: 5.1692
+[2025-07-06 20:30:41] [Rank 0] Group 3 Loss: 5.4516
+[2025-07-06 20:30:41] [Rank 0] Group 4 Loss: 5.3217
+[2025-07-06 20:30:41] [Rank 0] Group 5 Loss: 5.2751
+[2025-07-06 20:30:41] [Rank 0] Group 6 Loss: 5.1321
+[2025-07-06 20:30:41] [Rank 0] Group 7 Loss: 5.2843
+[2025-07-06 20:30:41] [Rank 0] Group 8 Loss: 5.2482
+[2025-07-06 20:30:41] [Rank 0] Group 9 Loss: 5.2021
+[2025-07-06 20:30:41] [Rank 0] Group 10 Loss: 5.2773
+[2025-07-06 20:30:41] [Rank 0] Group 11 Loss: 5.2863
+[2025-07-06 20:30:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 20:30:41] [Rank 0] Group 1 FTA: 0.8255
+[2025-07-06 20:30:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 20:30:41] [Rank 0] Group 3 FTA: 0.9141
+[2025-07-06 20:30:41] [Rank 0] Group 4 FTA: 0.9714
+[2025-07-06 20:30:41] [Rank 0] Group 5 FTA: 0.8672
+[2025-07-06 20:30:41] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-06 20:30:41] [Rank 0] Group 7 FTA: 0.9427
+[2025-07-06 20:30:41] [Rank 0] Group 8 FTA: 0.9193
+[2025-07-06 20:30:41] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-06 20:30:41] [Rank 0] Group 10 FTA: 0.9258
+[2025-07-06 20:30:41] [Rank 0] Group 11 FTA: 0.9336
+[2025-07-06 20:30:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:30:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:30:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:30:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:30:42] [Rank 0] step:3501/10000 train_time:282244ms step_avg:80.62ms
+[2025-07-06 20:30:44] [Rank 0] step:3521/10000 train_time:284397ms step_avg:80.77ms
+[2025-07-06 20:30:46] [Rank 0] step:3541/10000 train_time:285887ms step_avg:80.74ms
+[2025-07-06 20:30:47] [Rank 0] step:3561/10000 train_time:287381ms step_avg:80.70ms
+[2025-07-06 20:30:49] [Rank 0] step:3581/10000 train_time:288875ms step_avg:80.67ms
+[2025-07-06 20:30:51] [Rank 0] step:3601/10000 train_time:290421ms step_avg:80.65ms
+[2025-07-06 20:30:53] [Rank 0] step:3621/10000 train_time:292511ms step_avg:80.78ms
+[2025-07-06 20:30:54] [Rank 0] step:3641/10000 train_time:294004ms step_avg:80.75ms
+[2025-07-06 20:30:56] [Rank 0] step:3661/10000 train_time:295501ms step_avg:80.72ms
+[2025-07-06 20:30:57] [Rank 0] step:3681/10000 train_time:296998ms step_avg:80.68ms
+[2025-07-06 20:30:59] [Rank 0] step:3701/10000 train_time:298731ms step_avg:80.72ms
+[2025-07-06 20:31:00] [Rank 0] step:3721/10000 train_time:300295ms step_avg:80.70ms
+[2025-07-06 20:31:02] [Rank 0] step:3741/10000 train_time:301846ms step_avg:80.69ms
+[2025-07-06 20:31:03] [Rank 0] step:3761/10000 train_time:303350ms step_avg:80.66ms
+[2025-07-06 20:31:05] [Rank 0] step:3781/10000 train_time:305106ms step_avg:80.69ms
+[2025-07-06 20:31:07] [Rank 0] step:3801/10000 train_time:306586ms step_avg:80.66ms
+[2025-07-06 20:31:08] [Rank 0] step:3821/10000 train_time:308087ms step_avg:80.63ms
+[2025-07-06 20:31:10] [Rank 0] step:3841/10000 train_time:309588ms step_avg:80.60ms
+[2025-07-06 20:31:11] [Rank 0] step:3861/10000 train_time:311088ms step_avg:80.57ms
+[2025-07-06 20:31:13] [Rank 0] step:3881/10000 train_time:313237ms step_avg:80.71ms
+[2025-07-06 20:31:15] [Rank 0] step:3901/10000 train_time:314736ms step_avg:80.68ms
+[2025-07-06 20:31:16] [Rank 0] step:3921/10000 train_time:316238ms step_avg:80.65ms
+[2025-07-06 20:31:18] [Rank 0] step:3941/10000 train_time:317739ms step_avg:80.62ms
+[2025-07-06 20:31:20] [Rank 0] step:3961/10000 train_time:319241ms step_avg:80.60ms
+[2025-07-06 20:31:22] [Rank 0] step:3981/10000 train_time:321411ms step_avg:80.74ms
+[2025-07-06 20:31:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:31:24] [Rank 0] PRINT: step:4000/10000 train_loss:0.8798 val_loss:0.8745 train_time:322913ms step_avg:80.73ms
+[2025-07-06 20:31:24] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:31:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:31:24] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:36:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:36:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:36:42] [Rank 0] Total Loss: 5.2208
+[2025-07-06 20:36:42] [Rank 0] Total FTA: 0.9510
+[2025-07-06 20:36:42] [Rank 0] Group 0 Loss: 5.3336
+[2025-07-06 20:36:42] [Rank 0] Group 1 Loss: 5.0944
+[2025-07-06 20:36:42] [Rank 0] Group 2 Loss: 5.0570
+[2025-07-06 20:36:42] [Rank 0] Group 3 Loss: 5.3706
+[2025-07-06 20:36:42] [Rank 0] Group 4 Loss: 5.1407
+[2025-07-06 20:36:42] [Rank 0] Group 5 Loss: 5.1887
+[2025-07-06 20:36:42] [Rank 0] Group 6 Loss: 5.1080
+[2025-07-06 20:36:42] [Rank 0] Group 7 Loss: 5.2387
+[2025-07-06 20:36:42] [Rank 0] Group 8 Loss: 5.2753
+[2025-07-06 20:36:42] [Rank 0] Group 9 Loss: 5.2094
+[2025-07-06 20:36:42] [Rank 0] Group 10 Loss: 5.2534
+[2025-07-06 20:36:42] [Rank 0] Group 11 Loss: 5.2328
+[2025-07-06 20:36:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 20:36:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:36:42] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-06 20:36:42] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 20:36:42] [Rank 0] Group 4 FTA: 0.9349
+[2025-07-06 20:36:42] [Rank 0] Group 5 FTA: 0.9531
+[2025-07-06 20:36:42] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-06 20:36:42] [Rank 0] Group 7 FTA: 0.9245
+[2025-07-06 20:36:42] [Rank 0] Group 8 FTA: 0.9453
+[2025-07-06 20:36:42] [Rank 0] Group 9 FTA: 0.9180
+[2025-07-06 20:36:42] [Rank 0] Group 10 FTA: 0.9473
+[2025-07-06 20:36:42] [Rank 0] Group 11 FTA: 0.9229
+[2025-07-06 20:36:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:36:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:36:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:36:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:36:43] [Rank 0] step:4001/10000 train_time:322933ms step_avg:80.71ms
+[2025-07-06 20:36:45] [Rank 0] step:4021/10000 train_time:324422ms step_avg:80.68ms
+[2025-07-06 20:36:46] [Rank 0] step:4041/10000 train_time:325916ms step_avg:80.65ms
+[2025-07-06 20:36:48] [Rank 0] step:4061/10000 train_time:328079ms step_avg:80.79ms
+[2025-07-06 20:36:50] [Rank 0] step:4081/10000 train_time:329571ms step_avg:80.76ms
+[2025-07-06 20:36:51] [Rank 0] step:4101/10000 train_time:331066ms step_avg:80.73ms
+[2025-07-06 20:36:53] [Rank 0] step:4121/10000 train_time:332560ms step_avg:80.70ms
+[2025-07-06 20:36:55] [Rank 0] step:4141/10000 train_time:334715ms step_avg:80.83ms
+[2025-07-06 20:36:56] [Rank 0] step:4161/10000 train_time:336192ms step_avg:80.80ms
+[2025-07-06 20:36:58] [Rank 0] step:4181/10000 train_time:337688ms step_avg:80.77ms
+[2025-07-06 20:36:59] [Rank 0] step:4201/10000 train_time:339185ms step_avg:80.74ms
+[2025-07-06 20:37:01] [Rank 0] step:4221/10000 train_time:340681ms step_avg:80.71ms
+[2025-07-06 20:37:03] [Rank 0] step:4241/10000 train_time:342836ms step_avg:80.84ms
+[2025-07-06 20:37:05] [Rank 0] step:4261/10000 train_time:344332ms step_avg:80.81ms
+[2025-07-06 20:37:06] [Rank 0] step:4281/10000 train_time:345830ms step_avg:80.78ms
+[2025-07-06 20:37:08] [Rank 0] step:4301/10000 train_time:347330ms step_avg:80.76ms
+[2025-07-06 20:37:10] [Rank 0] step:4321/10000 train_time:348883ms step_avg:80.74ms
+[2025-07-06 20:37:11] [Rank 0] step:4341/10000 train_time:350980ms step_avg:80.85ms
+[2025-07-06 20:37:13] [Rank 0] step:4361/10000 train_time:352481ms step_avg:80.83ms
+[2025-07-06 20:37:14] [Rank 0] step:4381/10000 train_time:353981ms step_avg:80.80ms
+[2025-07-06 20:37:16] [Rank 0] step:4401/10000 train_time:355484ms step_avg:80.77ms
+[2025-07-06 20:37:18] [Rank 0] step:4421/10000 train_time:357653ms step_avg:80.90ms
+[2025-07-06 20:37:19] [Rank 0] step:4441/10000 train_time:359153ms step_avg:80.87ms
+[2025-07-06 20:37:21] [Rank 0] step:4461/10000 train_time:360779ms step_avg:80.87ms
+[2025-07-06 20:37:23] [Rank 0] step:4481/10000 train_time:362283ms step_avg:80.85ms
+[2025-07-06 20:37:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:37:25] [Rank 0] PRINT: step:4500/10000 train_loss:0.8746 val_loss:0.8732 train_time:363785ms step_avg:80.84ms
+[2025-07-06 20:37:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:37:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:37:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:42:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:42:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:42:41] [Rank 0] Total Loss: 5.2920
+[2025-07-06 20:42:41] [Rank 0] Total FTA: 0.9496
+[2025-07-06 20:42:41] [Rank 0] Group 0 Loss: 5.3229
+[2025-07-06 20:42:41] [Rank 0] Group 1 Loss: 5.2398
+[2025-07-06 20:42:41] [Rank 0] Group 2 Loss: 5.0343
+[2025-07-06 20:42:41] [Rank 0] Group 3 Loss: 5.4864
+[2025-07-06 20:42:41] [Rank 0] Group 4 Loss: 5.3533
+[2025-07-06 20:42:41] [Rank 0] Group 5 Loss: 5.2857
+[2025-07-06 20:42:41] [Rank 0] Group 6 Loss: 5.1978
+[2025-07-06 20:42:41] [Rank 0] Group 7 Loss: 5.3032
+[2025-07-06 20:42:41] [Rank 0] Group 8 Loss: 5.3097
+[2025-07-06 20:42:41] [Rank 0] Group 9 Loss: 5.2628
+[2025-07-06 20:42:41] [Rank 0] Group 10 Loss: 5.2986
+[2025-07-06 20:42:41] [Rank 0] Group 11 Loss: 5.3199
+[2025-07-06 20:42:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 20:42:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:42:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 20:42:41] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-06 20:42:41] [Rank 0] Group 4 FTA: 0.9349
+[2025-07-06 20:42:41] [Rank 0] Group 5 FTA: 0.9375
+[2025-07-06 20:42:41] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-06 20:42:41] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-06 20:42:41] [Rank 0] Group 8 FTA: 0.9036
+[2025-07-06 20:42:41] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-06 20:42:41] [Rank 0] Group 10 FTA: 0.9297
+[2025-07-06 20:42:41] [Rank 0] Group 11 FTA: 0.9258
+[2025-07-06 20:42:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:42:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:42:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:42:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:42:44] [Rank 0] step:4501/10000 train_time:364553ms step_avg:80.99ms
+[2025-07-06 20:42:45] [Rank 0] step:4521/10000 train_time:366070ms step_avg:80.97ms
+[2025-07-06 20:42:47] [Rank 0] step:4541/10000 train_time:367561ms step_avg:80.94ms
+[2025-07-06 20:42:48] [Rank 0] step:4561/10000 train_time:369055ms step_avg:80.92ms
+[2025-07-06 20:42:50] [Rank 0] step:4581/10000 train_time:370547ms step_avg:80.89ms
+[2025-07-06 20:42:51] [Rank 0] step:4601/10000 train_time:372279ms step_avg:80.91ms
+[2025-07-06 20:42:53] [Rank 0] step:4621/10000 train_time:373773ms step_avg:80.89ms
+[2025-07-06 20:42:54] [Rank 0] step:4641/10000 train_time:375267ms step_avg:80.86ms
+[2025-07-06 20:42:56] [Rank 0] step:4661/10000 train_time:376762ms step_avg:80.83ms
+[2025-07-06 20:42:58] [Rank 0] step:4681/10000 train_time:378257ms step_avg:80.81ms
+[2025-07-06 20:42:59] [Rank 0] step:4701/10000 train_time:380395ms step_avg:80.92ms
+[2025-07-06 20:43:01] [Rank 0] step:4721/10000 train_time:381892ms step_avg:80.89ms
+[2025-07-06 20:43:02] [Rank 0] step:4741/10000 train_time:383389ms step_avg:80.87ms
+[2025-07-06 20:43:04] [Rank 0] step:4761/10000 train_time:384888ms step_avg:80.84ms
+[2025-07-06 20:43:06] [Rank 0] step:4781/10000 train_time:387035ms step_avg:80.95ms
+[2025-07-06 20:43:08] [Rank 0] step:4801/10000 train_time:388535ms step_avg:80.93ms
+[2025-07-06 20:43:09] [Rank 0] step:4821/10000 train_time:390035ms step_avg:80.90ms
+[2025-07-06 20:43:11] [Rank 0] step:4841/10000 train_time:391536ms step_avg:80.88ms
+[2025-07-06 20:43:13] [Rank 0] step:4861/10000 train_time:393038ms step_avg:80.86ms
+[2025-07-06 20:43:14] [Rank 0] step:4881/10000 train_time:395183ms step_avg:80.96ms
+[2025-07-06 20:43:16] [Rank 0] step:4901/10000 train_time:396682ms step_avg:80.94ms
+[2025-07-06 20:43:17] [Rank 0] step:4921/10000 train_time:398184ms step_avg:80.92ms
+[2025-07-06 20:43:19] [Rank 0] step:4941/10000 train_time:399688ms step_avg:80.89ms
+[2025-07-06 20:43:21] [Rank 0] step:4961/10000 train_time:401855ms step_avg:81.00ms
+[2025-07-06 20:43:22] [Rank 0] step:4981/10000 train_time:403355ms step_avg:80.98ms
+[2025-07-06 20:43:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:43:25] [Rank 0] PRINT: step:5000/10000 train_loss:0.8708 val_loss:0.8707 train_time:404860ms step_avg:80.97ms
+[2025-07-06 20:43:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:43:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:43:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:48:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:48:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:48:43] [Rank 0] Total Loss: 5.2923
+[2025-07-06 20:48:43] [Rank 0] Total FTA: 0.9501
+[2025-07-06 20:48:43] [Rank 0] Group 0 Loss: 5.5201
+[2025-07-06 20:48:43] [Rank 0] Group 1 Loss: 5.2436
+[2025-07-06 20:48:43] [Rank 0] Group 2 Loss: 4.9946
+[2025-07-06 20:48:43] [Rank 0] Group 3 Loss: 5.4080
+[2025-07-06 20:48:43] [Rank 0] Group 4 Loss: 5.2880
+[2025-07-06 20:48:43] [Rank 0] Group 5 Loss: 5.3222
+[2025-07-06 20:48:43] [Rank 0] Group 6 Loss: 5.1944
+[2025-07-06 20:48:43] [Rank 0] Group 7 Loss: 5.2603
+[2025-07-06 20:48:43] [Rank 0] Group 8 Loss: 5.2728
+[2025-07-06 20:48:43] [Rank 0] Group 9 Loss: 5.2788
+[2025-07-06 20:48:43] [Rank 0] Group 10 Loss: 5.2463
+[2025-07-06 20:48:43] [Rank 0] Group 11 Loss: 5.2804
+[2025-07-06 20:48:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 20:48:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:48:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 20:48:43] [Rank 0] Group 3 FTA: 0.9688
+[2025-07-06 20:48:43] [Rank 0] Group 4 FTA: 0.9661
+[2025-07-06 20:48:43] [Rank 0] Group 5 FTA: 0.9531
+[2025-07-06 20:48:43] [Rank 0] Group 6 FTA: 0.8958
+[2025-07-06 20:48:43] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-06 20:48:43] [Rank 0] Group 8 FTA: 0.9167
+[2025-07-06 20:48:43] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-06 20:48:43] [Rank 0] Group 10 FTA: 0.9023
+[2025-07-06 20:48:43] [Rank 0] Group 11 FTA: 0.9375
+[2025-07-06 20:48:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:48:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:48:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:48:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:48:44] [Rank 0] step:5001/10000 train_time:404880ms step_avg:80.96ms
+[2025-07-06 20:48:46] [Rank 0] step:5021/10000 train_time:406381ms step_avg:80.94ms
+[2025-07-06 20:48:48] [Rank 0] step:5041/10000 train_time:408564ms step_avg:81.05ms
+[2025-07-06 20:48:49] [Rank 0] step:5061/10000 train_time:410037ms step_avg:81.02ms
+[2025-07-06 20:48:51] [Rank 0] step:5081/10000 train_time:411529ms step_avg:80.99ms
+[2025-07-06 20:48:52] [Rank 0] step:5101/10000 train_time:413022ms step_avg:80.97ms
+[2025-07-06 20:48:54] [Rank 0] step:5121/10000 train_time:414516ms step_avg:80.94ms
+[2025-07-06 20:48:56] [Rank 0] step:5141/10000 train_time:416680ms step_avg:81.05ms
+[2025-07-06 20:48:57] [Rank 0] step:5161/10000 train_time:418173ms step_avg:81.03ms
+[2025-07-06 20:48:59] [Rank 0] step:5181/10000 train_time:419668ms step_avg:81.00ms
+[2025-07-06 20:49:00] [Rank 0] step:5201/10000 train_time:421166ms step_avg:80.98ms
+[2025-07-06 20:49:03] [Rank 0] step:5221/10000 train_time:422714ms step_avg:80.96ms
+[2025-07-06 20:49:04] [Rank 0] step:5241/10000 train_time:424823ms step_avg:81.06ms
+[2025-07-06 20:49:06] [Rank 0] step:5261/10000 train_time:426321ms step_avg:81.03ms
+[2025-07-06 20:49:07] [Rank 0] step:5281/10000 train_time:427818ms step_avg:81.01ms
+[2025-07-06 20:49:08] [Rank 0] step:5301/10000 train_time:429318ms step_avg:80.99ms
+[2025-07-06 20:49:11] [Rank 0] step:5321/10000 train_time:431466ms step_avg:81.09ms
+[2025-07-06 20:49:12] [Rank 0] step:5341/10000 train_time:432963ms step_avg:81.06ms
+[2025-07-06 20:49:14] [Rank 0] step:5361/10000 train_time:434461ms step_avg:81.04ms
+[2025-07-06 20:49:15] [Rank 0] step:5381/10000 train_time:435960ms step_avg:81.02ms
+[2025-07-06 20:49:17] [Rank 0] step:5401/10000 train_time:437459ms step_avg:81.00ms
+[2025-07-06 20:49:19] [Rank 0] step:5421/10000 train_time:439610ms step_avg:81.09ms
+[2025-07-06 20:49:20] [Rank 0] step:5441/10000 train_time:441108ms step_avg:81.07ms
+[2025-07-06 20:49:22] [Rank 0] step:5461/10000 train_time:442609ms step_avg:81.05ms
+[2025-07-06 20:49:23] [Rank 0] step:5481/10000 train_time:444110ms step_avg:81.03ms
+[2025-07-06 20:49:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:49:26] [Rank 0] PRINT: step:5500/10000 train_loss:0.8681 val_loss:0.8688 train_time:446251ms step_avg:81.14ms
+[2025-07-06 20:49:26] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:49:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:49:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:54:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:54:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:54:44] [Rank 0] Total Loss: 5.3092
+[2025-07-06 20:54:44] [Rank 0] Total FTA: 0.9544
+[2025-07-06 20:54:44] [Rank 0] Group 0 Loss: 5.4941
+[2025-07-06 20:54:44] [Rank 0] Group 1 Loss: 5.3081
+[2025-07-06 20:54:44] [Rank 0] Group 2 Loss: 5.0984
+[2025-07-06 20:54:44] [Rank 0] Group 3 Loss: 5.3318
+[2025-07-06 20:54:44] [Rank 0] Group 4 Loss: 5.3171
+[2025-07-06 20:54:44] [Rank 0] Group 5 Loss: 5.3121
+[2025-07-06 20:54:44] [Rank 0] Group 6 Loss: 5.2065
+[2025-07-06 20:54:44] [Rank 0] Group 7 Loss: 5.2986
+[2025-07-06 20:54:44] [Rank 0] Group 8 Loss: 5.3018
+[2025-07-06 20:54:44] [Rank 0] Group 9 Loss: 5.2600
+[2025-07-06 20:54:44] [Rank 0] Group 10 Loss: 5.2914
+[2025-07-06 20:54:44] [Rank 0] Group 11 Loss: 5.3040
+[2025-07-06 20:54:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 20:54:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:54:44] [Rank 0] Group 2 FTA: 0.8984
+[2025-07-06 20:54:44] [Rank 0] Group 3 FTA: 0.9219
+[2025-07-06 20:54:44] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-06 20:54:44] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-06 20:54:44] [Rank 0] Group 6 FTA: 0.9583
+[2025-07-06 20:54:44] [Rank 0] Group 7 FTA: 0.9271
+[2025-07-06 20:54:44] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-06 20:54:44] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-06 20:54:44] [Rank 0] Group 10 FTA: 0.9453
+[2025-07-06 20:54:44] [Rank 0] Group 11 FTA: 0.9463
+[2025-07-06 20:54:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 20:54:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 20:54:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 20:54:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 20:54:45] [Rank 0] step:5501/10000 train_time:446273ms step_avg:81.13ms
+[2025-07-06 20:54:47] [Rank 0] step:5521/10000 train_time:447767ms step_avg:81.10ms
+[2025-07-06 20:54:48] [Rank 0] step:5541/10000 train_time:449257ms step_avg:81.08ms
+[2025-07-06 20:54:50] [Rank 0] step:5561/10000 train_time:450747ms step_avg:81.06ms
+[2025-07-06 20:54:52] [Rank 0] step:5581/10000 train_time:452501ms step_avg:81.08ms
+[2025-07-06 20:54:53] [Rank 0] step:5601/10000 train_time:454377ms step_avg:81.12ms
+[2025-07-06 20:54:55] [Rank 0] step:5621/10000 train_time:455872ms step_avg:81.10ms
+[2025-07-06 20:54:56] [Rank 0] step:5641/10000 train_time:457367ms step_avg:81.08ms
+[2025-07-06 20:54:58] [Rank 0] step:5661/10000 train_time:458864ms step_avg:81.06ms
+[2025-07-06 20:55:00] [Rank 0] step:5681/10000 train_time:461021ms step_avg:81.15ms
+[2025-07-06 20:55:02] [Rank 0] step:5701/10000 train_time:462517ms step_avg:81.13ms
+[2025-07-06 20:55:03] [Rank 0] step:5721/10000 train_time:464015ms step_avg:81.11ms
+[2025-07-06 20:55:05] [Rank 0] step:5741/10000 train_time:465512ms step_avg:81.09ms
+[2025-07-06 20:55:07] [Rank 0] step:5761/10000 train_time:467010ms step_avg:81.06ms
+[2025-07-06 20:55:08] [Rank 0] step:5781/10000 train_time:469166ms step_avg:81.16ms
+[2025-07-06 20:55:10] [Rank 0] step:5801/10000 train_time:470664ms step_avg:81.14ms
+[2025-07-06 20:55:11] [Rank 0] step:5821/10000 train_time:472162ms step_avg:81.11ms
+[2025-07-06 20:55:13] [Rank 0] step:5841/10000 train_time:473659ms step_avg:81.09ms
+[2025-07-06 20:55:15] [Rank 0] step:5861/10000 train_time:475944ms step_avg:81.21ms
+[2025-07-06 20:55:17] [Rank 0] step:5881/10000 train_time:477443ms step_avg:81.18ms
+[2025-07-06 20:55:18] [Rank 0] step:5901/10000 train_time:478941ms step_avg:81.16ms
+[2025-07-06 20:55:20] [Rank 0] step:5921/10000 train_time:480439ms step_avg:81.14ms
+[2025-07-06 20:55:22] [Rank 0] step:5941/10000 train_time:481937ms step_avg:81.12ms
+[2025-07-06 20:55:23] [Rank 0] step:5961/10000 train_time:484081ms step_avg:81.21ms
+[2025-07-06 20:55:25] [Rank 0] step:5981/10000 train_time:485579ms step_avg:81.19ms
+[2025-07-06 20:55:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:55:27] [Rank 0] PRINT: step:6000/10000 train_loss:0.8667 val_loss:0.8665 train_time:487078ms step_avg:81.18ms
+[2025-07-06 20:55:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:55:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:55:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:00:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:00:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:00:45] [Rank 0] Total Loss: 5.3328
+[2025-07-06 21:00:45] [Rank 0] Total FTA: 0.9405
+[2025-07-06 21:00:45] [Rank 0] Group 0 Loss: 5.5704
+[2025-07-06 21:00:45] [Rank 0] Group 1 Loss: 5.2889
+[2025-07-06 21:00:45] [Rank 0] Group 2 Loss: 5.1951
+[2025-07-06 21:00:45] [Rank 0] Group 3 Loss: 5.3671
+[2025-07-06 21:00:45] [Rank 0] Group 4 Loss: 5.3287
+[2025-07-06 21:00:45] [Rank 0] Group 5 Loss: 5.2554
+[2025-07-06 21:00:45] [Rank 0] Group 6 Loss: 5.1982
+[2025-07-06 21:00:45] [Rank 0] Group 7 Loss: 5.3080
+[2025-07-06 21:00:45] [Rank 0] Group 8 Loss: 5.3202
+[2025-07-06 21:00:45] [Rank 0] Group 9 Loss: 5.2860
+[2025-07-06 21:00:45] [Rank 0] Group 10 Loss: 5.2853
+[2025-07-06 21:00:45] [Rank 0] Group 11 Loss: 5.3401
+[2025-07-06 21:00:45] [Rank 0] Group 0 FTA: 0.8309
+[2025-07-06 21:00:45] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:00:45] [Rank 0] Group 2 FTA: 0.9062
+[2025-07-06 21:00:45] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-06 21:00:45] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 21:00:45] [Rank 0] Group 5 FTA: 0.9375
+[2025-07-06 21:00:45] [Rank 0] Group 6 FTA: 0.9583
+[2025-07-06 21:00:45] [Rank 0] Group 7 FTA: 0.9557
+[2025-07-06 21:00:45] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-06 21:00:45] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 21:00:45] [Rank 0] Group 10 FTA: 0.9512
+[2025-07-06 21:00:45] [Rank 0] Group 11 FTA: 0.9531
+[2025-07-06 21:00:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:00:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:00:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:00:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:00:47] [Rank 0] step:6001/10000 train_time:487099ms step_avg:81.17ms
+[2025-07-06 21:00:48] [Rank 0] step:6021/10000 train_time:488592ms step_avg:81.15ms
+[2025-07-06 21:00:51] [Rank 0] step:6041/10000 train_time:490743ms step_avg:81.24ms
+[2025-07-06 21:00:52] [Rank 0] step:6061/10000 train_time:492235ms step_avg:81.21ms
+[2025-07-06 21:00:54] [Rank 0] step:6081/10000 train_time:493730ms step_avg:81.19ms
+[2025-07-06 21:00:55] [Rank 0] step:6101/10000 train_time:495225ms step_avg:81.17ms
+[2025-07-06 21:00:57] [Rank 0] step:6121/10000 train_time:496974ms step_avg:81.19ms
+[2025-07-06 21:00:58] [Rank 0] step:6141/10000 train_time:498450ms step_avg:81.17ms
+[2025-07-06 21:01:00] [Rank 0] step:6161/10000 train_time:499945ms step_avg:81.15ms
+[2025-07-06 21:01:01] [Rank 0] step:6181/10000 train_time:501440ms step_avg:81.13ms
+[2025-07-06 21:01:03] [Rank 0] step:6201/10000 train_time:502936ms step_avg:81.11ms
+[2025-07-06 21:01:05] [Rank 0] step:6221/10000 train_time:505081ms step_avg:81.19ms
+[2025-07-06 21:01:06] [Rank 0] step:6241/10000 train_time:506578ms step_avg:81.17ms
+[2025-07-06 21:01:08] [Rank 0] step:6261/10000 train_time:508076ms step_avg:81.15ms
+[2025-07-06 21:01:09] [Rank 0] step:6281/10000 train_time:509575ms step_avg:81.13ms
+[2025-07-06 21:01:11] [Rank 0] step:6301/10000 train_time:511125ms step_avg:81.12ms
+[2025-07-06 21:01:13] [Rank 0] step:6321/10000 train_time:513226ms step_avg:81.19ms
+[2025-07-06 21:01:14] [Rank 0] step:6341/10000 train_time:514726ms step_avg:81.17ms
+[2025-07-06 21:01:16] [Rank 0] step:6361/10000 train_time:516224ms step_avg:81.15ms
+[2025-07-06 21:01:17] [Rank 0] step:6381/10000 train_time:517724ms step_avg:81.14ms
+[2025-07-06 21:01:19] [Rank 0] step:6401/10000 train_time:519462ms step_avg:81.15ms
+[2025-07-06 21:01:21] [Rank 0] step:6421/10000 train_time:520960ms step_avg:81.13ms
+[2025-07-06 21:01:22] [Rank 0] step:6441/10000 train_time:522460ms step_avg:81.11ms
+[2025-07-06 21:01:24] [Rank 0] step:6461/10000 train_time:523961ms step_avg:81.10ms
+[2025-07-06 21:01:26] [Rank 0] step:6481/10000 train_time:525462ms step_avg:81.08ms
+[2025-07-06 21:01:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:01:28] [Rank 0] PRINT: step:6500/10000 train_loss:0.8652 val_loss:0.8649 train_time:527627ms step_avg:81.17ms
+[2025-07-06 21:01:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:01:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:01:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:06:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:06:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:06:46] [Rank 0] Total Loss: 5.3734
+[2025-07-06 21:06:46] [Rank 0] Total FTA: 0.9743
+[2025-07-06 21:06:46] [Rank 0] Group 0 Loss: 5.6322
+[2025-07-06 21:06:46] [Rank 0] Group 1 Loss: 5.3168
+[2025-07-06 21:06:46] [Rank 0] Group 2 Loss: 5.1842
+[2025-07-06 21:06:46] [Rank 0] Group 3 Loss: 5.4530
+[2025-07-06 21:06:46] [Rank 0] Group 4 Loss: 5.3924
+[2025-07-06 21:06:46] [Rank 0] Group 5 Loss: 5.2842
+[2025-07-06 21:06:46] [Rank 0] Group 6 Loss: 5.2315
+[2025-07-06 21:06:46] [Rank 0] Group 7 Loss: 5.3390
+[2025-07-06 21:06:46] [Rank 0] Group 8 Loss: 5.3627
+[2025-07-06 21:06:46] [Rank 0] Group 9 Loss: 5.2708
+[2025-07-06 21:06:46] [Rank 0] Group 10 Loss: 5.3766
+[2025-07-06 21:06:46] [Rank 0] Group 11 Loss: 5.3617
+[2025-07-06 21:06:46] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:06:46] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:06:46] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:06:46] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 21:06:46] [Rank 0] Group 4 FTA: 0.9349
+[2025-07-06 21:06:46] [Rank 0] Group 5 FTA: 0.9766
+[2025-07-06 21:06:46] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-06 21:06:46] [Rank 0] Group 7 FTA: 0.9505
+[2025-07-06 21:06:46] [Rank 0] Group 8 FTA: 0.9427
+[2025-07-06 21:06:46] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 21:06:46] [Rank 0] Group 10 FTA: 0.9707
+[2025-07-06 21:06:46] [Rank 0] Group 11 FTA: 0.9619
+[2025-07-06 21:06:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:06:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:06:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:06:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:06:48] [Rank 0] step:6501/10000 train_time:527647ms step_avg:81.16ms
+[2025-07-06 21:06:49] [Rank 0] step:6521/10000 train_time:529143ms step_avg:81.14ms
+[2025-07-06 21:06:51] [Rank 0] step:6541/10000 train_time:530769ms step_avg:81.14ms
+[2025-07-06 21:06:52] [Rank 0] step:6561/10000 train_time:532262ms step_avg:81.13ms
+[2025-07-06 21:06:54] [Rank 0] step:6581/10000 train_time:534411ms step_avg:81.21ms
+[2025-07-06 21:06:56] [Rank 0] step:6601/10000 train_time:535904ms step_avg:81.19ms
+[2025-07-06 21:06:57] [Rank 0] step:6621/10000 train_time:537399ms step_avg:81.17ms
+[2025-07-06 21:06:59] [Rank 0] step:6641/10000 train_time:538894ms step_avg:81.15ms
+[2025-07-06 21:07:01] [Rank 0] step:6661/10000 train_time:541078ms step_avg:81.23ms
+[2025-07-06 21:07:02] [Rank 0] step:6681/10000 train_time:542556ms step_avg:81.21ms
+[2025-07-06 21:07:04] [Rank 0] step:6701/10000 train_time:544050ms step_avg:81.19ms
+[2025-07-06 21:07:05] [Rank 0] step:6721/10000 train_time:545547ms step_avg:81.17ms
+[2025-07-06 21:07:07] [Rank 0] step:6741/10000 train_time:547045ms step_avg:81.15ms
+[2025-07-06 21:07:09] [Rank 0] step:6761/10000 train_time:549199ms step_avg:81.23ms
+[2025-07-06 21:07:11] [Rank 0] step:6781/10000 train_time:550698ms step_avg:81.21ms
+[2025-07-06 21:07:12] [Rank 0] step:6801/10000 train_time:552198ms step_avg:81.19ms
+[2025-07-06 21:07:14] [Rank 0] step:6821/10000 train_time:553699ms step_avg:81.18ms
+[2025-07-06 21:07:16] [Rank 0] step:6841/10000 train_time:555863ms step_avg:81.25ms
+[2025-07-06 21:07:17] [Rank 0] step:6861/10000 train_time:557342ms step_avg:81.23ms
+[2025-07-06 21:07:19] [Rank 0] step:6881/10000 train_time:558844ms step_avg:81.22ms
+[2025-07-06 21:07:20] [Rank 0] step:6901/10000 train_time:560343ms step_avg:81.20ms
+[2025-07-06 21:07:22] [Rank 0] step:6921/10000 train_time:561844ms step_avg:81.18ms
+[2025-07-06 21:07:24] [Rank 0] step:6941/10000 train_time:563984ms step_avg:81.25ms
+[2025-07-06 21:07:25] [Rank 0] step:6961/10000 train_time:565484ms step_avg:81.24ms
+[2025-07-06 21:07:27] [Rank 0] step:6981/10000 train_time:566985ms step_avg:81.22ms
+[2025-07-06 21:07:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:07:29] [Rank 0] PRINT: step:7000/10000 train_loss:0.8637 val_loss:0.8640 train_time:568487ms step_avg:81.21ms
+[2025-07-06 21:07:29] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:07:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
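The "stratified sampling" step announced in the entries above is not reproduced in this log, and the evaluation set consistently comes out at 5633 samples rather than the ~5000 target. A minimal sketch of one way that can happen, assuming a hypothetical `samples_by_group` dict keyed by the twelve class groups (all names here are illustrative, not the script's):

```python
# Hypothetical sketch of stratified sampling toward a ~5000-sample target.
# Rounding each group's quota up keeps every group represented, which is one
# plausible reason the logged set size (5633) overshoots the target.
import math
import random

def stratified_sample(samples_by_group: dict, target: int = 5000, seed: int = 0) -> list:
    rng = random.Random(seed)
    total = sum(len(items) for items in samples_by_group.values())
    picked = []
    for group, items in samples_by_group.items():
        # Proportional quota for this group, rounded up.
        quota = math.ceil(target * len(items) / total)
        picked.extend(rng.sample(items, min(quota, len(items))))
    return picked  # may exceed `target`, e.g. 5633 instead of 5000
```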
+[2025-07-06 21:07:29] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:12:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:12:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:12:48] [Rank 0] Total Loss: 5.3979
+[2025-07-06 21:12:48] [Rank 0] Total FTA: 0.9735
+[2025-07-06 21:12:48] [Rank 0] Group 0 Loss: 5.5893
+[2025-07-06 21:12:48] [Rank 0] Group 1 Loss: 5.4068
+[2025-07-06 21:12:48] [Rank 0] Group 2 Loss: 5.2773
+[2025-07-06 21:12:48] [Rank 0] Group 3 Loss: 5.4984
+[2025-07-06 21:12:48] [Rank 0] Group 4 Loss: 5.3704
+[2025-07-06 21:12:48] [Rank 0] Group 5 Loss: 5.3512
+[2025-07-06 21:12:48] [Rank 0] Group 6 Loss: 5.2543
+[2025-07-06 21:12:48] [Rank 0] Group 7 Loss: 5.3457
+[2025-07-06 21:12:48] [Rank 0] Group 8 Loss: 5.3756
+[2025-07-06 21:12:48] [Rank 0] Group 9 Loss: 5.3577
+[2025-07-06 21:12:48] [Rank 0] Group 10 Loss: 5.3918
+[2025-07-06 21:12:48] [Rank 0] Group 11 Loss: 5.3808
+[2025-07-06 21:12:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:12:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:12:48] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:12:48] [Rank 0] Group 3 FTA: 0.9453
+[2025-07-06 21:12:48] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 21:12:48] [Rank 0] Group 5 FTA: 0.9688
+[2025-07-06 21:12:48] [Rank 0] Group 6 FTA: 0.9635
+[2025-07-06 21:12:48] [Rank 0] Group 7 FTA: 0.9453
+[2025-07-06 21:12:48] [Rank 0] Group 8 FTA: 0.9688
+[2025-07-06 21:12:48] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-06 21:12:48] [Rank 0] Group 10 FTA: 0.9648
+[2025-07-06 21:12:48] [Rank 0] Group 11 FTA: 0.9580
+[2025-07-06 21:12:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:12:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:12:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:12:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:12:50] [Rank 0] step:7001/10000 train_time:568508ms step_avg:81.20ms
+[2025-07-06 21:12:52] [Rank 0] step:7021/10000 train_time:570275ms step_avg:81.22ms
+[2025-07-06 21:12:53] [Rank 0] step:7041/10000 train_time:572181ms step_avg:81.26ms
+[2025-07-06 21:12:55] [Rank 0] step:7061/10000 train_time:573671ms step_avg:81.25ms
+[2025-07-06 21:12:56] [Rank 0] step:7081/10000 train_time:575165ms step_avg:81.23ms
+[2025-07-06 21:12:58] [Rank 0] step:7101/10000 train_time:576660ms step_avg:81.21ms
+[2025-07-06 21:13:00] [Rank 0] step:7121/10000 train_time:578827ms step_avg:81.28ms
+[2025-07-06 21:13:01] [Rank 0] step:7141/10000 train_time:580324ms step_avg:81.27ms
+[2025-07-06 21:13:03] [Rank 0] step:7161/10000 train_time:581821ms step_avg:81.25ms
+[2025-07-06 21:13:04] [Rank 0] step:7181/10000 train_time:583317ms step_avg:81.23ms
+[2025-07-06 21:13:06] [Rank 0] step:7201/10000 train_time:584813ms step_avg:81.21ms
+[2025-07-06 21:13:08] [Rank 0] step:7221/10000 train_time:586971ms step_avg:81.29ms
+[2025-07-06 21:13:10] [Rank 0] step:7241/10000 train_time:588557ms step_avg:81.28ms
+[2025-07-06 21:13:11] [Rank 0] step:7261/10000 train_time:590116ms step_avg:81.27ms
+[2025-07-06 21:13:13] [Rank 0] step:7281/10000 train_time:591615ms step_avg:81.25ms
+[2025-07-06 21:13:15] [Rank 0] step:7301/10000 train_time:593772ms step_avg:81.33ms
+[2025-07-06 21:13:16] [Rank 0] step:7321/10000 train_time:595271ms step_avg:81.31ms
+[2025-07-06 21:13:18] [Rank 0] step:7341/10000 train_time:596770ms step_avg:81.29ms
+[2025-07-06 21:13:19] [Rank 0] step:7361/10000 train_time:598272ms step_avg:81.28ms
+[2025-07-06 21:13:21] [Rank 0] step:7381/10000 train_time:600027ms step_avg:81.29ms
+[2025-07-06 21:13:23] [Rank 0] step:7401/10000 train_time:601919ms step_avg:81.33ms
+[2025-07-06 21:13:24] [Rank 0] step:7421/10000 train_time:603419ms step_avg:81.31ms
+[2025-07-06 21:13:26] [Rank 0] step:7441/10000 train_time:604921ms step_avg:81.30ms
+[2025-07-06 21:13:27] [Rank 0] step:7461/10000 train_time:606423ms step_avg:81.28ms
+[2025-07-06 21:13:30] [Rank 0] step:7481/10000 train_time:608589ms step_avg:81.35ms
+[2025-07-06 21:13:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:13:32] [Rank 0] PRINT: step:7500/10000 train_loss:0.8624 val_loss:0.8629 train_time:610090ms step_avg:81.35ms
+[2025-07-06 21:13:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:13:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:13:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:18:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:18:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:18:55] [Rank 0] Total Loss: 5.3640
+[2025-07-06 21:18:55] [Rank 0] Total FTA: 0.9735
+[2025-07-06 21:18:55] [Rank 0] Group 0 Loss: 5.6572
+[2025-07-06 21:18:55] [Rank 0] Group 1 Loss: 5.3959
+[2025-07-06 21:18:55] [Rank 0] Group 2 Loss: 5.2554
+[2025-07-06 21:18:55] [Rank 0] Group 3 Loss: 5.4084
+[2025-07-06 21:18:55] [Rank 0] Group 4 Loss: 5.2743
+[2025-07-06 21:18:55] [Rank 0] Group 5 Loss: 5.2088
+[2025-07-06 21:18:55] [Rank 0] Group 6 Loss: 5.2441
+[2025-07-06 21:18:55] [Rank 0] Group 7 Loss: 5.3356
+[2025-07-06 21:18:55] [Rank 0] Group 8 Loss: 5.3311
+[2025-07-06 21:18:55] [Rank 0] Group 9 Loss: 5.3268
+[2025-07-06 21:18:55] [Rank 0] Group 10 Loss: 5.3391
+[2025-07-06 21:18:55] [Rank 0] Group 11 Loss: 5.3376
+[2025-07-06 21:18:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:18:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:18:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:18:55] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-06 21:18:55] [Rank 0] Group 4 FTA: 0.9818
+[2025-07-06 21:18:55] [Rank 0] Group 5 FTA: 0.9141
+[2025-07-06 21:18:55] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-06 21:18:55] [Rank 0] Group 7 FTA: 0.9688
+[2025-07-06 21:18:55] [Rank 0] Group 8 FTA: 0.9688
+[2025-07-06 21:18:55] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 21:18:55] [Rank 0] Group 10 FTA: 0.9648
+[2025-07-06 21:18:55] [Rank 0] Group 11 FTA: 0.9629
+[2025-07-06 21:18:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:18:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:18:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:18:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:18:56] [Rank 0] step:7501/10000 train_time:610110ms step_avg:81.34ms
+[2025-07-06 21:18:58] [Rank 0] step:7521/10000 train_time:611597ms step_avg:81.32ms
+[2025-07-06 21:18:59] [Rank 0] step:7541/10000 train_time:613088ms step_avg:81.30ms
+[2025-07-06 21:19:02] [Rank 0] step:7561/10000 train_time:614579ms step_avg:81.28ms
+[2025-07-06 21:19:03] [Rank 0] step:7581/10000 train_time:616743ms step_avg:81.35ms
+[2025-07-06 21:19:04] [Rank 0] step:7601/10000 train_time:618235ms step_avg:81.34ms
+[2025-07-06 21:19:06] [Rank 0] step:7621/10000 train_time:619729ms step_avg:81.32ms
+[2025-07-06 21:19:07] [Rank 0] step:7641/10000 train_time:621225ms step_avg:81.30ms
+[2025-07-06 21:19:09] [Rank 0] step:7661/10000 train_time:622854ms step_avg:81.30ms
+[2025-07-06 21:19:11] [Rank 0] step:7681/10000 train_time:624349ms step_avg:81.28ms
+[2025-07-06 21:19:12] [Rank 0] step:7701/10000 train_time:625845ms step_avg:81.27ms
+[2025-07-06 21:19:14] [Rank 0] step:7721/10000 train_time:627342ms step_avg:81.25ms
+[2025-07-06 21:19:16] [Rank 0] step:7741/10000 train_time:629095ms step_avg:81.27ms
+[2025-07-06 21:19:17] [Rank 0] step:7761/10000 train_time:630984ms step_avg:81.30ms
+[2025-07-06 21:19:19] [Rank 0] step:7781/10000 train_time:632481ms step_avg:81.29ms
+[2025-07-06 21:19:20] [Rank 0] step:7801/10000 train_time:633979ms step_avg:81.27ms
+[2025-07-06 21:19:22] [Rank 0] step:7821/10000 train_time:635476ms step_avg:81.25ms
+[2025-07-06 21:19:24] [Rank 0] step:7841/10000 train_time:637628ms step_avg:81.32ms
+[2025-07-06 21:19:25] [Rank 0] step:7861/10000 train_time:639126ms step_avg:81.30ms
+[2025-07-06 21:19:27] [Rank 0] step:7881/10000 train_time:640625ms step_avg:81.29ms
+[2025-07-06 21:19:28] [Rank 0] step:7901/10000 train_time:642126ms step_avg:81.27ms
+[2025-07-06 21:19:30] [Rank 0] step:7921/10000 train_time:643948ms step_avg:81.30ms
+[2025-07-06 21:19:32] [Rank 0] step:7941/10000 train_time:645431ms step_avg:81.28ms
+[2025-07-06 21:19:33] [Rank 0] step:7961/10000 train_time:646931ms step_avg:81.26ms
+[2025-07-06 21:19:35] [Rank 0] step:7981/10000 train_time:648432ms step_avg:81.25ms
+[2025-07-06 21:19:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:19:37] [Rank 0] PRINT: step:8000/10000 train_loss:0.8613 val_loss:0.8625 train_time:649934ms step_avg:81.24ms
+[2025-07-06 21:19:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:19:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
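The warning repeated before every validation pass above is plain arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches fit and the remaining half batch (131072 tokens) is skipped. A minimal sketch of such a check, with hypothetical names (the actual validation loop is not shown in this log):

```python
# Sketch of the divisibility check behind the recurring warning; variable
# names are assumptions, not the script's. Here val_tokens / val_batch_size
# = 7.5, so 7 full batches (1835008 tokens) are evaluated and 131072 tokens
# are dropped.
val_tokens = 1966080
val_batch_size = 262144

if val_tokens % val_batch_size != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")

num_batches = val_tokens // val_batch_size                 # 7
tokens_missed = val_tokens - num_batches * val_batch_size  # 131072
```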
+[2025-07-06 21:19:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:24:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:24:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:24:59] [Rank 0] Total Loss: 5.3471
+[2025-07-06 21:24:59] [Rank 0] Total FTA: 0.9554
+[2025-07-06 21:24:59] [Rank 0] Group 0 Loss: 5.6713
+[2025-07-06 21:24:59] [Rank 0] Group 1 Loss: 5.2286
+[2025-07-06 21:24:59] [Rank 0] Group 2 Loss: 5.2688
+[2025-07-06 21:24:59] [Rank 0] Group 3 Loss: 5.4398
+[2025-07-06 21:24:59] [Rank 0] Group 4 Loss: 5.3529
+[2025-07-06 21:24:59] [Rank 0] Group 5 Loss: 5.2098
+[2025-07-06 21:24:59] [Rank 0] Group 6 Loss: 5.1809
+[2025-07-06 21:24:59] [Rank 0] Group 7 Loss: 5.3780
+[2025-07-06 21:24:59] [Rank 0] Group 8 Loss: 5.3059
+[2025-07-06 21:24:59] [Rank 0] Group 9 Loss: 5.3082
+[2025-07-06 21:24:59] [Rank 0] Group 10 Loss: 5.2793
+[2025-07-06 21:24:59] [Rank 0] Group 11 Loss: 5.3019
+[2025-07-06 21:24:59] [Rank 0] Group 0 FTA: 0.8375
+[2025-07-06 21:24:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:24:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:24:59] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 21:24:59] [Rank 0] Group 4 FTA: 0.9792
+[2025-07-06 21:24:59] [Rank 0] Group 5 FTA: 0.9505
+[2025-07-06 21:24:59] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-06 21:24:59] [Rank 0] Group 7 FTA: 0.9557
+[2025-07-06 21:24:59] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-06 21:24:59] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 21:24:59] [Rank 0] Group 10 FTA: 0.9785
+[2025-07-06 21:24:59] [Rank 0] Group 11 FTA: 0.9600
+[2025-07-06 21:24:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:25:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:25:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:25:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:25:00] [Rank 0] step:8001/10000 train_time:649956ms step_avg:81.23ms
+[2025-07-06 21:25:02] [Rank 0] step:8021/10000 train_time:652122ms step_avg:81.30ms
+[2025-07-06 21:25:04] [Rank 0] step:8041/10000 train_time:653612ms step_avg:81.28ms
+[2025-07-06 21:25:05] [Rank 0] step:8061/10000 train_time:655104ms step_avg:81.27ms
+[2025-07-06 21:25:07] [Rank 0] step:8081/10000 train_time:656599ms step_avg:81.25ms
+[2025-07-06 21:25:09] [Rank 0] step:8101/10000 train_time:658756ms step_avg:81.32ms
+[2025-07-06 21:25:10] [Rank 0] step:8121/10000 train_time:660230ms step_avg:81.30ms
+[2025-07-06 21:25:12] [Rank 0] step:8141/10000 train_time:661724ms step_avg:81.28ms
+[2025-07-06 21:25:13] [Rank 0] step:8161/10000 train_time:663220ms step_avg:81.27ms
+[2025-07-06 21:25:15] [Rank 0] step:8181/10000 train_time:664716ms step_avg:81.25ms
+[2025-07-06 21:25:17] [Rank 0] step:8201/10000 train_time:666446ms step_avg:81.26ms
+[2025-07-06 21:25:18] [Rank 0] step:8221/10000 train_time:667944ms step_avg:81.25ms
+[2025-07-06 21:25:20] [Rank 0] step:8241/10000 train_time:669443ms step_avg:81.23ms
+[2025-07-06 21:25:21] [Rank 0] step:8261/10000 train_time:670942ms step_avg:81.22ms
+[2025-07-06 21:25:23] [Rank 0] step:8281/10000 train_time:673125ms step_avg:81.29ms
+[2025-07-06 21:25:25] [Rank 0] step:8301/10000 train_time:674606ms step_avg:81.27ms
+[2025-07-06 21:25:26] [Rank 0] step:8321/10000 train_time:676103ms step_avg:81.25ms
+[2025-07-06 21:25:28] [Rank 0] step:8341/10000 train_time:677602ms step_avg:81.24ms
+[2025-07-06 21:25:29] [Rank 0] step:8361/10000 train_time:679103ms step_avg:81.22ms
+[2025-07-06 21:25:31] [Rank 0] step:8381/10000 train_time:681247ms step_avg:81.28ms
+[2025-07-06 21:25:33] [Rank 0] step:8401/10000 train_time:682746ms step_avg:81.27ms
+[2025-07-06 21:25:34] [Rank 0] step:8421/10000 train_time:684247ms step_avg:81.25ms
+[2025-07-06 21:25:36] [Rank 0] step:8441/10000 train_time:685752ms step_avg:81.24ms
+[2025-07-06 21:25:38] [Rank 0] step:8461/10000 train_time:687305ms step_avg:81.23ms
+[2025-07-06 21:25:40] [Rank 0] step:8481/10000 train_time:689423ms step_avg:81.29ms
+[2025-07-06 21:25:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:25:42] [Rank 0] PRINT: step:8500/10000 train_loss:0.8604 val_loss:0.8621 train_time:690926ms step_avg:81.29ms
+[2025-07-06 21:25:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:25:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:25:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:31:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:31:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:31:04] [Rank 0] Total Loss: 5.3688
+[2025-07-06 21:31:04] [Rank 0] Total FTA: 0.9512
+[2025-07-06 21:31:04] [Rank 0] Group 0 Loss: 5.6102
+[2025-07-06 21:31:04] [Rank 0] Group 1 Loss: 5.2017
+[2025-07-06 21:31:04] [Rank 0] Group 2 Loss: 5.2410
+[2025-07-06 21:31:04] [Rank 0] Group 3 Loss: 5.5133
+[2025-07-06 21:31:04] [Rank 0] Group 4 Loss: 5.3183
+[2025-07-06 21:31:04] [Rank 0] Group 5 Loss: 5.2696
+[2025-07-06 21:31:04] [Rank 0] Group 6 Loss: 5.2646
+[2025-07-06 21:31:04] [Rank 0] Group 7 Loss: 5.3511
+[2025-07-06 21:31:04] [Rank 0] Group 8 Loss: 5.3484
+[2025-07-06 21:31:04] [Rank 0] Group 9 Loss: 5.2825
+[2025-07-06 21:31:04] [Rank 0] Group 10 Loss: 5.3408
+[2025-07-06 21:31:04] [Rank 0] Group 11 Loss: 5.3887
+[2025-07-06 21:31:04] [Rank 0] Group 0 FTA: 0.8349
+[2025-07-06 21:31:04] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:31:04] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:31:04] [Rank 0] Group 3 FTA: 0.9505
+[2025-07-06 21:31:04] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 21:31:04] [Rank 0] Group 5 FTA: 0.9453
+[2025-07-06 21:31:04] [Rank 0] Group 6 FTA: 0.9505
+[2025-07-06 21:31:04] [Rank 0] Group 7 FTA: 0.9635
+[2025-07-06 21:31:04] [Rank 0] Group 8 FTA: 0.9479
+[2025-07-06 21:31:04] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 21:31:04] [Rank 0] Group 10 FTA: 0.9727
+[2025-07-06 21:31:04] [Rank 0] Group 11 FTA: 0.9658
+[2025-07-06 21:31:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:31:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:31:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:31:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:31:05] [Rank 0] step:8501/10000 train_time:690948ms step_avg:81.28ms
+[2025-07-06 21:31:07] [Rank 0] step:8521/10000 train_time:692439ms step_avg:81.26ms
+[2025-07-06 21:31:08] [Rank 0] step:8541/10000 train_time:693930ms step_avg:81.25ms
+[2025-07-06 21:31:11] [Rank 0] step:8561/10000 train_time:696084ms step_avg:81.31ms
+[2025-07-06 21:31:12] [Rank 0] step:8581/10000 train_time:697576ms step_avg:81.29ms
+[2025-07-06 21:31:14] [Rank 0] step:8601/10000 train_time:699068ms step_avg:81.28ms
+[2025-07-06 21:31:15] [Rank 0] step:8621/10000 train_time:700561ms step_avg:81.26ms
+[2025-07-06 21:31:17] [Rank 0] step:8641/10000 train_time:702719ms step_avg:81.32ms
+[2025-07-06 21:31:19] [Rank 0] step:8661/10000 train_time:704194ms step_avg:81.31ms
+[2025-07-06 21:31:20] [Rank 0] step:8681/10000 train_time:705690ms step_avg:81.29ms
+[2025-07-06 21:31:22] [Rank 0] step:8701/10000 train_time:707185ms step_avg:81.28ms
+[2025-07-06 21:31:23] [Rank 0] step:8721/10000 train_time:708680ms step_avg:81.26ms
+[2025-07-06 21:31:25] [Rank 0] step:8741/10000 train_time:710414ms step_avg:81.27ms
+[2025-07-06 21:31:26] [Rank 0] step:8761/10000 train_time:711910ms step_avg:81.26ms
+[2025-07-06 21:31:28] [Rank 0] step:8781/10000 train_time:713408ms step_avg:81.24ms
+[2025-07-06 21:31:29] [Rank 0] step:8801/10000 train_time:714907ms step_avg:81.23ms
+[2025-07-06 21:31:32] [Rank 0] step:8821/10000 train_time:716405ms step_avg:81.22ms
+[2025-07-06 21:31:33] [Rank 0] step:8841/10000 train_time:718566ms step_avg:81.28ms
+[2025-07-06 21:31:35] [Rank 0] step:8861/10000 train_time:720063ms step_avg:81.26ms
+[2025-07-06 21:31:36] [Rank 0] step:8881/10000 train_time:721564ms step_avg:81.25ms
+[2025-07-06 21:31:38] [Rank 0] step:8901/10000 train_time:723064ms step_avg:81.23ms
+[2025-07-06 21:31:40] [Rank 0] step:8921/10000 train_time:725206ms step_avg:81.29ms
+[2025-07-06 21:31:41] [Rank 0] step:8941/10000 train_time:726706ms step_avg:81.28ms
+[2025-07-06 21:31:43] [Rank 0] step:8961/10000 train_time:728206ms step_avg:81.26ms
+[2025-07-06 21:31:44] [Rank 0] step:8981/10000 train_time:729708ms step_avg:81.25ms
+[2025-07-06 21:31:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:31:47] [Rank 0] PRINT: step:9000/10000 train_loss:0.8595 val_loss:0.8618 train_time:731209ms step_avg:81.25ms
+[2025-07-06 21:31:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:31:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
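The step_avg field in the step entries above is consistent with the cumulative train_time divided by the number of completed steps, and train_time barely advances across the multi-minute evaluation pauses (e.g. 729708 ms at step 8981 vs 731239 ms at step 9001), so the timer is evidently paused while evaluation runs. A sketch of that presumed computation, not taken from the script itself:

```python
# Presumed derivation of step_avg: cumulative training wall time (with eval
# time excluded) divided by completed steps. Checked against the step:8981
# entry above: 729708 ms / 8981 steps ≈ 81.25 ms.
def step_avg(train_time_ms: int, step: int) -> float:
    return train_time_ms / step

print(f"step_avg:{step_avg(729708, 8981):.2f}ms")  # -> step_avg:81.25ms
```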
+[2025-07-06 21:31:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:37:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:37:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:37:08] [Rank 0] Total Loss: 5.3651
+[2025-07-06 21:37:08] [Rank 0] Total FTA: 0.9846
+[2025-07-06 21:37:08] [Rank 0] Group 0 Loss: 5.5725
+[2025-07-06 21:37:08] [Rank 0] Group 1 Loss: 5.3091
+[2025-07-06 21:37:08] [Rank 0] Group 2 Loss: 5.2356
+[2025-07-06 21:37:08] [Rank 0] Group 3 Loss: 5.5102
+[2025-07-06 21:37:08] [Rank 0] Group 4 Loss: 5.3218
+[2025-07-06 21:37:08] [Rank 0] Group 5 Loss: 5.3471
+[2025-07-06 21:37:08] [Rank 0] Group 6 Loss: 5.2135
+[2025-07-06 21:37:08] [Rank 0] Group 7 Loss: 5.3555
+[2025-07-06 21:37:08] [Rank 0] Group 8 Loss: 5.3719
+[2025-07-06 21:37:08] [Rank 0] Group 9 Loss: 5.2348
+[2025-07-06 21:37:08] [Rank 0] Group 10 Loss: 5.3490
+[2025-07-06 21:37:08] [Rank 0] Group 11 Loss: 5.3459
+[2025-07-06 21:37:08] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:37:08] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:37:08] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:37:08] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 21:37:08] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-06 21:37:08] [Rank 0] Group 5 FTA: 0.9714
+[2025-07-06 21:37:08] [Rank 0] Group 6 FTA: 0.9896
+[2025-07-06 21:37:08] [Rank 0] Group 7 FTA: 0.9688
+[2025-07-06 21:37:08] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-06 21:37:08] [Rank 0] Group 9 FTA: 0.9805
+[2025-07-06 21:37:08] [Rank 0] Group 10 FTA: 0.9902
+[2025-07-06 21:37:08] [Rank 0] Group 11 FTA: 0.9746
+[2025-07-06 21:37:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:37:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:37:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:37:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:37:11] [Rank 0] step:9001/10000 train_time:731239ms step_avg:81.24ms
+[2025-07-06 21:37:12] [Rank 0] step:9021/10000 train_time:733426ms step_avg:81.30ms
+[2025-07-06 21:37:14] [Rank 0] step:9041/10000 train_time:734914ms step_avg:81.29ms
+[2025-07-06 21:37:15] [Rank 0] step:9061/10000 train_time:736406ms step_avg:81.27ms
+[2025-07-06 21:37:17] [Rank 0] step:9081/10000 train_time:737900ms step_avg:81.26ms
+[2025-07-06 21:37:18] [Rank 0] step:9101/10000 train_time:739632ms step_avg:81.27ms
+[2025-07-06 21:37:20] [Rank 0] step:9121/10000 train_time:741125ms step_avg:81.25ms
+[2025-07-06 21:37:21] [Rank 0] step:9141/10000 train_time:742621ms step_avg:81.24ms
+[2025-07-06 21:37:23] [Rank 0] step:9161/10000 train_time:744116ms step_avg:81.23ms
+[2025-07-06 21:37:25] [Rank 0] step:9181/10000 train_time:745746ms step_avg:81.23ms
+[2025-07-06 21:37:27] [Rank 0] step:9201/10000 train_time:747890ms step_avg:81.28ms
+[2025-07-06 21:37:28] [Rank 0] step:9221/10000 train_time:749385ms step_avg:81.27ms
+[2025-07-06 21:37:30] [Rank 0] step:9241/10000 train_time:750881ms step_avg:81.26ms
+[2025-07-06 21:37:31] [Rank 0] step:9261/10000 train_time:752377ms step_avg:81.24ms
+[2025-07-06 21:37:33] [Rank 0] step:9281/10000 train_time:754532ms step_avg:81.30ms
+[2025-07-06 21:37:35] [Rank 0] step:9301/10000 train_time:756029ms step_avg:81.28ms
+[2025-07-06 21:37:36] [Rank 0] step:9321/10000 train_time:757527ms step_avg:81.27ms
+[2025-07-06 21:37:38] [Rank 0] step:9341/10000 train_time:759025ms step_avg:81.26ms
+[2025-07-06 21:37:40] [Rank 0] step:9361/10000 train_time:760522ms step_avg:81.24ms
+[2025-07-06 21:37:41] [Rank 0] step:9381/10000 train_time:762676ms step_avg:81.30ms
+[2025-07-06 21:37:43] [Rank 0] step:9401/10000 train_time:764174ms step_avg:81.29ms
+[2025-07-06 21:37:44] [Rank 0] step:9421/10000 train_time:765675ms step_avg:81.27ms
+[2025-07-06 21:37:46] [Rank 0] step:9441/10000 train_time:767174ms step_avg:81.26ms
+[2025-07-06 21:37:48] [Rank 0] step:9461/10000 train_time:769317ms step_avg:81.31ms
+[2025-07-06 21:37:50] [Rank 0] step:9481/10000 train_time:770817ms step_avg:81.30ms
+[2025-07-06 21:37:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:37:52] [Rank 0] PRINT: step:9500/10000 train_loss:0.8587 val_loss:0.8616 train_time:772317ms step_avg:81.30ms
+[2025-07-06 21:37:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:37:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
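Each detailed-evaluation block above ends by rewriting the four curve PNGs (per-class loss, per-class FTA, total loss, total FTA). The plotting code is not part of this log; the following is a minimal matplotlib sketch, assuming a hypothetical `history` dict of (step, value) points per group accumulated across evaluations:

```python
# Hypothetical sketch of the per-class curve update logged above; the real
# plotting code is not shown in this log. `history` maps group id -> list of
# (step, metric_value) pairs collected at each detailed evaluation.
import matplotlib.pyplot as plt

def save_per_class_curves(history: dict, out_path: str, ylabel: str) -> None:
    fig, ax = plt.subplots()
    for group, points in sorted(history.items()):
        steps, values = zip(*points)          # unzip into x and y series
        ax.plot(steps, values, label=f"Group {group}")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    ax.legend(fontsize="small")
    fig.savefig(out_path)                      # overwrite the PNG in place
    plt.close(fig)
```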
+[2025-07-06 21:37:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:43:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:43:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:43:13] [Rank 0] Total Loss: 5.3667
+[2025-07-06 21:43:13] [Rank 0] Total FTA: 0.9879
+[2025-07-06 21:43:13] [Rank 0] Group 0 Loss: 5.6392
+[2025-07-06 21:43:13] [Rank 0] Group 1 Loss: 5.3131
+[2025-07-06 21:43:13] [Rank 0] Group 2 Loss: 5.2323
+[2025-07-06 21:43:13] [Rank 0] Group 3 Loss: 5.4549
+[2025-07-06 21:43:13] [Rank 0] Group 4 Loss: 5.3175
+[2025-07-06 21:43:13] [Rank 0] Group 5 Loss: 5.3249
+[2025-07-06 21:43:13] [Rank 0] Group 6 Loss: 5.2556
+[2025-07-06 21:43:13] [Rank 0] Group 7 Loss: 5.3608
+[2025-07-06 21:43:13] [Rank 0] Group 8 Loss: 5.3024
+[2025-07-06 21:43:13] [Rank 0] Group 9 Loss: 5.3686
+[2025-07-06 21:43:13] [Rank 0] Group 10 Loss: 5.3509
+[2025-07-06 21:43:13] [Rank 0] Group 11 Loss: 5.3089
+[2025-07-06 21:43:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:43:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:43:13] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:43:13] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 21:43:13] [Rank 0] Group 4 FTA: 0.9818
+[2025-07-06 21:43:13] [Rank 0] Group 5 FTA: 0.9557
+[2025-07-06 21:43:13] [Rank 0] Group 6 FTA: 0.9818
+[2025-07-06 21:43:13] [Rank 0] Group 7 FTA: 0.9896
+[2025-07-06 21:43:13] [Rank 0] Group 8 FTA: 0.9844
+[2025-07-06 21:43:13] [Rank 0] Group 9 FTA: 0.9805
+[2025-07-06 21:43:13] [Rank 0] Group 10 FTA: 0.9922
+[2025-07-06 21:43:13] [Rank 0] Group 11 FTA: 0.9824
+[2025-07-06 21:43:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:43:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:43:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:43:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:43:14] [Rank 0] step:9501/10000 train_time:772338ms step_avg:81.29ms
+[2025-07-06 21:43:16] [Rank 0] step:9521/10000 train_time:773830ms step_avg:81.28ms
+[2025-07-06 21:43:18] [Rank 0] step:9541/10000 train_time:776002ms step_avg:81.33ms
+[2025-07-06 21:43:20] [Rank 0] step:9561/10000 train_time:777475ms step_avg:81.32ms
+[2025-07-06 21:43:21] [Rank 0] step:9581/10000 train_time:778967ms step_avg:81.30ms
+[2025-07-06 21:43:23] [Rank 0] step:9601/10000 train_time:780461ms step_avg:81.29ms
+[2025-07-06 21:43:24] [Rank 0] step:9621/10000 train_time:781956ms step_avg:81.28ms
+[2025-07-06 21:43:26] [Rank 0] step:9641/10000 train_time:784119ms step_avg:81.33ms
+[2025-07-06 21:43:28] [Rank 0] step:9661/10000 train_time:785614ms step_avg:81.32ms
+[2025-07-06 21:43:29] [Rank 0] step:9681/10000 train_time:787110ms step_avg:81.30ms
+[2025-07-06 21:43:31] [Rank 0] step:9701/10000 train_time:788608ms step_avg:81.29ms
+[2025-07-06 21:43:33] [Rank 0] step:9721/10000 train_time:790106ms step_avg:81.28ms
+[2025-07-06 21:43:34] [Rank 0] step:9741/10000 train_time:792266ms step_avg:81.33ms
+[2025-07-06 21:43:36] [Rank 0] step:9761/10000 train_time:793762ms step_avg:81.32ms
+[2025-07-06 21:43:37] [Rank 0] step:9781/10000 train_time:795260ms step_avg:81.31ms
+[2025-07-06 21:43:39] [Rank 0] step:9801/10000 train_time:796759ms step_avg:81.29ms
+[2025-07-06 21:43:41] [Rank 0] step:9821/10000 train_time:798493ms step_avg:81.30ms
+[2025-07-06 21:43:42] [Rank 0] step:9841/10000 train_time:799993ms step_avg:81.29ms
+[2025-07-06 21:43:44] [Rank 0] step:9861/10000 train_time:801606ms step_avg:81.29ms
+[2025-07-06 21:43:45] [Rank 0] step:9881/10000 train_time:803107ms step_avg:81.28ms
+[2025-07-06 21:43:47] [Rank 0] step:9901/10000 train_time:804657ms step_avg:81.27ms
+[2025-07-06 21:43:49] [Rank 0] step:9921/10000 train_time:806759ms step_avg:81.32ms
+[2025-07-06 21:43:50] [Rank 0] step:9941/10000 train_time:808259ms step_avg:81.31ms
+[2025-07-06 21:43:52] [Rank 0] step:9961/10000 train_time:809760ms step_avg:81.29ms
+[2025-07-06 21:43:53] [Rank 0] step:9981/10000 train_time:811260ms step_avg:81.28ms
+[2025-07-06 21:43:55] [Rank 0] step:10000/10000 train_time:813326ms step_avg:81.33ms
+[2025-07-06 21:43:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:43:56] [Rank 0] PRINT: step:10000/10000 train_loss:0.8578 val_loss:0.8614 train_time:813405ms step_avg:81.34ms
+[2025-07-06 21:43:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:43:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
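The sampled set size reported next (5633) overshoots the ~5000 budget because the stratified sampler in the logged script below keeps max(1, int(len(items) * sample_ratio)) items per class, so every small power-law class contributes at least one sample. A minimal sketch of that arithmetic, assuming the same per-class rule as run_detailed_evaluation (the function name and class-size argument are illustrative, not from the run):

def stratified_sample_size(class_sizes, budget):
    # Mirrors run_detailed_evaluation's per-class rule: int() truncation trims
    # large classes slightly, while the max(1, ...) floor guarantees one sample
    # from every class, so the total can land above the requested budget.
    ratio = budget / sum(class_sizes)
    return sum(max(1, int(n * ratio)) for n in class_sizes)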
+[2025-07-06 21:43:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:49:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:49:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:49:14] [Rank 0] Total Loss: 5.3971
+[2025-07-06 21:49:14] [Rank 0] Total FTA: 0.9918
+[2025-07-06 21:49:14] [Rank 0] Group 0 Loss: 5.5997
+[2025-07-06 21:49:14] [Rank 0] Group 1 Loss: 5.2879
+[2025-07-06 21:49:14] [Rank 0] Group 2 Loss: 5.2599
+[2025-07-06 21:49:14] [Rank 0] Group 3 Loss: 5.5237
+[2025-07-06 21:49:14] [Rank 0] Group 4 Loss: 5.3257
+[2025-07-06 21:49:14] [Rank 0] Group 5 Loss: 5.2853
+[2025-07-06 21:49:14] [Rank 0] Group 6 Loss: 5.2956
+[2025-07-06 21:49:14] [Rank 0] Group 7 Loss: 5.4279
+[2025-07-06 21:49:14] [Rank 0] Group 8 Loss: 5.3534
+[2025-07-06 21:49:14] [Rank 0] Group 9 Loss: 5.3771
+[2025-07-06 21:49:14] [Rank 0] Group 10 Loss: 5.4329
+[2025-07-06 21:49:14] [Rank 0] Group 11 Loss: 5.3884
+[2025-07-06 21:49:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:49:14] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:49:14] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:49:14] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 21:49:14] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 21:49:14] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-06 21:49:14] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-06 21:49:14] [Rank 0] Group 7 FTA: 0.9740
+[2025-07-06 21:49:14] [Rank 0] Group 8 FTA: 0.9922
+[2025-07-06 21:49:14] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 21:49:14] [Rank 0] Group 10 FTA: 0.9961
+[2025-07-06 21:49:14] [Rank 0] Group 11 FTA: 0.9854
+[2025-07-06 21:49:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-06 21:49:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-06 21:49:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-06 21:49:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-06 21:49:16] [Rank 0] step:10001/10000 train_time:813427ms step_avg:81.33ms
+[2025-07-06 21:49:16] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 21:49:16 2025 ---
+[2025-07-06 21:49:16] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10336 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/config.json b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ff4dcccc9cd3895e09f571929585b3a14c8a0ea6
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 46,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "aa3213de-5e81-4309-a68b-35e0012f117d",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..11a3233f4f45a4fe0986c39ff83ea12376aa00e7
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b94daa2767ef657873f074d6c8c2637019f865ca288b59cc802c5504e1b779fd
+size 384211
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..af5d548909b21c33cbc5d3ec14ed53f638277be0
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:82175ce32e0526d23f50c7738cf02d83110540ab80321cb514699dab439dcd30
+size 355088
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..7fbc0abf5d66df92403f5598dd32d4aa408421bf
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0989f28eab44c49bbdd8614c9152dfbaf85df9a52319a37b23763c632d36d4f5
+size 114380
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..3d335b226a4a0e4bd7e2a5d66b42da65722f800c
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ba1a97367e3b47068990837ac4cdf6b300ddcf25e00c558d1fd8ed7a33d1d41b
+size 104916
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/training_log_aa3213de-5e81-4309-a68b-35e0012f117d.txt b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/training_log_aa3213de-5e81-4309-a68b-35e0012f117d.txt
new file mode 100644
index 0000000000000000000000000000000000000000..646b693feb82e91fd41110960546d1f3a6b3eeb1
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/training_log_aa3213de-5e81-4309-a68b-35e0012f117d.txt
@@ -0,0 +1,5144 @@
+[2025-07-08 02:54:09] [Rank 0] PRINT: --- Script Start: Tue Jul 8 02:54:09 2025 ---
+[2025-07-08 02:54:09] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002)
+[2025-07-08 02:54:09] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-08 02:54:09] [Rank 0] PRINT: Using fixed seed: 46
+[2025-07-08 02:54:09] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46
+[2025-07-08 02:54:09] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
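+# Example launch for this run's configuration (a sketch: the script filename and
+# GPU count are assumptions, the flags are the ones defined in the argparse above):
+#   torchrun --nproc_per_node=<num_gpus> <this_script>.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.002 --seed 46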
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message exactly once; an unguarded second write here is what
+        # duplicated every line in the logs above.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+        print0(code) # Log the code
+        # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+            break
+
+        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+        loss_train.backward()
+        train_loss_sum += loss_train.detach()/ args.train_seq_len
+        train_step_count += 1
+
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+        current_lr_val = get_lr(step)
+        for opt in optimizers:
+            for group in opt.param_groups:
+                group["lr"] = group["initial_lr"] * current_lr_val
+
+        if optimizer2 is not None:
+            for group in optimizer2.param_groups:
+                frac = min(step / 300, 1)
+                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+        for opt in optimizers:
+            opt.step()
+
+        model_compiled.zero_grad(set_to_none=True)
+
+        if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+            approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+            total_tokens_in_batch = args.train_seq_len * world_size
+            train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+    print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+           f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+    if dist.is_initialized():
+        dist.destroy_process_group()
+
+[2025-07-08 02:54:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-08 02:54:09] [Rank 0] PRINT: Constructing model... +[2025-07-08 02:54:09] [Rank 0] PRINT: Constructing model... +[2025-07-08 02:54:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-08 02:54:11] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-08 02:54:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-08 02:54:11] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-08 02:54:11] [Rank 0] PRINT: Testing model forward function: +[2025-07-08 02:54:11] [Rank 0] PRINT: Testing model forward function: +[2025-07-08 02:54:12] [Rank 0] PRINT: Model test - Result type: +[2025-07-08 02:54:12] [Rank 0] PRINT: Model test - Result type: +[2025-07-08 02:54:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-08 02:54:12] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-08 02:54:12] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-08 02:54:12] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-08 02:54:12] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-08 02:54:12] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-08 02:54:12] [Rank 0] PRINT: Model returns: +[2025-07-08 02:54:12] [Rank 0] PRINT: Model returns: +[2025-07-08 02:54:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-08 02:54:12] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-08 02:54:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-08 02:54:12] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-08 02:54:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-08 02:54:12] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-08 02:54:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-08 02:54:12] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-08 02:54:12] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-08 02:54:12] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-08 02:54:12] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-08 02:54:12] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-08 02:54:12] [Rank 0] PRINT: Model compilation complete. +[2025-07-08 02:54:12] [Rank 0] PRINT: Model compilation complete. +[2025-07-08 02:54:12] [Rank 0] PRINT: Starting warmup... +[2025-07-08 02:54:12] [Rank 0] PRINT: Starting warmup... +[2025-07-08 02:55:18] [Rank 0] PRINT: Warmup complete. +[2025-07-08 02:55:18] [Rank 0] PRINT: Warmup complete. +[2025-07-08 02:55:18] [Rank 0] PRINT: Starting training... +[2025-07-08 02:55:18] [Rank 0] PRINT: Starting training... +[2025-07-08 02:55:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:55:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-08 02:55:25] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-08 02:55:27] [Rank 0] step:21/10000 train_time:1749ms step_avg:83.28ms
+[2025-07-08 02:55:28] [Rank 0] step:41/10000 train_time:3203ms step_avg:78.12ms
+[2025-07-08 02:55:30] [Rank 0] step:61/10000 train_time:4660ms step_avg:76.40ms
+[2025-07-08 02:55:31] [Rank 0] step:81/10000 train_time:6116ms step_avg:75.50ms
+[2025-07-08 02:55:33] [Rank 0] step:101/10000 train_time:8235ms step_avg:81.53ms
+[2025-07-08 02:55:35] [Rank 0] step:121/10000 train_time:9693ms step_avg:80.11ms
+[2025-07-08 02:55:36] [Rank 0] step:141/10000 train_time:11292ms step_avg:80.09ms
+[2025-07-08 02:55:38] [Rank 0] step:161/10000 train_time:12754ms step_avg:79.22ms
+[2025-07-08 02:55:40] [Rank 0] step:181/10000 train_time:14473ms step_avg:79.96ms
+[2025-07-08 02:55:41] [Rank 0] step:201/10000 train_time:16325ms step_avg:81.22ms
+[2025-07-08 02:55:43] [Rank 0] step:221/10000 train_time:17787ms step_avg:80.48ms
+[2025-07-08 02:55:44] [Rank 0] step:241/10000 train_time:19252ms step_avg:79.88ms
+[2025-07-08 02:55:46] [Rank 0] step:261/10000 train_time:20716ms step_avg:79.37ms
+[2025-07-08 02:55:47] [Rank 0] step:281/10000 train_time:22420ms step_avg:79.79ms
+[2025-07-08 02:55:49] [Rank 0] step:301/10000 train_time:23883ms step_avg:79.35ms
+[2025-07-08 02:55:50] [Rank 0] step:321/10000 train_time:25349ms step_avg:78.97ms
+[2025-07-08 02:55:52] [Rank 0] step:341/10000 train_time:26815ms step_avg:78.64ms
+[2025-07-08 02:55:54] [Rank 0] step:361/10000 train_time:28334ms step_avg:78.49ms
+[2025-07-08 02:55:55] [Rank 0] step:381/10000 train_time:30407ms step_avg:79.81ms
+[2025-07-08 02:55:57] [Rank 0] step:401/10000 train_time:31873ms step_avg:79.48ms
+[2025-07-08 02:55:58] [Rank 0] step:421/10000 train_time:33341ms step_avg:79.20ms
+[2025-07-08 02:56:00] [Rank 0] step:441/10000 train_time:34807ms step_avg:78.93ms
+[2025-07-08 02:56:02] [Rank 0] step:461/10000 train_time:36935ms step_avg:80.12ms
+[2025-07-08 02:56:03] [Rank 0] step:481/10000 train_time:38402ms step_avg:79.84ms
+[2025-07-08 02:56:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:56:06] [Rank 0] PRINT: step:500/10000 train_loss:3.3871 val_loss:1.6158 train_time:39872ms step_avg:79.74ms
+[2025-07-08 02:56:06] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:56:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 02:56:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:01:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:01:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:01:29] [Rank 0] Total Loss: 4.3556
+[2025-07-08 03:01:29] [Rank 0] Total FTA: 0.0895
+[2025-07-08 03:01:29] [Rank 0] Group 0 Loss: 4.6695
+[2025-07-08 03:01:29] [Rank 0] Group 1 Loss: 4.2401
+[2025-07-08 03:01:29] [Rank 0] Group 2 Loss: 4.1857
+[2025-07-08 03:01:29] [Rank 0] Group 3 Loss: 4.2924
+[2025-07-08 03:01:29] [Rank 0] Group 4 Loss: 4.3402
+[2025-07-08 03:01:29] [Rank 0] Group 5 Loss: 4.2524
+[2025-07-08 03:01:29] [Rank 0] Group 6 Loss: 4.2778
+[2025-07-08 03:01:29] [Rank 0] Group 7 Loss: 4.3411
+[2025-07-08 03:01:29] [Rank 0] Group 8 Loss: 4.3253
+[2025-07-08 03:01:29] [Rank 0] Group 9 Loss: 4.3283
+[2025-07-08 03:01:29] [Rank 0] Group 10 Loss: 4.3437
+[2025-07-08 03:01:29] [Rank 0] Group 11 Loss: 4.3535
+[2025-07-08 03:01:29] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-08 03:01:29] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 03:01:29] [Rank 0] Group 2 FTA: 0.0651
+[2025-07-08 03:01:29] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-08 03:01:29] [Rank 0] Group 4 FTA: 0.0182
+[2025-07-08 03:01:29] [Rank 0] Group 5 FTA: 0.0990
+[2025-07-08 03:01:29] [Rank 0] Group 6 FTA: 0.1172
+[2025-07-08 03:01:29] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-08 03:01:29] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-08 03:01:29] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-08 03:01:29] [Rank 0] Group 10 FTA: 0.1074
+[2025-07-08 03:01:29] [Rank 0] Group 11 FTA: 0.0908
+[2025-07-08 03:01:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:01:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:01:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:01:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:01:31] [Rank 0] step:501/10000 train_time:39891ms step_avg:79.62ms
+[2025-07-08 03:01:32] [Rank 0] step:521/10000 train_time:41365ms step_avg:79.40ms
+[2025-07-08 03:01:34] [Rank 0] step:541/10000 train_time:42872ms step_avg:79.25ms
+[2025-07-08 03:01:35] [Rank 0] step:561/10000 train_time:44518ms step_avg:79.35ms
+[2025-07-08 03:01:37] [Rank 0] step:581/10000 train_time:45975ms step_avg:79.13ms
+[2025-07-08 03:01:38] [Rank 0] step:601/10000 train_time:47433ms step_avg:78.92ms
+[2025-07-08 03:01:40] [Rank 0] step:621/10000 train_time:48892ms step_avg:78.73ms
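+# The detailed evaluations above draw a stratified sample (~5000 requested, 5633 kept)
+# across the 12 per-class groups whose losses and FTAs are reported. A minimal sketch
+# of such a sampler; the script's actual implementation is defined earlier in the file
+# and may differ, and every name here is illustrative.
+import random
+from collections import defaultdict
+
+def stratified_sample(examples, target_total=5000, seed=0):
+    """examples: iterable of (group_id, example) pairs. Takes an equal, ceil-rounded
+    share per group, which is why the result can overshoot the target slightly."""
+    rng = random.Random(seed)
+    by_group = defaultdict(list)
+    for group_id, ex in examples:
+        by_group[group_id].append(ex)
+    per_group = -(-target_total // len(by_group))  # ceil(target / n_groups)
+    picked = []
+    for group_id in sorted(by_group):
+        pool = by_group[group_id]
+        picked.extend(rng.sample(pool, min(per_group, len(pool))))
+    return picked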
+[2025-07-08 03:01:41] [Rank 0] step:641/10000 train_time:50589ms step_avg:78.92ms
+[2025-07-08 03:01:43] [Rank 0] step:661/10000 train_time:52047ms step_avg:78.74ms
+[2025-07-08 03:01:44] [Rank 0] step:681/10000 train_time:53505ms step_avg:78.57ms
+[2025-07-08 03:01:46] [Rank 0] step:701/10000 train_time:54962ms step_avg:78.40ms
+[2025-07-08 03:01:48] [Rank 0] step:721/10000 train_time:56681ms step_avg:78.61ms
+[2025-07-08 03:01:49] [Rank 0] step:741/10000 train_time:58551ms step_avg:79.02ms
+[2025-07-08 03:01:51] [Rank 0] step:761/10000 train_time:60018ms step_avg:78.87ms
+[2025-07-08 03:01:52] [Rank 0] step:781/10000 train_time:61489ms step_avg:78.73ms
+[2025-07-08 03:01:54] [Rank 0] step:801/10000 train_time:62958ms step_avg:78.60ms
+[2025-07-08 03:01:56] [Rank 0] step:821/10000 train_time:64800ms step_avg:78.93ms
+[2025-07-08 03:01:57] [Rank 0] step:841/10000 train_time:66266ms step_avg:78.79ms
+[2025-07-08 03:01:59] [Rank 0] step:861/10000 train_time:67735ms step_avg:78.67ms
+[2025-07-08 03:02:00] [Rank 0] step:881/10000 train_time:69205ms step_avg:78.55ms
+[2025-07-08 03:02:02] [Rank 0] step:901/10000 train_time:70930ms step_avg:78.72ms
+[2025-07-08 03:02:04] [Rank 0] step:921/10000 train_time:72802ms step_avg:79.05ms
+[2025-07-08 03:02:05] [Rank 0] step:941/10000 train_time:74272ms step_avg:78.93ms
+[2025-07-08 03:02:07] [Rank 0] step:961/10000 train_time:75740ms step_avg:78.81ms
+[2025-07-08 03:02:08] [Rank 0] step:981/10000 train_time:77211ms step_avg:78.71ms
+[2025-07-08 03:02:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:02:11] [Rank 0] PRINT: step:1000/10000 train_loss:1.4663 val_loss:1.3220 train_time:79326ms step_avg:79.33ms
+[2025-07-08 03:02:11] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:02:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:02:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:07:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:07:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:07:36] [Rank 0] Total Loss: 4.6261
+[2025-07-08 03:07:36] [Rank 0] Total FTA: 0.2066
+[2025-07-08 03:07:36] [Rank 0] Group 0 Loss: 4.8600
+[2025-07-08 03:07:36] [Rank 0] Group 1 Loss: 4.6269
+[2025-07-08 03:07:36] [Rank 0] Group 2 Loss: 4.5089
+[2025-07-08 03:07:36] [Rank 0] Group 3 Loss: 4.4683
+[2025-07-08 03:07:36] [Rank 0] Group 4 Loss: 4.5583
+[2025-07-08 03:07:36] [Rank 0] Group 5 Loss: 4.6133
+[2025-07-08 03:07:36] [Rank 0] Group 6 Loss: 4.5391
+[2025-07-08 03:07:36] [Rank 0] Group 7 Loss: 4.6495
+[2025-07-08 03:07:36] [Rank 0] Group 8 Loss: 4.6342
+[2025-07-08 03:07:36] [Rank 0] Group 9 Loss: 4.5951
+[2025-07-08 03:07:36] [Rank 0] Group 10 Loss: 4.6345
+[2025-07-08 03:07:36] [Rank 0] Group 11 Loss: 4.6081
+[2025-07-08 03:07:36] [Rank 0] Group 0 FTA: 0.4837
+[2025-07-08 03:07:36] [Rank 0] Group 1 FTA: 0.1849
+[2025-07-08 03:07:36] [Rank 0] Group 2 FTA: 0.2474
+[2025-07-08 03:07:36] [Rank 0] Group 3 FTA: 0.1120
+[2025-07-08 03:07:36] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-08 03:07:36] [Rank 0] Group 5 FTA: 0.2448
+[2025-07-08 03:07:36] [Rank 0] Group 6 FTA: 0.1641
+[2025-07-08 03:07:36] [Rank 0] Group 7 FTA: 0.1667
+[2025-07-08 03:07:36] [Rank 0] Group 8 FTA: 0.1875
+[2025-07-08 03:07:36] [Rank 0] Group 9 FTA: 0.1992
+[2025-07-08 03:07:36] [Rank 0] Group 10 FTA: 0.1680
+[2025-07-08 03:07:36] [Rank 0] Group 11 FTA: 0.1328
+[2025-07-08 03:07:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:07:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:07:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:07:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:07:38] [Rank 0] step:1001/10000 train_time:79346ms step_avg:79.27ms
+[2025-07-08 03:07:39] [Rank 0] step:1021/10000 train_time:80810ms step_avg:79.15ms
+[2025-07-08 03:07:41] [Rank 0] step:1041/10000 train_time:82272ms step_avg:79.03ms
+[2025-07-08 03:07:42] [Rank 0] step:1061/10000 train_time:83737ms step_avg:78.92ms
+[2025-07-08 03:07:44] [Rank 0] step:1081/10000 train_time:85460ms step_avg:79.06ms
+[2025-07-08 03:07:46] [Rank 0] step:1101/10000 train_time:87331ms step_avg:79.32ms
+[2025-07-08 03:07:47] [Rank 0] step:1121/10000 train_time:88793ms step_avg:79.21ms
+[2025-07-08 03:07:49] [Rank 0] step:1141/10000 train_time:90265ms step_avg:79.11ms
+[2025-07-08 03:07:50] [Rank 0] step:1161/10000 train_time:91733ms step_avg:79.01ms
+[2025-07-08 03:07:52] [Rank 0] step:1181/10000 train_time:93861ms step_avg:79.48ms
+[2025-07-08 03:07:54] [Rank 0] step:1201/10000 train_time:95327ms step_avg:79.37ms
+[2025-07-08 03:07:55] [Rank 0] step:1221/10000 train_time:96794ms step_avg:79.27ms
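+# "FTA" in the results above is first-token accuracy: whether the model's top-1
+# prediction for the first token of the answer matches the target. A minimal sketch
+# of such a metric (assumption: the script's real implementation is defined earlier
+# in the file and may handle masking and batching differently).
+import torch
+
+def first_token_accuracy(logits: torch.Tensor, answer_start: torch.Tensor,
+                         first_answer_token: torch.Tensor) -> float:
+    """logits: (B, T, V); answer_start: (B,) index of each answer's first token;
+    first_answer_token: (B,) gold token ids at that position."""
+    rows = torch.arange(logits.size(0))
+    # Logits at position t-1 predict the token at position t.
+    preds = logits[rows, answer_start - 1].argmax(dim=-1)
+    return (preds == first_answer_token).float().mean().item()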
+[2025-07-08 03:07:57] [Rank 0] step:1241/10000 train_time:98264ms step_avg:79.18ms
+[2025-07-08 03:07:59] [Rank 0] step:1261/10000 train_time:99780ms step_avg:79.13ms
+[2025-07-08 03:08:00] [Rank 0] step:1281/10000 train_time:101864ms step_avg:79.52ms
+[2025-07-08 03:08:02] [Rank 0] step:1301/10000 train_time:103334ms step_avg:79.43ms
+[2025-07-08 03:08:03] [Rank 0] step:1321/10000 train_time:104802ms step_avg:79.34ms
+[2025-07-08 03:08:05] [Rank 0] step:1341/10000 train_time:106273ms step_avg:79.25ms
+[2025-07-08 03:08:07] [Rank 0] step:1361/10000 train_time:108389ms step_avg:79.64ms
+[2025-07-08 03:08:08] [Rank 0] step:1381/10000 train_time:109863ms step_avg:79.55ms
+[2025-07-08 03:08:10] [Rank 0] step:1401/10000 train_time:111332ms step_avg:79.47ms
+[2025-07-08 03:08:11] [Rank 0] step:1421/10000 train_time:112803ms step_avg:79.38ms
+[2025-07-08 03:08:13] [Rank 0] step:1441/10000 train_time:114329ms step_avg:79.34ms
+[2025-07-08 03:08:15] [Rank 0] step:1461/10000 train_time:116553ms step_avg:79.78ms
+[2025-07-08 03:08:16] [Rank 0] step:1481/10000 train_time:118024ms step_avg:79.69ms
+[2025-07-08 03:08:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:08:19] [Rank 0] PRINT: step:1500/10000 train_loss:1.2294 val_loss:1.2008 train_time:119494ms step_avg:79.66ms
+[2025-07-08 03:08:19] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:08:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:08:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:13:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:13:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:13:44] [Rank 0] Total Loss: 5.0532
+[2025-07-08 03:13:44] [Rank 0] Total FTA: 0.3389
+[2025-07-08 03:13:44] [Rank 0] Group 0 Loss: 5.5099
+[2025-07-08 03:13:44] [Rank 0] Group 1 Loss: 4.9436
+[2025-07-08 03:13:44] [Rank 0] Group 2 Loss: 4.8494
+[2025-07-08 03:13:44] [Rank 0] Group 3 Loss: 5.0493
+[2025-07-08 03:13:44] [Rank 0] Group 4 Loss: 4.9988
+[2025-07-08 03:13:44] [Rank 0] Group 5 Loss: 4.9659
+[2025-07-08 03:13:44] [Rank 0] Group 6 Loss: 4.9225
+[2025-07-08 03:13:44] [Rank 0] Group 7 Loss: 4.9972
+[2025-07-08 03:13:44] [Rank 0] Group 8 Loss: 5.0004
+[2025-07-08 03:13:44] [Rank 0] Group 9 Loss: 4.9572
+[2025-07-08 03:13:44] [Rank 0] Group 10 Loss: 5.0521
+[2025-07-08 03:13:44] [Rank 0] Group 11 Loss: 4.9970
+[2025-07-08 03:13:44] [Rank 0] Group 0 FTA: 0.3394
+[2025-07-08 03:13:44] [Rank 0] Group 1 FTA: 0.3359
+[2025-07-08 03:13:44] [Rank 0] Group 2 FTA: 0.4766
+[2025-07-08 03:13:44] [Rank 0] Group 3 FTA: 0.3307
+[2025-07-08 03:13:44] [Rank 0] Group 4 FTA: 0.2656
+[2025-07-08 03:13:44] [Rank 0] Group 5 FTA: 0.4062
+[2025-07-08 03:13:44] [Rank 0] Group 6 FTA: 0.2943
+[2025-07-08 03:13:44] [Rank 0] Group 7 FTA: 0.3151
+[2025-07-08 03:13:44] [Rank 0] Group 8 FTA: 0.3594
+[2025-07-08 03:13:44] [Rank 0] Group 9 FTA: 0.3477
+[2025-07-08 03:13:44] [Rank 0] Group 10 FTA: 0.3379
+[2025-07-08 03:13:44] [Rank 0] Group 11 FTA: 0.3096
+[2025-07-08 03:13:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:13:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:13:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:13:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:13:46] [Rank 0] step:1501/10000 train_time:119514ms step_avg:79.62ms
+[2025-07-08 03:13:47] [Rank 0] step:1521/10000 train_time:120999ms step_avg:79.55ms
+[2025-07-08 03:13:49] [Rank 0] step:1541/10000 train_time:123125ms step_avg:79.90ms
+[2025-07-08 03:13:51] [Rank 0] step:1561/10000 train_time:124589ms step_avg:79.81ms
+[2025-07-08 03:13:52] [Rank 0] step:1581/10000 train_time:126057ms step_avg:79.73ms
+[2025-07-08 03:13:54] [Rank 0] step:1601/10000 train_time:127522ms step_avg:79.65ms
+[2025-07-08 03:13:55] [Rank 0] step:1621/10000 train_time:128989ms step_avg:79.57ms
+[2025-07-08 03:13:57] [Rank 0] step:1641/10000 train_time:130694ms step_avg:79.64ms
+[2025-07-08 03:13:58] [Rank 0] step:1661/10000 train_time:132162ms step_avg:79.57ms
+[2025-07-08 03:14:00] [Rank 0] step:1681/10000 train_time:133629ms step_avg:79.49ms
+[2025-07-08 03:14:01] [Rank 0] step:1701/10000 train_time:135095ms step_avg:79.42ms
+[2025-07-08 03:14:03] [Rank 0] step:1721/10000 train_time:137205ms step_avg:79.72ms
+[2025-07-08 03:14:05] [Rank 0] step:1741/10000 train_time:138676ms step_avg:79.65ms
+[2025-07-08 03:14:06] [Rank 0] step:1761/10000 train_time:140143ms step_avg:79.58ms
+[2025-07-08 03:14:08] [Rank 0] step:1781/10000 train_time:141614ms step_avg:79.51ms
+[2025-07-08 03:14:10] [Rank 0] step:1801/10000 train_time:143758ms step_avg:79.82ms
+[2025-07-08 03:14:11] [Rank 0] step:1821/10000 train_time:145211ms step_avg:79.74ms
+[2025-07-08 03:14:13] [Rank 0] step:1841/10000 train_time:146680ms step_avg:79.67ms
+[2025-07-08 03:14:14] [Rank 0] step:1861/10000 train_time:148150ms step_avg:79.61ms
+[2025-07-08 03:14:16] [Rank 0] step:1881/10000 train_time:149625ms step_avg:79.55ms
+[2025-07-08 03:14:18] [Rank 0] step:1901/10000 train_time:151733ms step_avg:79.82ms
+[2025-07-08 03:14:19] [Rank 0] step:1921/10000 train_time:153207ms step_avg:79.75ms
+[2025-07-08 03:14:21] [Rank 0] step:1941/10000 train_time:154675ms step_avg:79.69ms
+[2025-07-08 03:14:22] [Rank 0] step:1961/10000 train_time:156146ms step_avg:79.63ms
+[2025-07-08 03:14:24] [Rank 0] step:1981/10000 train_time:157874ms step_avg:79.69ms
+[2025-07-08 03:14:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:14:27] [Rank 0] PRINT: step:2000/10000 train_loss:1.1250 val_loss:1.0971 train_time:159737ms step_avg:79.87ms
+[2025-07-08 03:14:27] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:14:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:14:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:19:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:19:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:19:52] [Rank 0] Total Loss: 5.2399
+[2025-07-08 03:19:52] [Rank 0] Total FTA: 0.4486
+[2025-07-08 03:19:52] [Rank 0] Group 0 Loss: 5.7529
+[2025-07-08 03:19:52] [Rank 0] Group 1 Loss: 5.1151
+[2025-07-08 03:19:52] [Rank 0] Group 2 Loss: 5.1559
+[2025-07-08 03:19:52] [Rank 0] Group 3 Loss: 5.2534
+[2025-07-08 03:19:52] [Rank 0] Group 4 Loss: 5.0344
+[2025-07-08 03:19:52] [Rank 0] Group 5 Loss: 5.0606
+[2025-07-08 03:19:52] [Rank 0] Group 6 Loss: 5.1213
+[2025-07-08 03:19:52] [Rank 0] Group 7 Loss: 5.2178
+[2025-07-08 03:19:52] [Rank 0] Group 8 Loss: 5.1242
+[2025-07-08 03:19:52] [Rank 0] Group 9 Loss: 5.1771
+[2025-07-08 03:19:52] [Rank 0] Group 10 Loss: 5.1718
+[2025-07-08 03:19:52] [Rank 0] Group 11 Loss: 5.2183
+[2025-07-08 03:19:52] [Rank 0] Group 0 FTA: 0.3576
+[2025-07-08 03:19:52] [Rank 0] Group 1 FTA: 0.3099
+[2025-07-08 03:19:52] [Rank 0] Group 2 FTA: 0.4896
+[2025-07-08 03:19:52] [Rank 0] Group 3 FTA: 0.2630
+[2025-07-08 03:19:52] [Rank 0] Group 4 FTA: 0.4870
+[2025-07-08 03:19:52] [Rank 0] Group 5 FTA: 0.5599
+[2025-07-08 03:19:52] [Rank 0] Group 6 FTA: 0.4688
+[2025-07-08 03:19:52] [Rank 0] Group 7 FTA: 0.4609
+[2025-07-08 03:19:52] [Rank 0] Group 8 FTA: 0.5078
+[2025-07-08 03:19:52] [Rank 0] Group 9 FTA: 0.4883
+[2025-07-08 03:19:52] [Rank 0] Group 10 FTA: 0.5254
+[2025-07-08 03:19:52] [Rank 0] Group 11 FTA: 0.4844
+[2025-07-08 03:19:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:19:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:19:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:19:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:19:54] [Rank 0] step:2001/10000 train_time:159757ms step_avg:79.84ms
+[2025-07-08 03:19:55] [Rank 0] step:2021/10000 train_time:161236ms step_avg:79.78ms
+[2025-07-08 03:19:56] [Rank 0] step:2041/10000 train_time:162702ms step_avg:79.72ms
+[2025-07-08 03:19:58] [Rank 0] step:2061/10000 train_time:164165ms step_avg:79.65ms
+[2025-07-08 03:20:00] [Rank 0] step:2081/10000 train_time:165871ms step_avg:79.71ms
+[2025-07-08 03:20:01] [Rank 0] step:2101/10000 train_time:167336ms step_avg:79.65ms
+[2025-07-08 03:20:03] [Rank 0] step:2121/10000 train_time:168802ms step_avg:79.59ms
+[2025-07-08 03:20:04] [Rank 0] step:2141/10000 train_time:170270ms step_avg:79.53ms
+[2025-07-08 03:20:06] [Rank 0] step:2161/10000 train_time:171994ms step_avg:79.59ms
+[2025-07-08 03:20:08] [Rank 0] step:2181/10000 train_time:173862ms step_avg:79.72ms
+[2025-07-08 03:20:09] [Rank 0] step:2201/10000 train_time:175329ms step_avg:79.66ms
+[2025-07-08 03:20:11] [Rank 0] step:2221/10000 train_time:176799ms step_avg:79.60ms
+[2025-07-08 03:20:12] [Rank 0] step:2241/10000 train_time:178285ms step_avg:79.56ms
+[2025-07-08 03:20:14] [Rank 0] step:2261/10000 train_time:180434ms step_avg:79.80ms
+[2025-07-08 03:20:16] [Rank 0] step:2281/10000 train_time:181926ms step_avg:79.76ms
+[2025-07-08 03:20:17] [Rank 0] step:2301/10000 train_time:183421ms step_avg:79.71ms
+[2025-07-08 03:20:19] [Rank 0] step:2321/10000 train_time:184916ms step_avg:79.67ms
+[2025-07-08 03:20:21] [Rank 0] step:2341/10000 train_time:187103ms step_avg:79.92ms
+[2025-07-08 03:20:22] [Rank 0] step:2361/10000 train_time:188577ms step_avg:79.87ms
+[2025-07-08 03:20:24] [Rank 0] step:2381/10000 train_time:190072ms step_avg:79.83ms
+[2025-07-08 03:20:25] [Rank 0] step:2401/10000 train_time:191568ms step_avg:79.79ms
+[2025-07-08 03:20:27] [Rank 0] step:2421/10000 train_time:193064ms step_avg:79.75ms
+[2025-07-08 03:20:29] [Rank 0] step:2441/10000 train_time:195221ms step_avg:79.98ms
+[2025-07-08 03:20:31] [Rank 0] step:2461/10000 train_time:196717ms step_avg:79.93ms
+[2025-07-08 03:20:32] [Rank 0] step:2481/10000 train_time:198215ms step_avg:79.89ms
+[2025-07-08 03:20:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:20:34] [Rank 0] PRINT: step:2500/10000 train_loss:1.0011 val_loss:0.9415 train_time:199712ms step_avg:79.88ms
+[2025-07-08 03:20:34] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:20:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:20:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:25:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:25:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:25:59] [Rank 0] Total Loss: 5.2168
+[2025-07-08 03:25:59] [Rank 0] Total FTA: 0.7674
+[2025-07-08 03:25:59] [Rank 0] Group 0 Loss: 5.7535
+[2025-07-08 03:25:59] [Rank 0] Group 1 Loss: 4.9602
+[2025-07-08 03:25:59] [Rank 0] Group 2 Loss: 5.0772
+[2025-07-08 03:25:59] [Rank 0] Group 3 Loss: 5.1267
+[2025-07-08 03:25:59] [Rank 0] Group 4 Loss: 5.0800
+[2025-07-08 03:25:59] [Rank 0] Group 5 Loss: 5.1805
+[2025-07-08 03:25:59] [Rank 0] Group 6 Loss: 5.0422
+[2025-07-08 03:25:59] [Rank 0] Group 7 Loss: 5.1885
+[2025-07-08 03:25:59] [Rank 0] Group 8 Loss: 5.2217
+[2025-07-08 03:25:59] [Rank 0] Group 9 Loss: 5.1440
+[2025-07-08 03:25:59] [Rank 0] Group 10 Loss: 5.1743
+[2025-07-08 03:25:59] [Rank 0] Group 11 Loss: 5.1749
+[2025-07-08 03:25:59] [Rank 0] Group 0 FTA: 0.6463
+[2025-07-08 03:25:59] [Rank 0] Group 1 FTA: 0.5312
+[2025-07-08 03:25:59] [Rank 0] Group 2 FTA: 0.7734
+[2025-07-08 03:25:59] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-08 03:25:59] [Rank 0] Group 4 FTA: 0.8125
+[2025-07-08 03:25:59] [Rank 0] Group 5 FTA: 0.8203
+[2025-07-08 03:25:59] [Rank 0] Group 6 FTA: 0.7995
+[2025-07-08 03:25:59] [Rank 0] Group 7 FTA: 0.8255
+[2025-07-08 03:25:59] [Rank 0] Group 8 FTA: 0.7708
+[2025-07-08 03:25:59] [Rank 0] Group 9 FTA: 0.8164
+[2025-07-08 03:25:59] [Rank 0] Group 10 FTA: 0.7930
+[2025-07-08 03:25:59] [Rank 0] Group 11 FTA: 0.8115
+[2025-07-08 03:26:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:26:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:26:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:26:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:26:01] [Rank 0] step:2501/10000 train_time:199732ms step_avg:79.86ms
+[2025-07-08 03:26:03] [Rank 0] step:2521/10000 train_time:201490ms step_avg:79.92ms
+[2025-07-08 03:26:05] [Rank 0] step:2541/10000 train_time:203369ms step_avg:80.03ms
+[2025-07-08 03:26:06] [Rank 0] step:2561/10000 train_time:204857ms step_avg:79.99ms
+[2025-07-08 03:26:08] [Rank 0] step:2581/10000 train_time:206345ms step_avg:79.95ms
+[2025-07-08 03:26:09] [Rank 0] step:2601/10000 train_time:207927ms step_avg:79.94ms
+[2025-07-08 03:26:11] [Rank 0] step:2621/10000 train_time:210141ms step_avg:80.18ms
+[2025-07-08 03:26:13] [Rank 0] step:2641/10000 train_time:211633ms step_avg:80.13ms
+[2025-07-08 03:26:14] [Rank 0] step:2661/10000 train_time:213124ms step_avg:80.09ms
+[2025-07-08 03:26:16] [Rank 0] step:2681/10000 train_time:214615ms step_avg:80.05ms
+[2025-07-08 03:26:18] [Rank 0] step:2701/10000 train_time:216362ms step_avg:80.10ms
+[2025-07-08 03:26:19] [Rank 0] step:2721/10000 train_time:218250ms step_avg:80.21ms
+[2025-07-08 03:26:21] [Rank 0] step:2741/10000 train_time:219742ms step_avg:80.17ms
+[2025-07-08 03:26:22] [Rank 0] step:2761/10000 train_time:221233ms step_avg:80.13ms
+[2025-07-08 03:26:24] [Rank 0] step:2781/10000 train_time:222726ms step_avg:80.09ms
+[2025-07-08 03:26:26] [Rank 0] step:2801/10000 train_time:224862ms step_avg:80.28ms
+[2025-07-08 03:26:28] [Rank 0] step:2821/10000 train_time:226355ms step_avg:80.24ms
+[2025-07-08 03:26:29] [Rank 0] step:2841/10000 train_time:227849ms step_avg:80.20ms
+[2025-07-08 03:26:31] [Rank 0] step:2861/10000 train_time:229343ms step_avg:80.16ms
+[2025-07-08 03:26:32] [Rank 0] step:2881/10000 train_time:231094ms step_avg:80.21ms
+[2025-07-08 03:26:34] [Rank 0] step:2901/10000 train_time:232569ms step_avg:80.17ms
+[2025-07-08 03:26:35] [Rank 0] step:2921/10000 train_time:234065ms step_avg:80.13ms
+[2025-07-08 03:26:37] [Rank 0] step:2941/10000 train_time:235560ms step_avg:80.10ms
+[2025-07-08 03:26:38] [Rank 0] step:2961/10000 train_time:237056ms step_avg:80.06ms
+[2025-07-08 03:26:40] [Rank 0] step:2981/10000 train_time:239199ms step_avg:80.24ms
+[2025-07-08 03:26:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:26:43] [Rank 0] PRINT: step:3000/10000 train_loss:0.9167 val_loss:0.8934 train_time:240695ms step_avg:80.23ms
+[2025-07-08 03:26:43] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:26:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:26:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:32:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:32:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:32:08] [Rank 0] Total Loss: 5.2789
+[2025-07-08 03:32:08] [Rank 0] Total FTA: 0.9159
+[2025-07-08 03:32:08] [Rank 0] Group 0 Loss: 5.7343
+[2025-07-08 03:32:08] [Rank 0] Group 1 Loss: 5.1102
+[2025-07-08 03:32:08] [Rank 0] Group 2 Loss: 5.0573
+[2025-07-08 03:32:08] [Rank 0] Group 3 Loss: 5.2254
+[2025-07-08 03:32:08] [Rank 0] Group 4 Loss: 5.2095
+[2025-07-08 03:32:08] [Rank 0] Group 5 Loss: 5.1328
+[2025-07-08 03:32:08] [Rank 0] Group 6 Loss: 5.1674
+[2025-07-08 03:32:08] [Rank 0] Group 7 Loss: 5.2403
+[2025-07-08 03:32:08] [Rank 0] Group 8 Loss: 5.2769
+[2025-07-08 03:32:08] [Rank 0] Group 9 Loss: 5.2498
+[2025-07-08 03:32:08] [Rank 0] Group 10 Loss: 5.2407
+[2025-07-08 03:32:08] [Rank 0] Group 11 Loss: 5.2673
+[2025-07-08 03:32:08] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 03:32:08] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 03:32:08] [Rank 0] Group 2 FTA: 0.9167
+[2025-07-08 03:32:08] [Rank 0] Group 3 FTA: 0.8906
+[2025-07-08 03:32:08] [Rank 0] Group 4 FTA: 0.9010
+[2025-07-08 03:32:08] [Rank 0] Group 5 FTA: 0.9115
+[2025-07-08 03:32:08] [Rank 0] Group 6 FTA: 0.9036
+[2025-07-08 03:32:08] [Rank 0] Group 7 FTA: 0.9036
+[2025-07-08 03:32:08] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-08 03:32:08] [Rank 0] Group 9 FTA: 0.8828
+[2025-07-08 03:32:08] [Rank 0] Group 10 FTA: 0.8848
+[2025-07-08 03:32:08] [Rank 0] Group 11 FTA: 0.8779
+[2025-07-08 03:32:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:32:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:32:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:32:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:32:10] [Rank 0] step:3001/10000 train_time:240715ms step_avg:80.21ms
+[2025-07-08 03:32:11] [Rank 0] step:3021/10000 train_time:242208ms step_avg:80.17ms
+[2025-07-08 03:32:13] [Rank 0] step:3041/10000 train_time:243695ms step_avg:80.14ms
+[2025-07-08 03:32:15] [Rank 0] step:3061/10000 train_time:245241ms step_avg:80.12ms
+[2025-07-08 03:32:16] [Rank 0] step:3081/10000 train_time:247347ms step_avg:80.28ms
+[2025-07-08 03:32:18] [Rank 0] step:3101/10000 train_time:248837ms step_avg:80.24ms
+[2025-07-08 03:32:19] [Rank 0] step:3121/10000 train_time:250328ms step_avg:80.21ms
+[2025-07-08 03:32:21] [Rank 0] step:3141/10000 train_time:251818ms step_avg:80.17ms
+[2025-07-08 03:32:23] [Rank 0] step:3161/10000 train_time:253958ms step_avg:80.34ms
+[2025-07-08 03:32:24] [Rank 0] step:3181/10000 train_time:255450ms step_avg:80.30ms
+[2025-07-08 03:32:26] [Rank 0] step:3201/10000 train_time:256941ms step_avg:80.27ms
+[2025-07-08 03:32:27] [Rank 0] step:3221/10000 train_time:258435ms step_avg:80.23ms
+[2025-07-08 03:32:29] [Rank 0] step:3241/10000 train_time:260048ms step_avg:80.24ms
+[2025-07-08 03:32:31] [Rank 0] step:3261/10000 train_time:262132ms step_avg:80.38ms
+[2025-07-08 03:32:32] [Rank 0] step:3281/10000 train_time:263624ms step_avg:80.35ms
+[2025-07-08 03:32:34] [Rank 0] step:3301/10000 train_time:265118ms step_avg:80.31ms
step:3301/10000 train_time:265118ms step_avg:80.31ms +[2025-07-08 03:32:35] [Rank 0] step:3321/10000 train_time:266612ms step_avg:80.28ms +[2025-07-08 03:32:35] [Rank 0] step:3321/10000 train_time:266612ms step_avg:80.28ms +[2025-07-08 03:32:37] [Rank 0] step:3341/10000 train_time:268341ms step_avg:80.32ms +[2025-07-08 03:32:37] [Rank 0] step:3341/10000 train_time:268341ms step_avg:80.32ms +[2025-07-08 03:32:39] [Rank 0] step:3361/10000 train_time:269836ms step_avg:80.28ms +[2025-07-08 03:32:39] [Rank 0] step:3361/10000 train_time:269836ms step_avg:80.28ms +[2025-07-08 03:32:40] [Rank 0] step:3381/10000 train_time:271332ms step_avg:80.25ms +[2025-07-08 03:32:40] [Rank 0] step:3381/10000 train_time:271332ms step_avg:80.25ms +[2025-07-08 03:32:42] [Rank 0] step:3401/10000 train_time:272828ms step_avg:80.22ms +[2025-07-08 03:32:42] [Rank 0] step:3401/10000 train_time:272828ms step_avg:80.22ms +[2025-07-08 03:32:44] [Rank 0] step:3421/10000 train_time:274324ms step_avg:80.19ms +[2025-07-08 03:32:44] [Rank 0] step:3421/10000 train_time:274324ms step_avg:80.19ms +[2025-07-08 03:32:45] [Rank 0] step:3441/10000 train_time:276471ms step_avg:80.35ms +[2025-07-08 03:32:45] [Rank 0] step:3441/10000 train_time:276471ms step_avg:80.35ms +[2025-07-08 03:32:47] [Rank 0] step:3461/10000 train_time:277965ms step_avg:80.31ms +[2025-07-08 03:32:47] [Rank 0] step:3461/10000 train_time:277965ms step_avg:80.31ms +[2025-07-08 03:32:48] [Rank 0] step:3481/10000 train_time:279461ms step_avg:80.28ms +[2025-07-08 03:32:48] [Rank 0] step:3481/10000 train_time:279461ms step_avg:80.28ms +[2025-07-08 03:32:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 03:32:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 03:32:51] [Rank 0] PRINT: step:3500/10000 train_loss:0.8892 val_loss:0.8802 train_time:280957ms step_avg:80.27ms +[2025-07-08 03:32:51] [Rank 0] PRINT: step:3500/10000 train_loss:0.8892 val_loss:0.8802 train_time:280957ms step_avg:80.27ms +[2025-07-08 03:32:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 03:32:51] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 03:32:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 03:32:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
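A note on the sampling lines above: each detailed-evaluation pass announces a target of ~5000 samples but then reports 5633 after sampling. Overshoot like this is typical of stratified schemes that round per-group quotas up, or that keep whole per-group blocks together, rather than truncating to the exact target. The actual sampler is not shown in this log; the following Python sketch only illustrates that general pattern, with hypothetical names:

    import math
    from collections import defaultdict

    def stratified_sample(examples, target=5000):
        # Bucket examples by their group label (12 groups in this run).
        by_group = defaultdict(list)
        for ex in examples:
            by_group[ex["group"]].append(ex)
        # Equal per-group quota, rounded up -- rounding up is one way the
        # final count can land above the nominal target.
        quota = math.ceil(target / len(by_group))
        sampled = []
        for members in by_group.values():
            sampled.extend(members[:quota])
        return sampled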
+[2025-07-08 03:32:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:38:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:38:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:38:15] [Rank 0] Total Loss: 5.3773
+[2025-07-08 03:38:15] [Rank 0] Total FTA: 0.9364
+[2025-07-08 03:38:15] [Rank 0] Group 0 Loss: 5.7785
+[2025-07-08 03:38:15] [Rank 0] Group 1 Loss: 5.2981
+[2025-07-08 03:38:15] [Rank 0] Group 2 Loss: 5.1561
+[2025-07-08 03:38:15] [Rank 0] Group 3 Loss: 5.4556
+[2025-07-08 03:38:15] [Rank 0] Group 4 Loss: 5.3509
+[2025-07-08 03:38:15] [Rank 0] Group 5 Loss: 5.2653
+[2025-07-08 03:38:16] [Rank 0] Group 6 Loss: 5.1977
+[2025-07-08 03:38:16] [Rank 0] Group 7 Loss: 5.3441
+[2025-07-08 03:38:16] [Rank 0] Group 8 Loss: 5.3709
+[2025-07-08 03:38:16] [Rank 0] Group 9 Loss: 5.2624
+[2025-07-08 03:38:16] [Rank 0] Group 10 Loss: 5.2916
+[2025-07-08 03:38:16] [Rank 0] Group 11 Loss: 5.3651
+[2025-07-08 03:38:16] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 03:38:16] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 03:38:16] [Rank 0] Group 2 FTA: 0.9245
+[2025-07-08 03:38:16] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 03:38:16] [Rank 0] Group 4 FTA: 0.9583
+[2025-07-08 03:38:16] [Rank 0] Group 5 FTA: 0.9089
+[2025-07-08 03:38:16] [Rank 0] Group 6 FTA: 0.8411
+[2025-07-08 03:38:16] [Rank 0] Group 7 FTA: 0.9036
+[2025-07-08 03:38:16] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-08 03:38:16] [Rank 0] Group 9 FTA: 0.9180
+[2025-07-08 03:38:16] [Rank 0] Group 10 FTA: 0.9023
+[2025-07-08 03:38:16] [Rank 0] Group 11 FTA: 0.9229
+[2025-07-08 03:38:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:38:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:38:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:38:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:38:17] [Rank 0] step:3501/10000 train_time:280977ms step_avg:80.26ms
+[2025-07-08 03:38:19] [Rank 0] step:3521/10000 train_time:283124ms step_avg:80.41ms
+[2025-07-08 03:38:21] [Rank 0] step:3541/10000 train_time:284610ms step_avg:80.38ms
+[2025-07-08 03:38:22] [Rank 0] step:3561/10000 train_time:286099ms step_avg:80.34ms
+[2025-07-08 03:38:24] [Rank 0] step:3581/10000 train_time:287588ms step_avg:80.31ms
+[2025-07-08 03:38:26] [Rank 0] step:3601/10000 train_time:289331ms step_avg:80.35ms
+[2025-07-08 03:38:27] [Rank 0] step:3621/10000 train_time:291230ms step_avg:80.43ms
+[2025-07-08 03:38:29] [Rank 0] step:3641/10000 train_time:292719ms step_avg:80.40ms
+[2025-07-08 03:38:30] [Rank 0] step:3661/10000 train_time:294210ms step_avg:80.36ms
+[2025-07-08 03:38:32] [Rank 0] step:3681/10000 train_time:295701ms step_avg:80.33ms
+[2025-07-08 03:38:34] [Rank 0] step:3701/10000 train_time:297845ms step_avg:80.48ms
+[2025-07-08 03:38:35] [Rank 0] step:3721/10000 train_time:299338ms step_avg:80.45ms
+[2025-07-08 03:38:37] [Rank 0] step:3741/10000 train_time:300833ms step_avg:80.42ms
+[2025-07-08 03:38:38] [Rank 0] step:3761/10000 train_time:302327ms step_avg:80.38ms
+[2025-07-08 03:38:41] [Rank 0] step:3781/10000 train_time:304480ms step_avg:80.53ms
+[2025-07-08 03:38:42] [Rank 0] step:3801/10000 train_time:305955ms step_avg:80.49ms
+[2025-07-08 03:38:44] [Rank 0] step:3821/10000 train_time:307448ms step_avg:80.46ms
+[2025-07-08 03:38:45] [Rank 0] step:3841/10000 train_time:308944ms step_avg:80.43ms
+[2025-07-08 03:38:47] [Rank 0] step:3861/10000 train_time:310440ms step_avg:80.40ms
+[2025-07-08 03:38:48] [Rank 0] step:3881/10000 train_time:312262ms step_avg:80.46ms
+[2025-07-08 03:38:50] [Rank 0] step:3901/10000 train_time:313759ms step_avg:80.43ms
+[2025-07-08 03:38:51] [Rank 0] step:3921/10000 train_time:315255ms step_avg:80.40ms
+[2025-07-08 03:38:53] [Rank 0] step:3941/10000 train_time:316750ms step_avg:80.37ms
+[2025-07-08 03:38:55] [Rank 0] step:3961/10000 train_time:318247ms step_avg:80.35ms
+[2025-07-08 03:38:57] [Rank 0] step:3981/10000 train_time:320389ms step_avg:80.48ms
+[2025-07-08 03:38:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:38:59] [Rank 0] PRINT: step:4000/10000 train_loss:0.8804 val_loss:0.8742 train_time:321886ms step_avg:80.47ms
+[2025-07-08 03:38:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:38:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
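The recurring divisibility warning is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches fit and the remaining 131072 tokens (exactly half a batch) are skipped on every validation pass. A minimal sketch of the check, with illustrative variable names rather than the script's actual ones:

    val_tokens = 1966080      # values quoted in the warning above
    val_batch_size = 262144

    full_batches, leftover = divmod(val_tokens, val_batch_size)
    # full_batches == 7, leftover == 131072 (exactly half a batch)
    if leftover:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")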
+[2025-07-08 03:38:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:44:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:44:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:44:23] [Rank 0] Total Loss: 5.4139
+[2025-07-08 03:44:23] [Rank 0] Total FTA: 0.9185
+[2025-07-08 03:44:23] [Rank 0] Group 0 Loss: 5.6156
+[2025-07-08 03:44:23] [Rank 0] Group 1 Loss: 5.1695
+[2025-07-08 03:44:23] [Rank 0] Group 2 Loss: 5.4075
+[2025-07-08 03:44:23] [Rank 0] Group 3 Loss: 5.4122
+[2025-07-08 03:44:24] [Rank 0] Group 4 Loss: 5.4256
+[2025-07-08 03:44:24] [Rank 0] Group 5 Loss: 5.3559
+[2025-07-08 03:44:24] [Rank 0] Group 6 Loss: 5.3573
+[2025-07-08 03:44:24] [Rank 0] Group 7 Loss: 5.3999
+[2025-07-08 03:44:24] [Rank 0] Group 8 Loss: 5.4194
+[2025-07-08 03:44:24] [Rank 0] Group 9 Loss: 5.3959
+[2025-07-08 03:44:24] [Rank 0] Group 10 Loss: 5.3825
+[2025-07-08 03:44:24] [Rank 0] Group 11 Loss: 5.4189
+[2025-07-08 03:44:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 03:44:24] [Rank 0] Group 1 FTA: 0.8438
+[2025-07-08 03:44:24] [Rank 0] Group 2 FTA: 0.8464
+[2025-07-08 03:44:24] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 03:44:24] [Rank 0] Group 4 FTA: 0.9010
+[2025-07-08 03:44:24] [Rank 0] Group 5 FTA: 0.9115
+[2025-07-08 03:44:24] [Rank 0] Group 6 FTA: 0.9271
+[2025-07-08 03:44:24] [Rank 0] Group 7 FTA: 0.9167
+[2025-07-08 03:44:24] [Rank 0] Group 8 FTA: 0.9089
+[2025-07-08 03:44:24] [Rank 0] Group 9 FTA: 0.8672
+[2025-07-08 03:44:24] [Rank 0] Group 10 FTA: 0.9023
+[2025-07-08 03:44:24] [Rank 0] Group 11 FTA: 0.9131
+[2025-07-08 03:44:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:44:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:44:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:44:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:44:25] [Rank 0] step:4001/10000 train_time:321906ms step_avg:80.46ms
+[2025-07-08 03:44:27] [Rank 0] step:4021/10000 train_time:323398ms step_avg:80.43ms
+[2025-07-08 03:44:28] [Rank 0] step:4041/10000 train_time:324885ms step_avg:80.40ms
+[2025-07-08 03:44:30] [Rank 0] step:4061/10000 train_time:327040ms step_avg:80.53ms
+[2025-07-08 03:44:32] [Rank 0] step:4081/10000 train_time:328526ms step_avg:80.50ms
+[2025-07-08 03:44:33] [Rank 0] step:4101/10000 train_time:330016ms step_avg:80.47ms
+[2025-07-08 03:44:35] [Rank 0] step:4121/10000 train_time:331506ms step_avg:80.44ms
+[2025-07-08 03:44:37] [Rank 0] step:4141/10000 train_time:333253ms step_avg:80.48ms
+[2025-07-08 03:44:38] [Rank 0] step:4161/10000 train_time:335148ms step_avg:80.54ms
+[2025-07-08 03:44:40] [Rank 0] step:4181/10000 train_time:336637ms step_avg:80.52ms
+[2025-07-08 03:44:41] [Rank 0] step:4201/10000 train_time:338129ms step_avg:80.49ms
+[2025-07-08 03:44:43] [Rank 0] step:4221/10000 train_time:339621ms step_avg:80.46ms
+[2025-07-08 03:44:45] [Rank 0] step:4241/10000 train_time:341760ms step_avg:80.58ms
+[2025-07-08 03:44:46] [Rank 0] step:4261/10000 train_time:343254ms step_avg:80.56ms
+[2025-07-08 03:44:48] [Rank 0] step:4281/10000 train_time:344749ms step_avg:80.53ms
+[2025-07-08 03:44:49] [Rank 0] step:4301/10000 train_time:346242ms step_avg:80.50ms
+[2025-07-08 03:44:52] [Rank 0] step:4321/10000 train_time:347736ms step_avg:80.48ms
+[2025-07-08 03:44:53] [Rank 0] step:4341/10000 train_time:349870ms step_avg:80.60ms
+[2025-07-08 03:44:55] [Rank 0] step:4361/10000 train_time:351364ms step_avg:80.57ms
+[2025-07-08 03:44:56] [Rank 0] step:4381/10000 train_time:352859ms step_avg:80.54ms
+[2025-07-08 03:44:58] [Rank 0] step:4401/10000 train_time:354355ms step_avg:80.52ms
+[2025-07-08 03:45:00] [Rank 0] step:4421/10000 train_time:356515ms step_avg:80.64ms
+[2025-07-08 03:45:01] [Rank 0] step:4441/10000 train_time:358010ms step_avg:80.61ms
+[2025-07-08 03:45:03] [Rank 0] step:4461/10000 train_time:359507ms step_avg:80.59ms
+[2025-07-08 03:45:04] [Rank 0] step:4481/10000 train_time:361001ms step_avg:80.56ms
+[2025-07-08 03:45:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:45:07] [Rank 0] PRINT: step:4500/10000 train_loss:0.8753 val_loss:0.8715 train_time:362496ms step_avg:80.55ms
+[2025-07-08 03:45:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:45:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
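step_avg in the step lines is simply the cumulative wall-clock training time divided by the current step number, so it drifts up slightly whenever a slow step (for instance one adjacent to an evaluation pass) is folded into the average. Reconstructed from the printed numbers (an assumed formula, not the script's actual code):

    train_time_ms = 362496  # cumulative train_time at step 4500 above
    step = 4500
    print(f"step:{step}/10000 train_time:{train_time_ms}ms "
          f"step_avg:{train_time_ms / step:.2f}ms")
    # -> step:4500/10000 train_time:362496ms step_avg:80.55ms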
+[2025-07-08 03:45:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:50:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:50:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:50:31] [Rank 0] Total Loss: 5.4386
+[2025-07-08 03:50:31] [Rank 0] Total FTA: 0.9068
+[2025-07-08 03:50:31] [Rank 0] Group 0 Loss: 5.6816
+[2025-07-08 03:50:31] [Rank 0] Group 1 Loss: 5.2394
+[2025-07-08 03:50:31] [Rank 0] Group 2 Loss: 5.3636
+[2025-07-08 03:50:31] [Rank 0] Group 3 Loss: 5.5180
+[2025-07-08 03:50:31] [Rank 0] Group 4 Loss: 5.4148
+[2025-07-08 03:50:31] [Rank 0] Group 5 Loss: 5.3594
+[2025-07-08 03:50:31] [Rank 0] Group 6 Loss: 5.2868
+[2025-07-08 03:50:31] [Rank 0] Group 7 Loss: 5.4721
+[2025-07-08 03:50:31] [Rank 0] Group 8 Loss: 5.4290
+[2025-07-08 03:50:31] [Rank 0] Group 9 Loss: 5.4020
+[2025-07-08 03:50:31] [Rank 0] Group 10 Loss: 5.4135
+[2025-07-08 03:50:31] [Rank 0] Group 11 Loss: 5.4373
+[2025-07-08 03:50:31] [Rank 0] Group 0 FTA: 0.8479
+[2025-07-08 03:50:31] [Rank 0] Group 1 FTA: 0.8438
+[2025-07-08 03:50:31] [Rank 0] Group 2 FTA: 0.9297
+[2025-07-08 03:50:31] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-08 03:50:31] [Rank 0] Group 4 FTA: 0.9323
+[2025-07-08 03:50:31] [Rank 0] Group 5 FTA: 0.9297
+[2025-07-08 03:50:31] [Rank 0] Group 6 FTA: 0.9167
+[2025-07-08 03:50:31] [Rank 0] Group 7 FTA: 0.8906
+[2025-07-08 03:50:31] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-08 03:50:31] [Rank 0] Group 9 FTA: 0.9258
+[2025-07-08 03:50:31] [Rank 0] Group 10 FTA: 0.9121
+[2025-07-08 03:50:31] [Rank 0] Group 11 FTA: 0.9160
+[2025-07-08 03:50:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:50:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:50:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:50:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:50:33] [Rank 0] step:4501/10000 train_time:362628ms step_avg:80.57ms
+[2025-07-08 03:50:35] [Rank 0] step:4521/10000 train_time:364721ms step_avg:80.67ms
+[2025-07-08 03:50:36] [Rank 0] step:4541/10000 train_time:366208ms step_avg:80.64ms
+[2025-07-08 03:50:38] [Rank 0] step:4561/10000 train_time:367696ms step_avg:80.62ms
+[2025-07-08 03:50:39] [Rank 0] step:4581/10000 train_time:369186ms step_avg:80.59ms
+[2025-07-08 03:50:41] [Rank 0] step:4601/10000 train_time:370913ms step_avg:80.62ms
+[2025-07-08 03:50:42] [Rank 0] step:4621/10000 train_time:372399ms step_avg:80.59ms
+[2025-07-08 03:50:44] [Rank 0] step:4641/10000 train_time:373887ms step_avg:80.56ms
+[2025-07-08 03:50:45] [Rank 0] step:4661/10000 train_time:375376ms step_avg:80.54ms
+[2025-07-08 03:50:47] [Rank 0] step:4681/10000 train_time:376868ms step_avg:80.51ms
+[2025-07-08 03:50:49] [Rank 0] step:4701/10000 train_time:378594ms step_avg:80.53ms
+[2025-07-08 03:50:50] [Rank 0] step:4721/10000 train_time:380086ms step_avg:80.51ms
+[2025-07-08 03:50:52] [Rank 0] step:4741/10000 train_time:381578ms step_avg:80.48ms
+[2025-07-08 03:50:53] [Rank 0] step:4761/10000 train_time:383069ms step_avg:80.46ms
+[2025-07-08 03:50:55] [Rank 0] step:4781/10000 train_time:385223ms step_avg:80.57ms
+[2025-07-08 03:50:57] [Rank 0] step:4801/10000 train_time:386714ms step_avg:80.55ms
+[2025-07-08 03:50:58] [Rank 0] step:4821/10000 train_time:388207ms step_avg:80.52ms
+[2025-07-08 03:51:00] [Rank 0] step:4841/10000 train_time:389699ms step_avg:80.50ms
+[2025-07-08 03:51:01] [Rank 0] step:4861/10000 train_time:391446ms step_avg:80.53ms
+[2025-07-08 03:51:03] [Rank 0] step:4881/10000 train_time:392922ms step_avg:80.50ms
+[2025-07-08 03:51:04] [Rank 0] step:4901/10000 train_time:394415ms step_avg:80.48ms
+[2025-07-08 03:51:06] [Rank 0] step:4921/10000 train_time:395908ms step_avg:80.45ms
+[2025-07-08 03:51:07] [Rank 0] step:4941/10000 train_time:397403ms step_avg:80.43ms
+[2025-07-08 03:51:10] [Rank 0] step:4961/10000 train_time:399559ms step_avg:80.54ms
+[2025-07-08 03:51:11] [Rank 0] step:4981/10000 train_time:401051ms step_avg:80.52ms
+[2025-07-08 03:51:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:51:14] [Rank 0] PRINT: step:5000/10000 train_loss:0.8715 val_loss:0.8683 train_time:402545ms step_avg:80.51ms
+[2025-07-08 03:51:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:51:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 03:51:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 03:56:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 03:56:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 03:56:39] [Rank 0] Total Loss: 5.4533
+[2025-07-08 03:56:39] [Rank 0] Total FTA: 0.8866
+[2025-07-08 03:56:39] [Rank 0] Group 0 Loss: 5.9682
+[2025-07-08 03:56:39] [Rank 0] Group 1 Loss: 5.3134
+[2025-07-08 03:56:39] [Rank 0] Group 2 Loss: 5.2507
+[2025-07-08 03:56:39] [Rank 0] Group 3 Loss: 5.3671
+[2025-07-08 03:56:39] [Rank 0] Group 4 Loss: 5.4330
+[2025-07-08 03:56:39] [Rank 0] Group 5 Loss: 5.3241
+[2025-07-08 03:56:39] [Rank 0] Group 6 Loss: 5.3004
+[2025-07-08 03:56:39] [Rank 0] Group 7 Loss: 5.3984
+[2025-07-08 03:56:39] [Rank 0] Group 8 Loss: 5.4006
+[2025-07-08 03:56:39] [Rank 0] Group 9 Loss: 5.3723
+[2025-07-08 03:56:39] [Rank 0] Group 10 Loss: 5.4035
+[2025-07-08 03:56:39] [Rank 0] Group 11 Loss: 5.4261
+[2025-07-08 03:56:39] [Rank 0] Group 0 FTA: 0.8388
+[2025-07-08 03:56:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 03:56:39] [Rank 0] Group 2 FTA: 0.7448
+[2025-07-08 03:56:39] [Rank 0] Group 3 FTA: 0.8073
+[2025-07-08 03:56:39] [Rank 0] Group 4 FTA: 0.9609
+[2025-07-08 03:56:39] [Rank 0] Group 5 FTA: 0.8802
+[2025-07-08 03:56:39] [Rank 0] Group 6 FTA: 0.8906
+[2025-07-08 03:56:39] [Rank 0] Group 7 FTA: 0.9010
+[2025-07-08 03:56:39] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-08 03:56:39] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-08 03:56:39] [Rank 0] Group 10 FTA: 0.9004
+[2025-07-08 03:56:39] [Rank 0] Group 11 FTA: 0.9092
+[2025-07-08 03:56:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 03:56:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 03:56:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 03:56:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 03:56:40] [Rank 0] step:5001/10000 train_time:402566ms step_avg:80.50ms
+[2025-07-08 03:56:42] [Rank 0] step:5021/10000 train_time:404069ms step_avg:80.48ms
+[2025-07-08 03:56:44] [Rank 0] step:5041/10000 train_time:405716ms step_avg:80.48ms
+[2025-07-08 03:56:45] [Rank 0] step:5061/10000 train_time:407873ms step_avg:80.59ms
+[2025-07-08 03:56:47] [Rank 0] step:5081/10000 train_time:409360ms step_avg:80.57ms
+[2025-07-08 03:56:48] [Rank 0] step:5101/10000 train_time:410850ms step_avg:80.54ms
+[2025-07-08 03:56:50] [Rank 0] step:5121/10000 train_time:412338ms step_avg:80.52ms
+[2025-07-08 03:56:52] [Rank 0] step:5141/10000 train_time:414488ms step_avg:80.62ms
+[2025-07-08 03:56:54] [Rank 0] step:5161/10000 train_time:415977ms step_avg:80.60ms
+[2025-07-08 03:56:55] [Rank 0] step:5181/10000 train_time:417468ms step_avg:80.58ms
+[2025-07-08 03:56:56] [Rank 0] step:5201/10000 train_time:418959ms step_avg:80.55ms
+[2025-07-08 03:56:59] [Rank 0] step:5221/10000 train_time:420704ms step_avg:80.58ms
+[2025-07-08 03:57:00] [Rank 0] step:5241/10000 train_time:422597ms step_avg:80.63ms
+[2025-07-08 03:57:02] [Rank 0] step:5261/10000 train_time:424089ms step_avg:80.61ms
+[2025-07-08 03:57:03] [Rank 0] step:5281/10000 train_time:425581ms step_avg:80.59ms
+[2025-07-08 03:57:05] [Rank 0] step:5301/10000 train_time:427074ms step_avg:80.56ms
+[2025-07-08 03:57:07] [Rank 0] step:5321/10000 train_time:429241ms step_avg:80.67ms
+[2025-07-08 03:57:08] [Rank 0] step:5341/10000 train_time:430733ms step_avg:80.65ms
+[2025-07-08 03:57:10] [Rank 0] step:5361/10000 train_time:432226ms step_avg:80.62ms
+[2025-07-08 03:57:11] [Rank 0] step:5381/10000 train_time:433721ms step_avg:80.60ms
+[2025-07-08 03:57:13] [Rank 0] step:5401/10000 train_time:435214ms step_avg:80.58ms
+[2025-07-08 03:57:15] [Rank 0] step:5421/10000 train_time:437351ms step_avg:80.68ms
+[2025-07-08 03:57:16] [Rank 0] step:5441/10000 train_time:438844ms step_avg:80.66ms
+[2025-07-08 03:57:18] [Rank 0] step:5461/10000 train_time:440337ms step_avg:80.63ms
+[2025-07-08 03:57:19] [Rank 0] step:5481/10000 train_time:441833ms step_avg:80.61ms
+[2025-07-08 03:57:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 03:57:22] [Rank 0] PRINT: step:5500/10000 train_loss:0.8686 val_loss:0.8668 train_time:443562ms step_avg:80.65ms
+[2025-07-08 03:57:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 03:57:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
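The per-group blocks always follow the same shape: one total loss and one total FTA, then one loss per group 0-11, then one FTA per group 0-11. The log never expands "FTA"; given the 0-1 range, first-token accuracy is a plausible reading, but that is an inference, not something the log states. The formatting that would produce these lines is straightforward; a sketch with hypothetical names:

    def log_detailed_results(total_loss, total_fta, group_loss, group_fta, log):
        # group_loss / group_fta: dicts keyed by group id (0..11 in this run)
        log(f"Total Loss: {total_loss:.4f}")
        log(f"Total FTA: {total_fta:.4f}")
        for g in sorted(group_loss):
            log(f"Group {g} Loss: {group_loss[g]:.4f}")
        for g in sorted(group_fta):
            log(f"Group {g} FTA: {group_fta[g]:.4f}")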
+[2025-07-08 03:57:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:02:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:02:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:02:47] [Rank 0] Total Loss: 5.4612
+[2025-07-08 04:02:47] [Rank 0] Total FTA: 0.9560
+[2025-07-08 04:02:47] [Rank 0] Group 0 Loss: 5.7803
+[2025-07-08 04:02:47] [Rank 0] Group 1 Loss: 5.3178
+[2025-07-08 04:02:47] [Rank 0] Group 2 Loss: 5.3214
+[2025-07-08 04:02:47] [Rank 0] Group 3 Loss: 5.3392
+[2025-07-08 04:02:47] [Rank 0] Group 4 Loss: 5.4556
+[2025-07-08 04:02:47] [Rank 0] Group 5 Loss: 5.4069
+[2025-07-08 04:02:47] [Rank 0] Group 6 Loss: 5.3733
+[2025-07-08 04:02:47] [Rank 0] Group 7 Loss: 5.4246
+[2025-07-08 04:02:47] [Rank 0] Group 8 Loss: 5.4506
+[2025-07-08 04:02:47] [Rank 0] Group 9 Loss: 5.3899
+[2025-07-08 04:02:47] [Rank 0] Group 10 Loss: 5.4617
+[2025-07-08 04:02:47] [Rank 0] Group 11 Loss: 5.4645
+[2025-07-08 04:02:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 04:02:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 04:02:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 04:02:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 04:02:47] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-08 04:02:47] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-08 04:02:47] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-08 04:02:47] [Rank 0] Group 7 FTA: 0.9193
+[2025-07-08 04:02:47] [Rank 0] Group 8 FTA: 0.9297
+[2025-07-08 04:02:47] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-08 04:02:47] [Rank 0] Group 10 FTA: 0.9160
+[2025-07-08 04:02:47] [Rank 0] Group 11 FTA: 0.9307
+[2025-07-08 04:02:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:02:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 04:02:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:02:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:02:49] [Rank 0] step:5501/10000 train_time:443585ms step_avg:80.64ms
+[2025-07-08 04:02:50] [Rank 0] step:5521/10000 train_time:445085ms step_avg:80.62ms
+[2025-07-08 04:02:52] [Rank 0] step:5541/10000 train_time:446573ms step_avg:80.59ms
+[2025-07-08 04:02:53] [Rank 0] step:5561/10000 train_time:448062ms step_avg:80.57ms
+[2025-07-08 04:02:55] [Rank 0] step:5581/10000 train_time:449549ms step_avg:80.55ms
+[2025-07-08 04:02:57] [Rank 0] step:5601/10000 train_time:451690ms step_avg:80.64ms
+[2025-07-08 04:02:58] [Rank 0] step:5621/10000 train_time:453180ms step_avg:80.62ms
+[2025-07-08 04:03:00] [Rank 0] step:5641/10000 train_time:454668ms step_avg:80.60ms
+[2025-07-08 04:03:01] [Rank 0] step:5661/10000 train_time:456158ms step_avg:80.58ms
+[2025-07-08 04:03:03] [Rank 0] step:5681/10000 train_time:457788ms step_avg:80.58ms
+[2025-07-08 04:03:04] [Rank 0] step:5701/10000 train_time:459281ms step_avg:80.56ms
+[2025-07-08 04:03:06] [Rank 0] step:5721/10000 train_time:460773ms step_avg:80.54ms
+[2025-07-08 04:03:07] [Rank 0] step:5741/10000 train_time:462264ms step_avg:80.52ms
+[2025-07-08 04:03:10] [Rank 0] step:5761/10000 train_time:463756ms step_avg:80.50ms
+[2025-07-08 04:03:11] [Rank 0] step:5781/10000 train_time:465899ms step_avg:80.59ms
+[2025-07-08 04:03:13] [Rank 0] step:5801/10000 train_time:467391ms step_avg:80.57ms
+[2025-07-08 04:03:14] [Rank 0] step:5821/10000 train_time:468884ms step_avg:80.55ms
+[2025-07-08 04:03:16] [Rank 0] step:5841/10000 train_time:470379ms step_avg:80.53ms
+[2025-07-08 04:03:18] [Rank 0] step:5861/10000 train_time:472510ms step_avg:80.62ms
+[2025-07-08 04:03:19] [Rank 0] step:5881/10000 train_time:474002ms step_avg:80.60ms
+[2025-07-08 04:03:21] [Rank 0] step:5901/10000 train_time:475495ms step_avg:80.58ms
+[2025-07-08 04:03:22] [Rank 0] step:5921/10000 train_time:476989ms step_avg:80.56ms
+[2025-07-08 04:03:24] [Rank 0] step:5941/10000 train_time:478483ms step_avg:80.54ms
+[2025-07-08 04:03:26] [Rank 0] step:5961/10000 train_time:480622ms step_avg:80.63ms
+[2025-07-08 04:03:27] [Rank 0] step:5981/10000 train_time:482114ms step_avg:80.61ms
+[2025-07-08 04:03:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:03:30] [Rank 0] PRINT: step:6000/10000 train_loss:0.8665 val_loss:0.8663 train_time:483608ms step_avg:80.60ms
+[2025-07-08 04:03:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:03:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:03:30] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 04:03:30] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 04:08:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 04:08:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 04:08:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 04:08:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 04:08:55] [Rank 0] Total Loss: 5.4784 +[2025-07-08 04:08:55] [Rank 0] Total Loss: 5.4784 +[2025-07-08 04:08:55] [Rank 0] Total FTA: 0.9183 +[2025-07-08 04:08:55] [Rank 0] Total FTA: 0.9183 +[2025-07-08 04:08:55] [Rank 0] Group 0 Loss: 5.6824 +[2025-07-08 04:08:55] [Rank 0] Group 0 Loss: 5.6824 +[2025-07-08 04:08:55] [Rank 0] Group 1 Loss: 5.5010 +[2025-07-08 04:08:55] [Rank 0] Group 1 Loss: 5.5010 +[2025-07-08 04:08:55] [Rank 0] Group 2 Loss: 5.5388 +[2025-07-08 04:08:55] [Rank 0] Group 2 Loss: 5.5388 +[2025-07-08 04:08:55] [Rank 0] Group 3 Loss: 5.4233 +[2025-07-08 04:08:55] [Rank 0] Group 3 Loss: 5.4233 +[2025-07-08 04:08:55] [Rank 0] Group 4 Loss: 5.4415 +[2025-07-08 04:08:55] [Rank 0] Group 4 Loss: 5.4415 +[2025-07-08 04:08:55] [Rank 0] Group 5 Loss: 5.4340 +[2025-07-08 04:08:55] [Rank 0] Group 5 Loss: 5.4340 +[2025-07-08 04:08:55] [Rank 0] Group 6 Loss: 5.3019 +[2025-07-08 04:08:55] [Rank 0] Group 6 Loss: 5.3019 +[2025-07-08 04:08:55] [Rank 0] Group 7 Loss: 5.4177 +[2025-07-08 04:08:55] [Rank 0] Group 7 Loss: 5.4177 +[2025-07-08 04:08:55] [Rank 0] Group 8 Loss: 5.5104 +[2025-07-08 04:08:55] [Rank 0] Group 8 Loss: 5.5104 +[2025-07-08 04:08:55] [Rank 0] Group 9 Loss: 5.3881 +[2025-07-08 04:08:55] [Rank 0] Group 9 Loss: 5.3881 +[2025-07-08 04:08:55] [Rank 0] Group 10 Loss: 5.4540 +[2025-07-08 04:08:55] [Rank 0] Group 10 Loss: 5.4540 +[2025-07-08 04:08:55] [Rank 0] Group 11 Loss: 5.4567 +[2025-07-08 04:08:55] [Rank 0] Group 11 Loss: 5.4567 +[2025-07-08 04:08:55] [Rank 0] Group 0 FTA: 0.8192 +[2025-07-08 04:08:55] [Rank 0] Group 0 FTA: 0.8192 +[2025-07-08 04:08:55] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 04:08:55] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 04:08:55] [Rank 0] Group 2 FTA: 0.9167 +[2025-07-08 04:08:55] [Rank 0] Group 2 FTA: 0.9167 +[2025-07-08 04:08:55] [Rank 0] Group 3 FTA: 0.9062 +[2025-07-08 04:08:55] [Rank 0] Group 3 FTA: 0.9062 +[2025-07-08 04:08:55] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-08 04:08:55] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-08 04:08:55] [Rank 0] Group 5 FTA: 0.9245 +[2025-07-08 04:08:55] [Rank 0] Group 5 FTA: 0.9245 +[2025-07-08 04:08:55] [Rank 0] Group 6 FTA: 0.9271 +[2025-07-08 04:08:55] [Rank 0] Group 6 FTA: 0.9271 +[2025-07-08 04:08:55] [Rank 0] Group 7 FTA: 0.9115 +[2025-07-08 04:08:55] [Rank 0] Group 7 FTA: 0.9115 +[2025-07-08 04:08:55] [Rank 0] Group 8 FTA: 0.9193 +[2025-07-08 04:08:55] [Rank 0] Group 8 FTA: 0.9193 +[2025-07-08 04:08:55] [Rank 0] Group 9 FTA: 0.9336 +[2025-07-08 04:08:55] [Rank 0] Group 9 FTA: 0.9336 +[2025-07-08 04:08:55] [Rank 0] Group 10 FTA: 0.9434 +[2025-07-08 04:08:55] [Rank 0] Group 10 FTA: 0.9434 +[2025-07-08 04:08:55] [Rank 0] Group 11 FTA: 0.9170 +[2025-07-08 04:08:55] [Rank 0] Group 11 FTA: 0.9170 +[2025-07-08 04:08:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png +[2025-07-08 04:08:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png +[2025-07-08 04:08:56] [Rank 0] [✓] Per-Class FTA curve updated 
+[2025-07-08 04:08:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:08:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:08:56] [Rank 0] step:6001/10000 train_time:483628ms step_avg:80.59ms
+[2025-07-08 04:08:58] [Rank 0] step:6021/10000 train_time:485119ms step_avg:80.57ms
+[2025-07-08 04:09:00] [Rank 0] step:6041/10000 train_time:487265ms step_avg:80.66ms
+[2025-07-08 04:09:02] [Rank 0] step:6061/10000 train_time:488752ms step_avg:80.64ms
+[2025-07-08 04:09:03] [Rank 0] step:6081/10000 train_time:490243ms step_avg:80.62ms
+[2025-07-08 04:09:05] [Rank 0] step:6101/10000 train_time:491733ms step_avg:80.60ms
+[2025-07-08 04:09:06] [Rank 0] step:6121/10000 train_time:493221ms step_avg:80.58ms
+[2025-07-08 04:09:08] [Rank 0] step:6141/10000 train_time:494950ms step_avg:80.60ms
+[2025-07-08 04:09:09] [Rank 0] step:6161/10000 train_time:496441ms step_avg:80.58ms
+[2025-07-08 04:09:11] [Rank 0] step:6181/10000 train_time:497931ms step_avg:80.56ms
+[2025-07-08 04:09:12] [Rank 0] step:6201/10000 train_time:499422ms step_avg:80.54ms
+[2025-07-08 04:09:14] [Rank 0] step:6221/10000 train_time:501569ms step_avg:80.63ms
+[2025-07-08 04:09:16] [Rank 0] step:6241/10000 train_time:503060ms step_avg:80.61ms
+[2025-07-08 04:09:17] [Rank 0] step:6261/10000 train_time:504553ms step_avg:80.59ms
+[2025-07-08 04:09:19] [Rank 0] step:6281/10000 train_time:506046ms step_avg:80.57ms
+[2025-07-08 04:09:21] [Rank 0] step:6301/10000 train_time:507795ms step_avg:80.59ms
+[2025-07-08 04:09:22] [Rank 0] step:6321/10000 train_time:509432ms step_avg:80.59ms
+[2025-07-08 04:09:24] [Rank 0] step:6341/10000 train_time:510924ms step_avg:80.57ms
+[2025-07-08 04:09:25] [Rank 0] step:6361/10000 train_time:512419ms step_avg:80.56ms
+[2025-07-08 04:09:27] [Rank 0] step:6381/10000 train_time:513913ms step_avg:80.54ms
+[2025-07-08 04:09:28] [Rank 0] step:6401/10000 train_time:515646ms step_avg:80.56ms
+[2025-07-08 04:09:30] [Rank 0] step:6421/10000 train_time:517140ms step_avg:80.54ms
+[2025-07-08 04:09:31] [Rank 0] step:6441/10000 train_time:518637ms step_avg:80.52ms
+[2025-07-08 04:09:33] [Rank 0] step:6461/10000 train_time:520133ms step_avg:80.50ms
+[2025-07-08 04:09:35] [Rank 0] step:6481/10000 train_time:521684ms step_avg:80.49ms
+[2025-07-08 04:09:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:09:37] [Rank 0] PRINT: step:6500/10000 train_loss:0.8648 val_loss:0.8647 train_time:523363ms step_avg:80.52ms
+[2025-07-08 04:09:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:09:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
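[Editor's note] The entry that follows reports an evaluation set of 5633 samples rather than exactly 5000. That overshoot is typical of stratified sampling with per-class quotas: rounding each class's quota up (or enforcing a per-class minimum) pushes the total past the target. The exact rule the script uses is not visible in this log; the following is one plausible sketch, with all names hypothetical:

    import random
    from collections import defaultdict

    def stratified_sample(items, labels, target=5000, seed=0):
        # Keep every class represented: each class contributes
        # ceil(target * class_share) items, so the grand total can
        # overshoot the target (e.g. 5633 instead of 5000).
        rng = random.Random(seed)
        by_class = defaultdict(list)
        for item, lab in zip(items, labels):
            by_class[lab].append(item)
        picked, n = [], len(items)
        for lab, pool in by_class.items():
            quota = -(-target * len(pool) // n)  # ceiling division
            rng.shuffle(pool)
            picked.extend(pool[:quota])
        return picked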
+[2025-07-08 04:09:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:15:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:15:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:15:03] [Rank 0] Total Loss: 5.4785
+[2025-07-08 04:15:03] [Rank 0] Total FTA: 0.9460
+[2025-07-08 04:15:03] [Rank 0] Group 0 Loss: 5.7630
+[2025-07-08 04:15:03] [Rank 0] Group 1 Loss: 5.3880
+[2025-07-08 04:15:03] [Rank 0] Group 2 Loss: 5.3789
+[2025-07-08 04:15:03] [Rank 0] Group 3 Loss: 5.5032
+[2025-07-08 04:15:03] [Rank 0] Group 4 Loss: 5.4554
+[2025-07-08 04:15:03] [Rank 0] Group 5 Loss: 5.3577
+[2025-07-08 04:15:03] [Rank 0] Group 6 Loss: 5.3081
+[2025-07-08 04:15:03] [Rank 0] Group 7 Loss: 5.4500
+[2025-07-08 04:15:03] [Rank 0] Group 8 Loss: 5.4629
+[2025-07-08 04:15:03] [Rank 0] Group 9 Loss: 5.3915
+[2025-07-08 04:15:03] [Rank 0] Group 10 Loss: 5.4507
+[2025-07-08 04:15:03] [Rank 0] Group 11 Loss: 5.4970
+[2025-07-08 04:15:03] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 04:15:03] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 04:15:03] [Rank 0] Group 2 FTA: 0.9010
+[2025-07-08 04:15:03] [Rank 0] Group 3 FTA: 0.9479
+[2025-07-08 04:15:03] [Rank 0] Group 4 FTA: 0.9688
+[2025-07-08 04:15:03] [Rank 0] Group 5 FTA: 0.9297
+[2025-07-08 04:15:03] [Rank 0] Group 6 FTA: 0.9141
+[2025-07-08 04:15:03] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-08 04:15:03] [Rank 0] Group 8 FTA: 0.9115
+[2025-07-08 04:15:03] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-08 04:15:03] [Rank 0] Group 10 FTA: 0.9414
+[2025-07-08 04:15:03] [Rank 0] Group 11 FTA: 0.9326
+[2025-07-08 04:15:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:15:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
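[Editor's note] The four "[✓] ... saved" entries after every detailed evaluation come from redrawing the accumulated curves from scratch and overwriting the same PNG files each time. A minimal matplotlib sketch of that pattern (function and variable names here are hypothetical, not taken from the training script):

    import matplotlib
    matplotlib.use("Agg")  # headless backend for a training node
    import matplotlib.pyplot as plt

    def save_per_class_curves(history, out_path):
        # history: {group_id: [(step, value), ...]} accumulated across evals;
        # the file at out_path is overwritten on every call.
        fig, ax = plt.subplots()
        for gid, points in sorted(history.items()):
            steps, values = zip(*points)
            ax.plot(steps, values, label=f"Group {gid}")
        ax.set_xlabel("step")
        ax.legend(fontsize="x-small", ncol=2)
        fig.savefig(out_path, dpi=150)
        plt.close(fig)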
+[2025-07-08 04:15:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:15:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:15:05] [Rank 0] step:6501/10000 train_time:523384ms step_avg:80.51ms
+[2025-07-08 04:15:06] [Rank 0] step:6521/10000 train_time:524867ms step_avg:80.49ms
+[2025-07-08 04:15:08] [Rank 0] step:6541/10000 train_time:526356ms step_avg:80.47ms
+[2025-07-08 04:15:09] [Rank 0] step:6561/10000 train_time:527843ms step_avg:80.45ms
+[2025-07-08 04:15:11] [Rank 0] step:6581/10000 train_time:530004ms step_avg:80.54ms
+[2025-07-08 04:15:13] [Rank 0] step:6601/10000 train_time:531491ms step_avg:80.52ms
+[2025-07-08 04:15:14] [Rank 0] step:6621/10000 train_time:532980ms step_avg:80.50ms
+[2025-07-08 04:15:16] [Rank 0] step:6641/10000 train_time:534471ms step_avg:80.48ms
+[2025-07-08 04:15:18] [Rank 0] step:6661/10000 train_time:535962ms step_avg:80.46ms
+[2025-07-08 04:15:19] [Rank 0] step:6681/10000 train_time:538114ms step_avg:80.54ms
+[2025-07-08 04:15:21] [Rank 0] step:6701/10000 train_time:539605ms step_avg:80.53ms
+[2025-07-08 04:15:22] [Rank 0] step:6721/10000 train_time:541097ms step_avg:80.51ms
+[2025-07-08 04:15:24] [Rank 0] step:6741/10000 train_time:542589ms step_avg:80.49ms
+[2025-07-08 04:15:26] [Rank 0] step:6761/10000 train_time:544725ms step_avg:80.57ms
+[2025-07-08 04:15:28] [Rank 0] step:6781/10000 train_time:546219ms step_avg:80.55ms
+[2025-07-08 04:15:29] [Rank 0] step:6801/10000 train_time:547711ms step_avg:80.53ms
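[Editor's note] In the step lines, step_avg is cumulative wall-clock training time divided by the step index, not a moving average; that is why a slow 20-step window (about 2.1 s instead of the usual ~1.5 s) nudges it by only a few hundredths of a millisecond. A self-contained sketch of the bookkeeping, with the sleep standing in for a real optimizer step:

    import time

    def train_step():
        time.sleep(0.08)  # stand-in for one optimizer step (~80 ms, as in the log)

    num_iterations = 100
    t0 = time.perf_counter()
    for step in range(1, num_iterations + 1):
        train_step()
        if step % 20 == 1:  # the log prints every 20 steps, offset by one
            train_ms = (time.perf_counter() - t0) * 1000
            print(f"step:{step}/{num_iterations} train_time:{train_ms:.0f}ms "
                  f"step_avg:{train_ms / step:.2f}ms")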
+[2025-07-08 04:15:31] [Rank 0] step:6821/10000 train_time:549206ms step_avg:80.52ms
+[2025-07-08 04:15:32] [Rank 0] step:6841/10000 train_time:550702ms step_avg:80.50ms
+[2025-07-08 04:15:34] [Rank 0] step:6861/10000 train_time:552432ms step_avg:80.52ms
+[2025-07-08 04:15:35] [Rank 0] step:6881/10000 train_time:553926ms step_avg:80.50ms
+[2025-07-08 04:15:37] [Rank 0] step:6901/10000 train_time:555423ms step_avg:80.48ms
+[2025-07-08 04:15:38] [Rank 0] step:6921/10000 train_time:556919ms step_avg:80.47ms
+[2025-07-08 04:15:41] [Rank 0] step:6941/10000 train_time:559157ms step_avg:80.56ms
+[2025-07-08 04:15:42] [Rank 0] step:6961/10000 train_time:560717ms step_avg:80.55ms
+[2025-07-08 04:15:44] [Rank 0] step:6981/10000 train_time:562212ms step_avg:80.53ms
+[2025-07-08 04:15:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:15:46] [Rank 0] PRINT: step:7000/10000 train_loss:0.8636 val_loss:0.8644 train_time:563708ms step_avg:80.53ms
+[2025-07-08 04:15:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:15:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:15:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:21:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:21:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:21:15] [Rank 0] Total Loss: 5.5196
+[2025-07-08 04:21:15] [Rank 0] Total FTA: 0.9221
+[2025-07-08 04:21:15] [Rank 0] Group 0 Loss: 5.6935
+[2025-07-08 04:21:15] [Rank 0] Group 1 Loss: 5.3894
+[2025-07-08 04:21:15] [Rank 0] Group 2 Loss: 5.4927
+[2025-07-08 04:21:15] [Rank 0] Group 3 Loss: 5.5821
+[2025-07-08 04:21:15] [Rank 0] Group 4 Loss: 5.4216
+[2025-07-08 04:21:15] [Rank 0] Group 5 Loss: 5.4386
+[2025-07-08 04:21:15] [Rank 0] Group 6 Loss: 5.4133
+[2025-07-08 04:21:15] [Rank 0] Group 7 Loss: 5.5217
+[2025-07-08 04:21:15] [Rank 0] Group 8 Loss: 5.4827
+[2025-07-08 04:21:15] [Rank 0] Group 9 Loss: 5.5057
+[2025-07-08 04:21:15] [Rank 0] Group 10 Loss: 5.5280
+[2025-07-08 04:21:15] [Rank 0] Group 11 Loss: 5.5440
+[2025-07-08 04:21:15] [Rank 0] Group 0 FTA: 0.8127
+[2025-07-08 04:21:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 04:21:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 04:21:15] [Rank 0] Group 3 FTA: 0.9349
+[2025-07-08 04:21:15] [Rank 0] Group 4 FTA: 0.9635
+[2025-07-08 04:21:15] [Rank 0] Group 5 FTA: 0.9688
+[2025-07-08 04:21:15] [Rank 0] Group 6 FTA: 0.9505
+[2025-07-08 04:21:15] [Rank 0] Group 7 FTA: 0.8984
+[2025-07-08 04:21:15] [Rank 0] Group 8 FTA: 0.9271
+[2025-07-08 04:21:15] [Rank 0] Group 9 FTA: 0.9336
+[2025-07-08 04:21:15] [Rank 0] Group 10 FTA: 0.9160
+[2025-07-08 04:21:15] [Rank 0] Group 11 FTA: 0.9043
+[2025-07-08 04:21:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:21:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 04:21:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:21:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:21:16] [Rank 0] step:7001/10000 train_time:563729ms step_avg:80.52ms
+[2025-07-08 04:21:18] [Rank 0] step:7021/10000 train_time:565275ms step_avg:80.51ms
+[2025-07-08 04:21:20] [Rank 0] step:7041/10000 train_time:567356ms step_avg:80.58ms
+[2025-07-08 04:21:21] [Rank 0] step:7061/10000 train_time:568844ms step_avg:80.56ms
+[2025-07-08 04:21:23] [Rank 0] step:7081/10000 train_time:570331ms step_avg:80.54ms
+[2025-07-08 04:21:24] [Rank 0] step:7101/10000 train_time:571820ms step_avg:80.53ms
+[2025-07-08 04:21:26] [Rank 0] step:7121/10000 train_time:573546ms step_avg:80.54ms
+[2025-07-08 04:21:27] [Rank 0] step:7141/10000 train_time:575033ms step_avg:80.53ms
+[2025-07-08 04:21:29] [Rank 0] step:7161/10000 train_time:576523ms step_avg:80.51ms
+[2025-07-08 04:21:30] [Rank 0] step:7181/10000 train_time:578013ms step_avg:80.49ms
+[2025-07-08 04:21:33] [Rank 0] step:7201/10000 train_time:580192ms step_avg:80.57ms
+[2025-07-08 04:21:34] [Rank 0] step:7221/10000 train_time:581663ms step_avg:80.55ms
+[2025-07-08 04:21:36] [Rank 0] step:7241/10000 train_time:583151ms step_avg:80.53ms
+[2025-07-08 04:21:37] [Rank 0] step:7261/10000 train_time:584642ms step_avg:80.52ms
+[2025-07-08 04:21:39] [Rank 0] step:7281/10000 train_time:586134ms step_avg:80.50ms
+[2025-07-08 04:21:41] [Rank 0] step:7301/10000 train_time:588274ms step_avg:80.57ms
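[Editor's note] The warning printed before every val-loss line is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches fit and the trailing 131072 tokens are never scored. A check of this shape would reproduce it (the warning text is copied from the log; the surrounding code is assumed):

    val_tokens = 1_966_080
    val_batch_size = 262_144

    full_batches, remainder = divmod(val_tokens, val_batch_size)
    if remainder:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
    print(full_batches, remainder)  # -> 7 131072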
+[2025-07-08 04:21:42] [Rank 0] step:7321/10000 train_time:589766ms step_avg:80.56ms
+[2025-07-08 04:21:44] [Rank 0] step:7341/10000 train_time:591259ms step_avg:80.54ms
+[2025-07-08 04:21:45] [Rank 0] step:7361/10000 train_time:592752ms step_avg:80.53ms
+[2025-07-08 04:21:47] [Rank 0] step:7381/10000 train_time:594247ms step_avg:80.51ms
+[2025-07-08 04:21:49] [Rank 0] step:7401/10000 train_time:596388ms step_avg:80.58ms
+[2025-07-08 04:21:50] [Rank 0] step:7421/10000 train_time:597881ms step_avg:80.57ms
+[2025-07-08 04:21:52] [Rank 0] step:7441/10000 train_time:599378ms step_avg:80.55ms
+[2025-07-08 04:21:53] [Rank 0] step:7461/10000 train_time:600876ms step_avg:80.54ms
+[2025-07-08 04:21:55] [Rank 0] step:7481/10000 train_time:602607ms step_avg:80.55ms
+[2025-07-08 04:21:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:21:57] [Rank 0] PRINT: step:7500/10000 train_loss:0.8625 val_loss:0.8631 train_time:604103ms step_avg:80.55ms
+[2025-07-08 04:21:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:21:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:21:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:27:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:27:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:27:23] [Rank 0] Total Loss: 5.5158
+[2025-07-08 04:27:23] [Rank 0] Total FTA: 0.9544
+[2025-07-08 04:27:23] [Rank 0] Group 0 Loss: 5.7938
+[2025-07-08 04:27:23] [Rank 0] Group 1 Loss: 5.3048
+[2025-07-08 04:27:23] [Rank 0] Group 2 Loss: 5.3683
+[2025-07-08 04:27:23] [Rank 0] Group 3 Loss: 5.5954
+[2025-07-08 04:27:23] [Rank 0] Group 4 Loss: 5.4518
+[2025-07-08 04:27:23] [Rank 0] Group 5 Loss: 5.4803
+[2025-07-08 04:27:23] [Rank 0] Group 6 Loss: 5.4282
+[2025-07-08 04:27:23] [Rank 0] Group 7 Loss: 5.4825
+[2025-07-08 04:27:23] [Rank 0] Group 8 Loss: 5.5048
+[2025-07-08 04:27:23] [Rank 0] Group 9 Loss: 5.5155
+[2025-07-08 04:27:23] [Rank 0] Group 10 Loss: 5.5269
+[2025-07-08 04:27:23] [Rank 0] Group 11 Loss: 5.4926
+[2025-07-08 04:27:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 04:27:23] [Rank 0] Group 1 FTA: 0.8333
+[2025-07-08 04:27:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 04:27:23] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 04:27:23] [Rank 0] Group 4 FTA: 0.9896
+[2025-07-08 04:27:23] [Rank 0] Group 5 FTA: 0.9948
+[2025-07-08 04:27:23] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-08 04:27:23] [Rank 0] Group 7 FTA: 0.9453
+[2025-07-08 04:27:23] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-08 04:27:23] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-08 04:27:23] [Rank 0] Group 10 FTA: 0.9414
+[2025-07-08 04:27:23] [Rank 0] Group 11 FTA: 0.9395
+[2025-07-08 04:27:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:27:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 04:27:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:27:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:27:25] [Rank 0] step:7501/10000 train_time:604122ms step_avg:80.54ms
+[2025-07-08 04:27:26] [Rank 0] step:7521/10000 train_time:605619ms step_avg:80.52ms
+[2025-07-08 04:27:28] [Rank 0] step:7541/10000 train_time:607107ms step_avg:80.51ms
+[2025-07-08 04:27:30] [Rank 0] step:7561/10000 train_time:608650ms step_avg:80.50ms
+[2025-07-08 04:27:31] [Rank 0] step:7581/10000 train_time:610727ms step_avg:80.56ms
+[2025-07-08 04:27:33] [Rank 0] step:7601/10000 train_time:612216ms step_avg:80.54ms
+[2025-07-08 04:27:34] [Rank 0] step:7621/10000 train_time:613702ms step_avg:80.53ms
+[2025-07-08 04:27:36] [Rank 0] step:7641/10000 train_time:615193ms step_avg:80.51ms
+[2025-07-08 04:27:37] [Rank 0] step:7661/10000 train_time:616921ms step_avg:80.53ms
+[2025-07-08 04:27:39] [Rank 0] step:7681/10000 train_time:618411ms step_avg:80.51ms
+[2025-07-08 04:27:40] [Rank 0] step:7701/10000 train_time:619901ms step_avg:80.50ms
+[2025-07-08 04:27:42] [Rank 0] step:7721/10000 train_time:621391ms step_avg:80.48ms
+[2025-07-08 04:27:44] [Rank 0] step:7741/10000 train_time:623138ms step_avg:80.50ms
+[2025-07-08 04:27:45] [Rank 0] step:7761/10000 train_time:625036ms step_avg:80.54ms
+[2025-07-08 04:27:47] [Rank 0] step:7781/10000 train_time:626527ms step_avg:80.52ms
+[2025-07-08 04:27:48] [Rank 0] step:7801/10000 train_time:628020ms step_avg:80.51ms
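[Editor's note] The timestamps imply the detailed evaluations dominate wall-clock time between checkpoints: the evaluation at step 7500 ran from 04:21:58 to 04:27:23, roughly 5.5 minutes (about 330 s), while the 500 training steps before it took only about 500 x 80.5 ms = 40 s. At this cadence the run spends close to 330 / (330 + 40) = 89% of its time inside the detailed evaluations.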
+[2025-07-08 04:27:50] [Rank 0] step:7821/10000 train_time:629512ms step_avg:80.49ms
+[2025-07-08 04:27:52] [Rank 0] step:7841/10000 train_time:631239ms step_avg:80.50ms
+[2025-07-08 04:27:53] [Rank 0] step:7861/10000 train_time:632729ms step_avg:80.49ms
+[2025-07-08 04:27:55] [Rank 0] step:7881/10000 train_time:634223ms step_avg:80.47ms
+[2025-07-08 04:27:56] [Rank 0] step:7901/10000 train_time:635716ms step_avg:80.46ms
+[2025-07-08 04:27:58] [Rank 0] step:7921/10000 train_time:637263ms step_avg:80.45ms
+[2025-07-08 04:28:00] [Rank 0] step:7941/10000 train_time:639374ms step_avg:80.52ms
+[2025-07-08 04:28:01] [Rank 0] step:7961/10000 train_time:640867ms step_avg:80.50ms
+[2025-07-08 04:28:03] [Rank 0] step:7981/10000 train_time:642361ms step_avg:80.49ms
+[2025-07-08 04:28:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:28:05] [Rank 0] PRINT: step:8000/10000 train_loss:0.8614 val_loss:0.8625 train_time:643854ms step_avg:80.48ms
+[2025-07-08 04:28:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:28:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:28:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:33:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:33:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:33:32] [Rank 0] Total Loss: 5.5571
+[2025-07-08 04:33:32] [Rank 0] Total FTA: 0.9675
+[2025-07-08 04:33:32] [Rank 0] Group 0 Loss: 5.8766
+[2025-07-08 04:33:32] [Rank 0] Group 1 Loss: 5.4878
+[2025-07-08 04:33:32] [Rank 0] Group 2 Loss: 5.5719
+[2025-07-08 04:33:32] [Rank 0] Group 3 Loss: 5.5440
+[2025-07-08 04:33:32] [Rank 0] Group 4 Loss: 5.5409
+[2025-07-08 04:33:32] [Rank 0] Group 5 Loss: 5.4722
+[2025-07-08 04:33:32] [Rank 0] Group 6 Loss: 5.4438
+[2025-07-08 04:33:32] [Rank 0] Group 7 Loss: 5.4708
+[2025-07-08 04:33:32] [Rank 0] Group 8 Loss: 5.4583
+[2025-07-08 04:33:32] [Rank 0] Group 9 Loss: 5.4383
+[2025-07-08 04:33:32] [Rank 0] Group 10 Loss: 5.5332
+[2025-07-08 04:33:32] [Rank 0] Group 11 Loss: 5.5341
+[2025-07-08 04:33:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 04:33:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 04:33:32] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 04:33:32] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 04:33:32] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-08 04:33:32] [Rank 0] Group 5 FTA: 0.9766
+[2025-07-08 04:33:32] [Rank 0] Group 6 FTA: 0.9062
+[2025-07-08 04:33:32] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-08 04:33:32] [Rank 0] Group 8 FTA: 0.9505
+[2025-07-08 04:33:32] [Rank 0] Group 9 FTA: 0.9453
+[2025-07-08 04:33:32] [Rank 0] Group 10 FTA: 0.9434
+[2025-07-08 04:33:32] [Rank 0] Group 11 FTA: 0.9482
+[2025-07-08 04:33:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:33:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 04:33:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:33:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:33:35] [Rank 0] step:8001/10000 train_time:643874ms step_avg:80.47ms
+[2025-07-08 04:33:37] [Rank 0] step:8021/10000 train_time:646121ms step_avg:80.55ms
+[2025-07-08 04:33:39] [Rank 0] step:8041/10000 train_time:647608ms step_avg:80.54ms
+[2025-07-08 04:33:40] [Rank 0] step:8061/10000 train_time:649097ms step_avg:80.52ms
+[2025-07-08 04:33:42] [Rank 0] step:8081/10000 train_time:650588ms step_avg:80.51ms
+[2025-07-08 04:33:44] [Rank 0] step:8101/10000 train_time:652077ms step_avg:80.49ms
+[2025-07-08 04:33:46] [Rank 0] step:8121/10000 train_time:654231ms step_avg:80.56ms
+[2025-07-08 04:33:47] [Rank 0] step:8141/10000 train_time:655721ms step_avg:80.55ms
+[2025-07-08 04:33:49] [Rank 0] step:8161/10000 train_time:657210ms step_avg:80.53ms
+[2025-07-08 04:33:50] [Rank 0] step:8181/10000 train_time:658701ms step_avg:80.52ms
+[2025-07-08 04:33:52] [Rank 0] step:8201/10000 train_time:660845ms step_avg:80.58ms
+[2025-07-08 04:33:54] [Rank 0] step:8221/10000 train_time:662334ms step_avg:80.57ms
+[2025-07-08 04:33:55] [Rank 0] step:8241/10000 train_time:663825ms step_avg:80.55ms
+[2025-07-08 04:33:57] [Rank 0] step:8261/10000 train_time:665316ms step_avg:80.54ms
+[2025-07-08 04:33:58] [Rank 0] step:8281/10000 train_time:666861ms step_avg:80.53ms
+[2025-07-08 04:34:00] [Rank 0] step:8301/10000 train_time:668533ms step_avg:80.54ms
+[2025-07-08 04:34:01] [Rank 0] step:8321/10000 train_time:670026ms step_avg:80.52ms
+[2025-07-08 04:34:03] [Rank 0] step:8341/10000 train_time:671520ms step_avg:80.51ms
+[2025-07-08 04:34:04] [Rank 0] step:8361/10000 train_time:673014ms step_avg:80.49ms
+[2025-07-08 04:34:06] [Rank 0] step:8381/10000 train_time:675148ms step_avg:80.56ms
+[2025-07-08 04:34:08] [Rank 0] step:8401/10000 train_time:676642ms step_avg:80.54ms
+[2025-07-08 04:34:09] [Rank 0] step:8421/10000 train_time:678134ms step_avg:80.53ms
+[2025-07-08 04:34:11] [Rank 0] step:8441/10000 train_time:679627ms step_avg:80.52ms
+[2025-07-08 04:34:13] [Rank 0] step:8461/10000 train_time:681121ms step_avg:80.50ms
+[2025-07-08 04:34:14] [Rank 0] step:8481/10000 train_time:682854ms step_avg:80.52ms
+[2025-07-08 04:34:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:34:17] [Rank 0] PRINT: step:8500/10000 train_loss:0.8604 val_loss:0.8621 train_time:684348ms step_avg:80.51ms
+[2025-07-08 04:34:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:34:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:34:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:39:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:39:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:39:43] [Rank 0] Total Loss: 5.5748
+[2025-07-08 04:39:43] [Rank 0] Total FTA: 0.9679
+[2025-07-08 04:39:43] [Rank 0] Group 0 Loss: 5.9084
+[2025-07-08 04:39:43] [Rank 0] Group 1 Loss: 5.5226
+[2025-07-08 04:39:43] [Rank 0] Group 2 Loss: 5.5277
+[2025-07-08 04:39:43] [Rank 0] Group 3 Loss: 5.5666
+[2025-07-08 04:39:43] [Rank 0] Group 4 Loss: 5.5433
+[2025-07-08 04:39:43] [Rank 0] Group 5 Loss: 5.5286
+[2025-07-08 04:39:43] [Rank 0] Group 6 Loss: 5.3945
+[2025-07-08 04:39:43] [Rank 0] Group 7 Loss: 5.5612
+[2025-07-08 04:39:43] [Rank 0] Group 8 Loss: 5.5660
+[2025-07-08 04:39:43] [Rank 0] Group 9 Loss: 5.5114
+[2025-07-08 04:39:43] [Rank 0] Group 10 Loss: 5.4648
+[2025-07-08 04:39:43] [Rank 0] Group 11 Loss: 5.5405
+[2025-07-08 04:39:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 04:39:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 04:39:43] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 04:39:43] [Rank 0] Group 3 FTA: 0.9219
+[2025-07-08 04:39:43] [Rank 0] Group 4 FTA: 0.9870
+[2025-07-08 04:39:43] [Rank 0] Group 5 FTA: 0.9557
+[2025-07-08 04:39:43] [Rank 0] Group 6 FTA: 0.9688
+[2025-07-08 04:39:43] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-08 04:39:43] [Rank 0] Group 8 FTA: 0.9635
+[2025-07-08 04:39:43] [Rank 0] Group 9 FTA: 0.9648
+[2025-07-08 04:39:43] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-08 04:39:43] [Rank 0] Group 11 FTA: 0.9502
+[2025-07-08 04:39:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:39:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
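[Editor's note] Logs in this format are regular enough to post-process with a single regular expression; for example, the (step, train_loss, val_loss) series behind the total-loss curve can be recovered from the text alone. A hypothetical helper, with the file name illustrative rather than taken from this run:

    import re

    VAL_RE = re.compile(r"PRINT: step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

    def read_val_curve(path):
        # Returns [(step, train_loss, val_loss), ...] in file order,
        # skipping any repeated print for the same step.
        curve = []
        with open(path) as fh:
            for line in fh:
                m = VAL_RE.search(line)
                if m and (not curve or curve[-1][0] != int(m.group(1))):
                    curve.append((int(m.group(1)),
                                  float(m.group(2)),
                                  float(m.group(3))))
        return curve

    # e.g. read_val_curve("training_log.txt")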
+[2025-07-08 04:39:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:39:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:39:45] [Rank 0] step:8501/10000 train_time:684367ms step_avg:80.50ms
+[2025-07-08 04:39:46] [Rank 0] step:8521/10000 train_time:685857ms step_avg:80.49ms
+[2025-07-08 04:39:48] [Rank 0] step:8541/10000 train_time:687345ms step_avg:80.48ms
+[2025-07-08 04:39:49] [Rank 0] step:8561/10000 train_time:689072ms step_avg:80.49ms
+[2025-07-08 04:39:51] [Rank 0] step:8581/10000 train_time:690559ms step_avg:80.48ms
+[2025-07-08 04:39:52] [Rank 0] step:8601/10000 train_time:692048ms step_avg:80.46ms
+[2025-07-08 04:39:54] [Rank 0] step:8621/10000 train_time:693538ms step_avg:80.45ms
+[2025-07-08 04:39:56] [Rank 0] step:8641/10000 train_time:695119ms step_avg:80.44ms
+[2025-07-08 04:39:57] [Rank 0] step:8661/10000 train_time:696848ms step_avg:80.46ms
+[2025-07-08 04:39:59] [Rank 0] step:8681/10000 train_time:698338ms step_avg:80.44ms
+[2025-07-08 04:40:00] [Rank 0] step:8701/10000 train_time:699828ms step_avg:80.43ms
+[2025-07-08 04:40:02] [Rank 0] step:8721/10000 train_time:701319ms step_avg:80.42ms
+[2025-07-08 04:40:03] [Rank 0] step:8741/10000 train_time:703047ms step_avg:80.43ms
+[2025-07-08 04:40:05] [Rank 0] step:8761/10000 train_time:704637ms step_avg:80.43ms
+[2025-07-08 04:40:07] [Rank 0] step:8781/10000 train_time:706130ms step_avg:80.42ms
+[2025-07-08 04:40:08] [Rank 0] step:8801/10000 train_time:707625ms step_avg:80.40ms
+[2025-07-08 04:40:10] [Rank 0] step:8821/10000 train_time:709789ms step_avg:80.47ms
+[2025-07-08 04:40:12] [Rank 0] step:8841/10000 train_time:711261ms step_avg:80.45ms
+[2025-07-08 04:40:13] [Rank 0] step:8861/10000 train_time:712754ms step_avg:80.44ms
+[2025-07-08 04:40:15] [Rank 0] step:8881/10000 train_time:714249ms step_avg:80.42ms
+[2025-07-08 04:40:16] [Rank 0] step:8901/10000 train_time:715745ms step_avg:80.41ms
+[2025-07-08 04:40:18] [Rank 0] step:8921/10000 train_time:717904ms step_avg:80.47ms
+[2025-07-08 04:40:20] [Rank 0] step:8941/10000 train_time:719399ms step_avg:80.46ms
+[2025-07-08 04:40:21] [Rank 0] step:8961/10000 train_time:720894ms step_avg:80.45ms
+[2025-07-08 04:40:23] [Rank 0] step:8981/10000 train_time:722389ms step_avg:80.44ms
+[2025-07-08 04:40:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:40:25] [Rank 0] PRINT: step:9000/10000 train_loss:0.8595 val_loss:0.8617 train_time:723890ms step_avg:80.43ms
+[2025-07-08 04:40:25] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:40:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:40:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:45:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:45:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:45:55] [Rank 0] Total Loss: 5.5880
+[2025-07-08 04:45:55] [Rank 0] Total FTA: 0.9670
+[2025-07-08 04:45:55] [Rank 0] Group 0 Loss: 5.9290
+[2025-07-08 04:45:55] [Rank 0] Group 1 Loss: 5.6289
+[2025-07-08 04:45:55] [Rank 0] Group 2 Loss: 5.5556
+[2025-07-08 04:45:55] [Rank 0] Group 3 Loss: 5.5055
+[2025-07-08 04:45:55] [Rank 0] Group 4 Loss: 5.5663
+[2025-07-08 04:45:55] [Rank 0] Group 5 Loss: 5.4429
+[2025-07-08 04:45:55] [Rank 0] Group 6 Loss: 5.4043
+[2025-07-08 04:45:55] [Rank 0] Group 7 Loss: 5.5467
+[2025-07-08 04:45:55] [Rank 0] Group 8 Loss: 5.5068
+[2025-07-08 04:45:55] [Rank 0] Group 9 Loss: 5.4381
+[2025-07-08 04:45:55] [Rank 0] Group 10 Loss: 5.5642
+[2025-07-08 04:45:55] [Rank 0] Group 11 Loss: 5.5862
+[2025-07-08 04:45:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 04:45:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 04:45:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 04:45:55] [Rank 0] Group 3 FTA: 0.9062
+[2025-07-08 04:45:55] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-08 04:45:55] [Rank 0] Group 5 FTA: 0.9922
+[2025-07-08 04:45:55] [Rank 0] Group 6 FTA: 0.9505
+[2025-07-08 04:45:55] [Rank 0] Group 7 FTA: 0.9427
+[2025-07-08 04:45:55] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-08 04:45:55] [Rank 0] Group 9 FTA: 0.9453
+[2025-07-08 04:45:55] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-08 04:45:55] [Rank 0] Group 11 FTA: 0.9482
+[2025-07-08 04:45:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:45:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png +[2025-07-08 04:45:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png +[2025-07-08 04:45:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png +[2025-07-08 04:45:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png +[2025-07-08 04:45:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png +[2025-07-08 04:45:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png +[2025-07-08 04:45:57] [Rank 0] step:9001/10000 train_time:723917ms step_avg:80.43ms +[2025-07-08 04:45:57] [Rank 0] step:9001/10000 train_time:723917ms step_avg:80.43ms +[2025-07-08 04:45:59] [Rank 0] step:9021/10000 train_time:726096ms step_avg:80.49ms +[2025-07-08 04:45:59] [Rank 0] step:9021/10000 train_time:726096ms step_avg:80.49ms +[2025-07-08 04:46:00] [Rank 0] step:9041/10000 train_time:727580ms step_avg:80.48ms +[2025-07-08 04:46:00] [Rank 0] step:9041/10000 train_time:727580ms step_avg:80.48ms +[2025-07-08 04:46:02] [Rank 0] step:9061/10000 train_time:729068ms step_avg:80.46ms +[2025-07-08 04:46:02] [Rank 0] step:9061/10000 train_time:729068ms step_avg:80.46ms +[2025-07-08 04:46:03] [Rank 0] step:9081/10000 train_time:730556ms step_avg:80.45ms +[2025-07-08 04:46:03] [Rank 0] step:9081/10000 train_time:730556ms step_avg:80.45ms +[2025-07-08 04:46:05] [Rank 0] step:9101/10000 train_time:732287ms step_avg:80.46ms +[2025-07-08 04:46:05] [Rank 0] step:9101/10000 train_time:732287ms step_avg:80.46ms +[2025-07-08 04:46:06] [Rank 0] step:9121/10000 train_time:733774ms step_avg:80.45ms +[2025-07-08 04:46:06] [Rank 0] step:9121/10000 train_time:733774ms step_avg:80.45ms +[2025-07-08 04:46:08] [Rank 0] step:9141/10000 train_time:735264ms step_avg:80.44ms +[2025-07-08 04:46:08] [Rank 0] step:9141/10000 train_time:735264ms step_avg:80.44ms +[2025-07-08 04:46:09] [Rank 0] step:9161/10000 train_time:736755ms step_avg:80.42ms +[2025-07-08 04:46:09] [Rank 0] step:9161/10000 train_time:736755ms step_avg:80.42ms +[2025-07-08 04:46:11] [Rank 0] step:9181/10000 train_time:738300ms step_avg:80.42ms +[2025-07-08 04:46:11] [Rank 0] step:9181/10000 train_time:738300ms step_avg:80.42ms +[2025-07-08 04:46:13] [Rank 0] step:9201/10000 train_time:739971ms step_avg:80.42ms +[2025-07-08 04:46:13] [Rank 0] step:9201/10000 train_time:739971ms step_avg:80.42ms +[2025-07-08 04:46:14] [Rank 0] step:9221/10000 train_time:741461ms step_avg:80.41ms +[2025-07-08 04:46:14] [Rank 0] step:9221/10000 train_time:741461ms step_avg:80.41ms +[2025-07-08 04:46:16] [Rank 0] step:9241/10000 train_time:743068ms step_avg:80.41ms +[2025-07-08 04:46:16] [Rank 0] step:9241/10000 train_time:743068ms step_avg:80.41ms +[2025-07-08 04:46:17] [Rank 0] step:9261/10000 train_time:744564ms step_avg:80.40ms +[2025-07-08 04:46:17] [Rank 0] step:9261/10000 train_time:744564ms step_avg:80.40ms +[2025-07-08 04:46:19] [Rank 0] step:9281/10000 train_time:746294ms step_avg:80.41ms +[2025-07-08 04:46:19] [Rank 0] step:9281/10000 train_time:746294ms step_avg:80.41ms +[2025-07-08 04:46:20] [Rank 0] step:9301/10000 train_time:747787ms step_avg:80.40ms +[2025-07-08 04:46:20] [Rank 0] 
+[2025-07-08 04:46:22] [Rank 0] step:9321/10000 train_time:749281ms step_avg:80.39ms
+[2025-07-08 04:46:23] [Rank 0] step:9341/10000 train_time:750775ms step_avg:80.37ms
+[2025-07-08 04:46:25] [Rank 0] step:9361/10000 train_time:752269ms step_avg:80.36ms
+[2025-07-08 04:46:27] [Rank 0] step:9381/10000 train_time:754004ms step_avg:80.38ms
+[2025-07-08 04:46:28] [Rank 0] step:9401/10000 train_time:755496ms step_avg:80.36ms
+[2025-07-08 04:46:30] [Rank 0] step:9421/10000 train_time:756990ms step_avg:80.35ms
+[2025-07-08 04:46:31] [Rank 0] step:9441/10000 train_time:758486ms step_avg:80.34ms
+[2025-07-08 04:46:33] [Rank 0] step:9461/10000 train_time:760217ms step_avg:80.35ms
+[2025-07-08 04:46:34] [Rank 0] step:9481/10000 train_time:761712ms step_avg:80.34ms
+[2025-07-08 04:46:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
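The warning above is quantifiable from the two numbers it prints: 1966080 is not a multiple of 262144, so the validation loop runs floor(1966080 / 262144) = 7 full batches and the trailing tokens are never scored. A quick check in plain Python, using only values taken from the log:

    val_tokens, val_batch_size = 1966080, 262144
    full_batches = val_tokens // val_batch_size           # 7
    missed = val_tokens - full_batches * val_batch_size   # 131072 tokens skipped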
+[2025-07-08 04:46:37] [Rank 0] PRINT: step:9500/10000 train_loss:0.8587 val_loss:0.8615 train_time:763205ms step_avg:80.34ms
+[2025-07-08 04:46:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:46:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:46:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 04:52:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:52:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:52:05] [Rank 0] Total Loss: 5.6017
+[2025-07-08 04:52:05] [Rank 0] Total FTA: 0.9766
+[2025-07-08 04:52:05] [Rank 0] Group 0 Loss: 5.8934
+[2025-07-08 04:52:05] [Rank 0] Group 1 Loss: 5.6038
+[2025-07-08 04:52:05] [Rank 0] Group 2 Loss: 5.5095
+[2025-07-08 04:52:05] [Rank 0] Group 3 Loss: 5.5631
+[2025-07-08 04:52:05] [Rank 0] Group 4 Loss: 5.5426
+[2025-07-08 04:52:05] [Rank 0] Group 5 Loss: 5.4318
+[2025-07-08 04:52:05] [Rank 0] Group 6 Loss: 5.4507
+[2025-07-08 04:52:05] [Rank 0] Group 7 Loss: 5.6162
+[2025-07-08 04:52:05] [Rank 0] Group 8 Loss: 5.5519
+[2025-07-08 04:52:05] [Rank 0] Group 9 Loss: 5.5596
+[2025-07-08 04:52:05] [Rank 0] Group 10 Loss: 5.5817
+[2025-07-08 04:52:05] [Rank 0] Group 11 Loss: 5.6071
+[2025-07-08 04:52:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 04:52:05] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 04:52:05] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 04:52:05] [Rank 0] Group 3 FTA: 0.9479
+[2025-07-08 04:52:05] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-08 04:52:05] [Rank 0] Group 5 FTA: 0.9896
+[2025-07-08 04:52:05] [Rank 0] Group 6 FTA: 0.9714
+[2025-07-08 04:52:05] [Rank 0] Group 7 FTA: 0.9583
+[2025-07-08 04:52:05] [Rank 0] Group 8 FTA: 0.9531
+[2025-07-08 04:52:05] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-08 04:52:05] [Rank 0] Group 10 FTA: 0.9707
+[2025-07-08 04:52:05] [Rank 0] Group 11 FTA: 0.9658
+[2025-07-08 04:52:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:52:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 04:52:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:52:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:52:06] [Rank 0] step:9501/10000 train_time:763225ms step_avg:80.33ms
+[2025-07-08 04:52:08] [Rank 0] step:9521/10000 train_time:764716ms step_avg:80.32ms
+[2025-07-08 04:52:10] [Rank 0] step:9541/10000 train_time:766254ms step_avg:80.31ms
+[2025-07-08 04:52:12] [Rank 0] step:9561/10000 train_time:768357ms step_avg:80.36ms
+[2025-07-08 04:52:13] [Rank 0] step:9581/10000 train_time:769845ms step_avg:80.35ms
+[2025-07-08 04:52:15] [Rank 0] step:9601/10000 train_time:771332ms step_avg:80.34ms
+[2025-07-08 04:52:16] [Rank 0] step:9621/10000 train_time:772819ms step_avg:80.33ms
+[2025-07-08 04:52:18] [Rank 0] step:9641/10000 train_time:774543ms step_avg:80.34ms
+[2025-07-08 04:52:19] [Rank 0] step:9661/10000 train_time:776033ms step_avg:80.33ms
+[2025-07-08 04:52:21] [Rank 0] step:9681/10000 train_time:777523ms step_avg:80.31ms
+[2025-07-08 04:52:22] [Rank 0] step:9701/10000 train_time:779014ms step_avg:80.30ms
+[2025-07-08 04:52:24] [Rank 0] step:9721/10000 train_time:780762ms step_avg:80.32ms
+[2025-07-08 04:52:25] [Rank 0] step:9741/10000 train_time:782231ms step_avg:80.30ms
+[2025-07-08 04:52:27] [Rank 0] step:9761/10000 train_time:783722ms step_avg:80.29ms
+[2025-07-08 04:52:28] [Rank 0] step:9781/10000 train_time:785214ms step_avg:80.28ms
+[2025-07-08 04:52:30] [Rank 0] step:9801/10000 train_time:786706ms step_avg:80.27ms
+[2025-07-08 04:52:32] [Rank 0] step:9821/10000 train_time:788860ms step_avg:80.32ms
+[2025-07-08 04:52:34] [Rank 0] step:9841/10000 train_time:790501ms step_avg:80.33ms
+[2025-07-08 04:52:35] [Rank 0] step:9861/10000 train_time:791997ms step_avg:80.32ms
+[2025-07-08 04:52:37] [Rank 0] step:9881/10000 train_time:793490ms step_avg:80.30ms
+[2025-07-08 04:52:38] [Rank 0] step:9901/10000 train_time:795037ms step_avg:80.30ms
+[2025-07-08 04:52:40] [Rank 0] step:9921/10000 train_time:796711ms step_avg:80.31ms
+[2025-07-08 04:52:41] [Rank 0] step:9941/10000 train_time:798206ms step_avg:80.29ms
+[2025-07-08 04:52:43] [Rank 0] step:9961/10000 train_time:799699ms step_avg:80.28ms
+[2025-07-08 04:52:44] [Rank 0] step:9981/10000 train_time:801195ms step_avg:80.27ms
+[2025-07-08 04:52:46] [Rank 0] step:10000/10000 train_time:802853ms step_avg:80.29ms
+[2025-07-08 04:52:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 04:52:47] [Rank 0] PRINT: step:10000/10000 train_loss:0.8579 val_loss:0.8614 train_time:802932ms step_avg:80.29ms
+[2025-07-08 04:52:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 04:52:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 04:52:47] [Rank 0] Evaluation set size after sampling: 5633
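The sampled size lands on 5633 rather than 5000 because the stratified sampler in run_detailed_evaluation (logged later in this diff) draws max(1, int(len(items) * ratio)) items per class, so every small class still contributes at least one item; the exact overshoot depends on the class-size distribution of the QA file. Roughly, in the script's own terms:

    # Sketch of the size computation; qa_data and data_by_class as in run_detailed_evaluation.
    ratio = 5000 / len(qa_data)
    eval_size = sum(max(1, int(len(items) * ratio))
                    for items in data_by_class.values())   # 5633 for this dataset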
+[2025-07-08 04:58:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 04:58:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 04:58:15] [Rank 0] Total Loss: 5.6328
+[2025-07-08 04:58:15] [Rank 0] Total FTA: 0.9830
+[2025-07-08 04:58:15] [Rank 0] Group 0 Loss: 5.9019
+[2025-07-08 04:58:15] [Rank 0] Group 1 Loss: 5.6339
+[2025-07-08 04:58:15] [Rank 0] Group 2 Loss: 5.6064
+[2025-07-08 04:58:15] [Rank 0] Group 3 Loss: 5.6257
+[2025-07-08 04:58:15] [Rank 0] Group 4 Loss: 5.5815
+[2025-07-08 04:58:15] [Rank 0] Group 5 Loss: 5.5328
+[2025-07-08 04:58:15] [Rank 0] Group 6 Loss: 5.4667
+[2025-07-08 04:58:15] [Rank 0] Group 7 Loss: 5.6493
+[2025-07-08 04:58:15] [Rank 0] Group 8 Loss: 5.6140
+[2025-07-08 04:58:15] [Rank 0] Group 9 Loss: 5.5256
+[2025-07-08 04:58:15] [Rank 0] Group 10 Loss: 5.5850
+[2025-07-08 04:58:15] [Rank 0] Group 11 Loss: 5.6136
+[2025-07-08 04:58:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 04:58:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 04:58:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 04:58:15] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-08 04:58:15] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-08 04:58:15] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-08 04:58:15] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-08 04:58:15] [Rank 0] Group 7 FTA: 0.9714
+[2025-07-08 04:58:15] [Rank 0] Group 8 FTA: 0.9714
+[2025-07-08 04:58:15] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-08 04:58:15] [Rank 0] Group 10 FTA: 0.9727
+[2025-07-08 04:58:15] [Rank 0] Group 11 FTA: 0.9746
+[2025-07-08 04:58:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_loss_curves.png
+[2025-07-08 04:58:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/per_class_acc_curves.png
+[2025-07-08 04:58:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_loss_curve.png
+[2025-07-08 04:58:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_46/total_acc_curve.png
+[2025-07-08 04:58:17] [Rank 0] step:10001/10000 train_time:802953ms step_avg:80.29ms
+[2025-07-08 04:58:17] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 04:58:17 2025 ---
+[2025-07-08 04:58:17] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9956 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/config.json b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1ca85fe8ff20623307bf9dd7e2e4e84a378ffeb1
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 48,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "5cb96fd1-71d8-4e6d-85ab-34b550f278e9",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
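Given the cli_args recorded above, this run maps one-to-one onto the argparse flags defined in the training script logged below; a launch would look roughly like the following (the launcher invocation and the script filename are not recorded in this diff, so both are assumptions):

    torchrun --standalone --nproc_per_node=<num_gpus> train.py \
        --seed 48 --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.002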
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..63f1b111a87b8aa3f5b07b30c0f2eb9d7bc9a3e9
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:35d40078e4281650f79d768c5f0949154c72ed273f038bb5f82cfeedc6d2e9b3
+size 379968
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..279a694cc2d2b523d9753818b9dd6af8a322f0d5
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1086e10efc18a7513ded9a2f1729c6b7906041716bc0c7f17bff529524775722
+size 366641
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..784d83337016656eab85b1de0d65513d22fc9dfb
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:530a41ad57da5c1fa509428853293244b7898e6c0d8e418c4e6e86bd79d93b19
+size 111313
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..654d45e1f6ca2e54849e88c621a3522af19525c7
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8e77393c00074fb35e68592bebcf748d4cecdd379a4c9a7d5eeab95fee9cb51
+size 118784
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/training_log_5cb96fd1-71d8-4e6d-85ab-34b550f278e9.txt b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/training_log_5cb96fd1-71d8-4e6d-85ab-34b550f278e9.txt
new file mode 100644
index 0000000000000000000000000000000000000000..ce6de028916f1de2c674b8c80070f57a75152c82
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/training_log_5cb96fd1-71d8-4e6d-85ab-34b550f278e9.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 19:50:33] [Rank 0] PRINT: --- Script Start: Sun Jul 6 19:50:33 2025 ---
+[2025-07-06 19:50:33] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002)
+[2025-07-06 19:50:33] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 19:50:33] [Rank 0] PRINT: Using fixed seed: 48
+[2025-07-06 19:50:33] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48
+[2025-07-06 19:50:33] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
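+
+# A minimal, self-contained sketch of the stable-then-decay schedule that get_lr above
+# implements, assuming num_iterations=10000 and cooldown_frac=0.8 (the values used in
+# this run): the multiplier holds at 1.0 for the first 20% of steps, then decays
+# linearly to a 0.1 floor at the final step. Illustration only; names are hypothetical.
+def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
+    x = min(max(step / num_iterations, 0.0), 1.0)  # clamped training progress
+    if x < 1 - cooldown_frac:
+        return 1.0  # stable phase
+    w = (1 - x) / cooldown_frac  # w sweeps 1 -> 0 across the cooldown window
+    return w * 1.0 + (1 - w) * 0.1  # linear interpolation down to the 0.1 floor
+
+assert _lr_multiplier_sketch(0) == 1.0                    # stable phase
+assert abs(_lr_multiplier_sketch(6000) - 0.55) < 1e-9     # x=0.6 -> w=0.5
+assert abs(_lr_multiplier_sketch(10000) - 0.1) < 1e-9     # end of cooldown
+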
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 19:50:33] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # itertools.cycle loops over the shards, enabling multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
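+# Example launch for the experiment grid defined by the argparse flags above. The
+# script filename is an assumption for illustration; the flags and env handling
+# (torchrun sets RANK/LOCAL_RANK/WORLD_SIZE) match the definitions in this script.
+# Mode 2 places Muon on the value/output (VO) attention matrices, Adam on the rest:
+#
+#   torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
+#       --model_parameterization qkvo --optimizer_mode 2 --adam_lr 1e-4 --seed 42
+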
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
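+
+# A self-contained sketch of the group structure produced by
+# generate_powerlaw_selection_counts above (names here are illustrative). For groups
+# 0..m: group 0 holds 1 class with 2**m samples, and group g >= 1 holds 2**(g-1)
+# classes with 2**(m-g) samples each, so every group g >= 1 carries the same
+# 2**(m-1) total samples while per-class frequency decays as a power law.
+def _powerlaw_sketch(m: int = 11):
+    counts = {}  # class_id -> samples per class, mirroring the helper above
+    class_id = 0
+    for group_id in range(m + 1):
+        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
+        for _ in range(num_classes):
+            counts[class_id] = 2 ** (m - group_id)
+            class_id += 1
+    return counts
+
+_c = _powerlaw_sketch(11)                      # m=11, matching M_FOR_POWERLAW below
+assert len(_c) == 2 ** 11                      # 2048 classes in total
+assert _c[0] == 2048 and _c[2047] == 1         # head class vs. tail classes
+assert sum(_c.values()) == 2048 + 11 * 1024    # group 0 + 11 equal-mass groups
+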
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        #if world_size > 1:
+        #    ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #    ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #    ft_total_tensor = torch.tensor(ft_total, device=device)
+        #    dist.broadcast(ft_acc_tensor, 0)
+        #    dist.broadcast(ft_correct_tensor, 0)
+        #    dist.broadcast(ft_total_tensor, 0)
+        #    first_token_acc = ft_acc_tensor.item()
+        #    ft_correct = int(ft_correct_tensor.item())
+        #    ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 19:50:34] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 19:50:34] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-06 19:50:34] [Rank 0] PRINT: Constructing model... +[2025-07-06 19:50:34] [Rank 0] PRINT: Constructing model... +[2025-07-06 19:50:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 19:50:36] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 19:50:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 19:50:36] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 19:50:36] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 19:50:36] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 19:50:37] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 19:50:37] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 19:50:37] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 19:50:37] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 19:50:37] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 19:50:37] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 19:50:37] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 19:50:37] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 19:50:37] [Rank 0] PRINT: Model returns: +[2025-07-06 19:50:37] [Rank 0] PRINT: Model returns: +[2025-07-06 19:50:37] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 19:50:37] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 19:50:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 19:50:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-06 19:50:37] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 19:50:37] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-06 19:50:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 19:50:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-06 19:50:37] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 19:50:37] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-06 19:50:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 19:50:37] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 19:50:37] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 19:50:37] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 19:50:37] [Rank 0] PRINT: Starting warmup... +[2025-07-06 19:50:37] [Rank 0] PRINT: Starting warmup... +[2025-07-06 19:52:15] [Rank 0] PRINT: Warmup complete. +[2025-07-06 19:52:15] [Rank 0] PRINT: Warmup complete. +[2025-07-06 19:52:15] [Rank 0] PRINT: Starting training... +[2025-07-06 19:52:15] [Rank 0] PRINT: Starting training... +[2025-07-06 19:52:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 19:52:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 19:52:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 19:52:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 19:52:24] [Rank 0] step:21/10000 train_time:1550ms step_avg:73.80ms +[2025-07-06 19:52:24] [Rank 0] step:21/10000 train_time:1550ms step_avg:73.80ms +[2025-07-06 19:52:26] [Rank 0] step:41/10000 train_time:3006ms step_avg:73.31ms +[2025-07-06 19:52:26] [Rank 0] step:41/10000 train_time:3006ms step_avg:73.31ms +[2025-07-06 19:52:27] [Rank 0] step:61/10000 train_time:4464ms step_avg:73.17ms +[2025-07-06 19:52:27] [Rank 0] step:61/10000 train_time:4464ms step_avg:73.17ms +[2025-07-06 19:52:29] [Rank 0] step:81/10000 train_time:5922ms step_avg:73.11ms +[2025-07-06 19:52:29] [Rank 0] step:81/10000 train_time:5922ms step_avg:73.11ms +[2025-07-06 19:52:31] [Rank 0] step:101/10000 train_time:8062ms step_avg:79.82ms +[2025-07-06 19:52:31] [Rank 0] step:101/10000 train_time:8062ms step_avg:79.82ms +[2025-07-06 19:52:32] [Rank 0] step:121/10000 train_time:9522ms step_avg:78.70ms +[2025-07-06 19:52:32] [Rank 0] step:121/10000 train_time:9522ms step_avg:78.70ms +[2025-07-06 19:52:34] [Rank 0] step:141/10000 train_time:10986ms step_avg:77.91ms +[2025-07-06 19:52:34] [Rank 0] step:141/10000 train_time:10986ms step_avg:77.91ms +[2025-07-06 19:52:35] [Rank 0] step:161/10000 train_time:12612ms step_avg:78.33ms +[2025-07-06 19:52:35] [Rank 0] step:161/10000 train_time:12612ms step_avg:78.33ms +[2025-07-06 19:52:38] [Rank 0] step:181/10000 train_time:14868ms step_avg:82.14ms +[2025-07-06 19:52:38] [Rank 0] step:181/10000 train_time:14868ms step_avg:82.14ms +[2025-07-06 19:52:39] [Rank 0] step:201/10000 train_time:16319ms step_avg:81.19ms +[2025-07-06 19:52:39] [Rank 0] step:201/10000 train_time:16319ms step_avg:81.19ms +[2025-07-06 19:52:40] [Rank 0] step:221/10000 train_time:17783ms step_avg:80.47ms +[2025-07-06 19:52:40] [Rank 0] step:221/10000 train_time:17783ms step_avg:80.47ms +[2025-07-06 19:52:42] [Rank 0] step:241/10000 train_time:19250ms step_avg:79.87ms +[2025-07-06 19:52:42] [Rank 0] step:241/10000 train_time:19250ms step_avg:79.87ms +[2025-07-06 19:52:43] [Rank 0] step:261/10000 train_time:20721ms step_avg:79.39ms +[2025-07-06 19:52:43] [Rank 0] step:261/10000 train_time:20721ms step_avg:79.39ms +[2025-07-06 19:52:45] [Rank 0] step:281/10000 train_time:22425ms step_avg:79.80ms +[2025-07-06 19:52:45] [Rank 0] step:281/10000 train_time:22425ms step_avg:79.80ms +[2025-07-06 19:52:47] [Rank 0] step:301/10000 train_time:23894ms step_avg:79.38ms +[2025-07-06 19:52:47] [Rank 0] step:301/10000 train_time:23894ms step_avg:79.38ms +[2025-07-06 19:52:48] [Rank 0] step:321/10000 train_time:25360ms step_avg:79.00ms +[2025-07-06 19:52:48] [Rank 0] step:321/10000 train_time:25360ms step_avg:79.00ms +[2025-07-06 19:52:50] [Rank 0] step:341/10000 train_time:26827ms step_avg:78.67ms +[2025-07-06 19:52:50] [Rank 0] step:341/10000 train_time:26827ms step_avg:78.67ms +[2025-07-06 19:52:52] [Rank 0] step:361/10000 train_time:28347ms step_avg:78.52ms +[2025-07-06 19:52:52] [Rank 0] step:361/10000 train_time:28347ms step_avg:78.52ms +[2025-07-06 19:52:53] [Rank 0] step:381/10000 train_time:30427ms step_avg:79.86ms +[2025-07-06 19:52:53] [Rank 0] step:381/10000 train_time:30427ms step_avg:79.86ms +[2025-07-06 19:52:55] [Rank 0] step:401/10000 train_time:31893ms step_avg:79.53ms +[2025-07-06 19:52:55] [Rank 0] step:401/10000 train_time:31893ms step_avg:79.53ms +[2025-07-06 19:52:56] [Rank 0] step:421/10000 train_time:33361ms step_avg:79.24ms 
+[2025-07-06 19:52:56] [Rank 0] step:421/10000 train_time:33361ms step_avg:79.24ms +[2025-07-06 19:52:58] [Rank 0] step:441/10000 train_time:34831ms step_avg:78.98ms +[2025-07-06 19:52:58] [Rank 0] step:441/10000 train_time:34831ms step_avg:78.98ms +[2025-07-06 19:52:59] [Rank 0] step:461/10000 train_time:36534ms step_avg:79.25ms +[2025-07-06 19:52:59] [Rank 0] step:461/10000 train_time:36534ms step_avg:79.25ms +[2025-07-06 19:53:01] [Rank 0] step:481/10000 train_time:38004ms step_avg:79.01ms +[2025-07-06 19:53:01] [Rank 0] step:481/10000 train_time:38004ms step_avg:79.01ms +[2025-07-06 19:53:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 19:53:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 19:53:03] [Rank 0] PRINT: step:500/10000 train_loss:3.3878 val_loss:1.6139 train_time:39469ms step_avg:78.94ms +[2025-07-06 19:53:03] [Rank 0] PRINT: step:500/10000 train_loss:3.3878 val_loss:1.6139 train_time:39469ms step_avg:78.94ms +[2025-07-06 19:53:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 19:53:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 19:53:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 19:53:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 19:53:03] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 19:53:03] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 19:58:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 19:58:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 19:58:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 19:58:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 19:58:29] [Rank 0] Total Loss: 4.1040 +[2025-07-06 19:58:29] [Rank 0] Total Loss: 4.1040 +[2025-07-06 19:58:29] [Rank 0] Total FTA: 0.0818 +[2025-07-06 19:58:29] [Rank 0] Total FTA: 0.0818 +[2025-07-06 19:58:29] [Rank 0] Group 0 Loss: 4.3824 +[2025-07-06 19:58:29] [Rank 0] Group 0 Loss: 4.3824 +[2025-07-06 19:58:29] [Rank 0] Group 1 Loss: 4.0569 +[2025-07-06 19:58:29] [Rank 0] Group 1 Loss: 4.0569 +[2025-07-06 19:58:29] [Rank 0] Group 2 Loss: 3.8925 +[2025-07-06 19:58:29] [Rank 0] Group 2 Loss: 3.8925 +[2025-07-06 19:58:29] [Rank 0] Group 3 Loss: 3.9827 +[2025-07-06 19:58:29] [Rank 0] Group 3 Loss: 3.9827 +[2025-07-06 19:58:29] [Rank 0] Group 4 Loss: 4.0440 +[2025-07-06 19:58:29] [Rank 0] Group 4 Loss: 4.0440 +[2025-07-06 19:58:29] [Rank 0] Group 5 Loss: 4.0729 +[2025-07-06 19:58:29] [Rank 0] Group 5 Loss: 4.0729 +[2025-07-06 19:58:29] [Rank 0] Group 6 Loss: 4.0342 +[2025-07-06 19:58:29] [Rank 0] Group 6 Loss: 4.0342 +[2025-07-06 19:58:29] [Rank 0] Group 7 Loss: 4.1095 +[2025-07-06 19:58:29] [Rank 0] Group 7 Loss: 4.1095 +[2025-07-06 19:58:29] [Rank 0] Group 8 Loss: 4.0877 +[2025-07-06 19:58:29] [Rank 0] Group 8 Loss: 4.0877 +[2025-07-06 19:58:29] [Rank 0] Group 9 Loss: 4.1150 +[2025-07-06 19:58:29] [Rank 0] Group 9 Loss: 4.1150 +[2025-07-06 19:58:29] [Rank 0] Group 10 Loss: 4.0842 +[2025-07-06 19:58:29] [Rank 0] Group 10 Loss: 4.0842 +[2025-07-06 19:58:29] [Rank 0] Group 11 Loss: 4.1089 +[2025-07-06 19:58:29] [Rank 0] Group 11 Loss: 4.1089 +[2025-07-06 19:58:29] [Rank 0] Group 0 FTA: 0.1404 +[2025-07-06 19:58:29] [Rank 0] Group 0 FTA: 0.1404 +[2025-07-06 
19:58:29] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 19:58:29] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 19:58:29] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-06 19:58:29] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-06 19:58:29] [Rank 0] Group 3 FTA: 0.0469 +[2025-07-06 19:58:29] [Rank 0] Group 3 FTA: 0.0469 +[2025-07-06 19:58:29] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-06 19:58:29] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-06 19:58:29] [Rank 0] Group 5 FTA: 0.0833 +[2025-07-06 19:58:29] [Rank 0] Group 5 FTA: 0.0833 +[2025-07-06 19:58:29] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-06 19:58:29] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-06 19:58:29] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-06 19:58:29] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-06 19:58:29] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-06 19:58:29] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-06 19:58:29] [Rank 0] Group 9 FTA: 0.0859 +[2025-07-06 19:58:29] [Rank 0] Group 9 FTA: 0.0859 +[2025-07-06 19:58:29] [Rank 0] Group 10 FTA: 0.0996 +[2025-07-06 19:58:29] [Rank 0] Group 10 FTA: 0.0996 +[2025-07-06 19:58:29] [Rank 0] Group 11 FTA: 0.0850 +[2025-07-06 19:58:29] [Rank 0] Group 11 FTA: 0.0850 +[2025-07-06 19:58:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 19:58:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 19:58:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 19:58:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 19:58:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 19:58:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 19:58:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 19:58:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 19:58:31] [Rank 0] step:501/10000 train_time:39491ms step_avg:78.82ms +[2025-07-06 19:58:31] [Rank 0] step:501/10000 train_time:39491ms step_avg:78.82ms +[2025-07-06 19:58:32] [Rank 0] step:521/10000 train_time:40942ms step_avg:78.58ms +[2025-07-06 19:58:32] [Rank 0] step:521/10000 train_time:40942ms step_avg:78.58ms +[2025-07-06 19:58:34] [Rank 0] step:541/10000 train_time:43092ms step_avg:79.65ms +[2025-07-06 19:58:34] [Rank 0] step:541/10000 train_time:43092ms step_avg:79.65ms +[2025-07-06 19:58:36] [Rank 0] step:561/10000 train_time:44532ms step_avg:79.38ms +[2025-07-06 19:58:36] [Rank 0] step:561/10000 train_time:44532ms step_avg:79.38ms +[2025-07-06 19:58:37] [Rank 0] step:581/10000 train_time:45989ms step_avg:79.16ms +[2025-07-06 19:58:37] [Rank 0] step:581/10000 train_time:45989ms step_avg:79.16ms +[2025-07-06 19:58:39] [Rank 0] step:601/10000 train_time:47448ms step_avg:78.95ms +[2025-07-06 19:58:39] [Rank 0] step:601/10000 train_time:47448ms step_avg:78.95ms +[2025-07-06 19:58:40] [Rank 0] step:621/10000 train_time:48911ms step_avg:78.76ms +[2025-07-06 19:58:40] [Rank 0] step:621/10000 train_time:48911ms step_avg:78.76ms 
+[2025-07-06 19:58:42] [Rank 0] step:641/10000 train_time:51031ms step_avg:79.61ms +[2025-07-06 19:58:42] [Rank 0] step:641/10000 train_time:51031ms step_avg:79.61ms +[2025-07-06 19:58:44] [Rank 0] step:661/10000 train_time:52489ms step_avg:79.41ms +[2025-07-06 19:58:44] [Rank 0] step:661/10000 train_time:52489ms step_avg:79.41ms +[2025-07-06 19:58:45] [Rank 0] step:681/10000 train_time:53948ms step_avg:79.22ms +[2025-07-06 19:58:45] [Rank 0] step:681/10000 train_time:53948ms step_avg:79.22ms +[2025-07-06 19:58:46] [Rank 0] step:701/10000 train_time:55412ms step_avg:79.05ms +[2025-07-06 19:58:46] [Rank 0] step:701/10000 train_time:55412ms step_avg:79.05ms +[2025-07-06 19:58:49] [Rank 0] step:721/10000 train_time:56928ms step_avg:78.96ms +[2025-07-06 19:58:49] [Rank 0] step:721/10000 train_time:56928ms step_avg:78.96ms +[2025-07-06 19:58:50] [Rank 0] step:741/10000 train_time:58995ms step_avg:79.62ms +[2025-07-06 19:58:50] [Rank 0] step:741/10000 train_time:58995ms step_avg:79.62ms +[2025-07-06 19:58:52] [Rank 0] step:761/10000 train_time:60464ms step_avg:79.45ms +[2025-07-06 19:58:52] [Rank 0] step:761/10000 train_time:60464ms step_avg:79.45ms +[2025-07-06 19:58:53] [Rank 0] step:781/10000 train_time:61935ms step_avg:79.30ms +[2025-07-06 19:58:53] [Rank 0] step:781/10000 train_time:61935ms step_avg:79.30ms +[2025-07-06 19:58:55] [Rank 0] step:801/10000 train_time:63650ms step_avg:79.46ms +[2025-07-06 19:58:55] [Rank 0] step:801/10000 train_time:63650ms step_avg:79.46ms +[2025-07-06 19:58:57] [Rank 0] step:821/10000 train_time:65446ms step_avg:79.71ms +[2025-07-06 19:58:57] [Rank 0] step:821/10000 train_time:65446ms step_avg:79.71ms +[2025-07-06 19:58:58] [Rank 0] step:841/10000 train_time:66967ms step_avg:79.63ms +[2025-07-06 19:58:58] [Rank 0] step:841/10000 train_time:66967ms step_avg:79.63ms +[2025-07-06 19:59:00] [Rank 0] step:861/10000 train_time:68438ms step_avg:79.49ms +[2025-07-06 19:59:00] [Rank 0] step:861/10000 train_time:68438ms step_avg:79.49ms +[2025-07-06 19:59:01] [Rank 0] step:881/10000 train_time:69913ms step_avg:79.36ms +[2025-07-06 19:59:01] [Rank 0] step:881/10000 train_time:69913ms step_avg:79.36ms +[2025-07-06 19:59:03] [Rank 0] step:901/10000 train_time:71648ms step_avg:79.52ms +[2025-07-06 19:59:03] [Rank 0] step:901/10000 train_time:71648ms step_avg:79.52ms +[2025-07-06 19:59:05] [Rank 0] step:921/10000 train_time:73504ms step_avg:79.81ms +[2025-07-06 19:59:05] [Rank 0] step:921/10000 train_time:73504ms step_avg:79.81ms +[2025-07-06 19:59:06] [Rank 0] step:941/10000 train_time:74977ms step_avg:79.68ms +[2025-07-06 19:59:06] [Rank 0] step:941/10000 train_time:74977ms step_avg:79.68ms +[2025-07-06 19:59:08] [Rank 0] step:961/10000 train_time:76451ms step_avg:79.55ms +[2025-07-06 19:59:08] [Rank 0] step:961/10000 train_time:76451ms step_avg:79.55ms +[2025-07-06 19:59:09] [Rank 0] step:981/10000 train_time:77928ms step_avg:79.44ms +[2025-07-06 19:59:09] [Rank 0] step:981/10000 train_time:77928ms step_avg:79.44ms +[2025-07-06 19:59:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 19:59:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 19:59:12] [Rank 0] PRINT: step:1000/10000 train_loss:1.4631 val_loss:1.3136 train_time:80058ms step_avg:80.06ms +[2025-07-06 19:59:12] [Rank 0] PRINT: step:1000/10000 train_loss:1.4631 val_loss:1.3136 train_time:80058ms step_avg:80.06ms +[2025-07-06 19:59:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 19:59:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 19:59:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 19:59:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 19:59:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 19:59:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 20:04:39] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 20:04:39] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 20:04:39] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 20:04:39] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 20:04:39] [Rank 0] Total Loss: 4.3811 +[2025-07-06 20:04:39] [Rank 0] Total Loss: 4.3811 +[2025-07-06 20:04:39] [Rank 0] Total FTA: 0.2063 +[2025-07-06 20:04:39] [Rank 0] Total FTA: 0.2063 +[2025-07-06 20:04:39] [Rank 0] Group 0 Loss: 4.6220 +[2025-07-06 20:04:39] [Rank 0] Group 0 Loss: 4.6220 +[2025-07-06 20:04:39] [Rank 0] Group 1 Loss: 4.0957 +[2025-07-06 20:04:39] [Rank 0] Group 1 Loss: 4.0957 +[2025-07-06 20:04:39] [Rank 0] Group 2 Loss: 4.2117 +[2025-07-06 20:04:39] [Rank 0] Group 2 Loss: 4.2117 +[2025-07-06 20:04:39] [Rank 0] Group 3 Loss: 4.3065 +[2025-07-06 20:04:39] [Rank 0] Group 3 Loss: 4.3065 +[2025-07-06 20:04:39] [Rank 0] Group 4 Loss: 4.4368 +[2025-07-06 20:04:39] [Rank 0] Group 4 Loss: 4.4368 +[2025-07-06 20:04:39] [Rank 0] Group 5 Loss: 4.2809 +[2025-07-06 20:04:39] [Rank 0] Group 5 Loss: 4.2809 +[2025-07-06 20:04:39] [Rank 0] Group 6 Loss: 4.3362 +[2025-07-06 20:04:39] [Rank 0] Group 6 Loss: 4.3362 +[2025-07-06 20:04:39] [Rank 0] Group 7 Loss: 4.4056 +[2025-07-06 20:04:39] [Rank 0] Group 7 Loss: 4.4056 +[2025-07-06 20:04:39] [Rank 0] Group 8 Loss: 4.3600 +[2025-07-06 20:04:39] [Rank 0] Group 8 Loss: 4.3600 +[2025-07-06 20:04:39] [Rank 0] Group 9 Loss: 4.4175 +[2025-07-06 20:04:39] [Rank 0] Group 9 Loss: 4.4175 +[2025-07-06 20:04:39] [Rank 0] Group 10 Loss: 4.4242 +[2025-07-06 20:04:39] [Rank 0] Group 10 Loss: 4.4242 +[2025-07-06 20:04:39] [Rank 0] Group 11 Loss: 4.4001 +[2025-07-06 20:04:39] [Rank 0] Group 11 Loss: 4.4001 +[2025-07-06 20:04:39] [Rank 0] Group 0 FTA: 0.3433 +[2025-07-06 20:04:39] [Rank 0] Group 0 FTA: 0.3433 +[2025-07-06 20:04:39] [Rank 0] Group 1 FTA: 0.1380 +[2025-07-06 20:04:39] [Rank 0] Group 1 FTA: 0.1380 +[2025-07-06 20:04:39] [Rank 0] Group 2 FTA: 0.2422 +[2025-07-06 20:04:39] [Rank 0] Group 2 FTA: 0.2422 +[2025-07-06 20:04:39] [Rank 0] Group 3 FTA: 0.2526 +[2025-07-06 20:04:39] [Rank 0] Group 3 FTA: 0.2526 +[2025-07-06 20:04:39] [Rank 0] Group 4 FTA: 0.2005 +[2025-07-06 20:04:39] [Rank 0] Group 4 FTA: 0.2005 +[2025-07-06 20:04:39] [Rank 0] Group 5 FTA: 0.1797 +[2025-07-06 20:04:39] [Rank 0] Group 5 FTA: 0.1797 +[2025-07-06 20:04:39] [Rank 0] Group 6 FTA: 0.2161 +[2025-07-06 20:04:39] [Rank 0] Group 6 FTA: 0.2161 +[2025-07-06 20:04:39] [Rank 0] Group 7 FTA: 0.1719 +[2025-07-06 20:04:39] [Rank 0] Group 7 FTA: 0.1719 +[2025-07-06 20:04:39] [Rank 0] Group 8 FTA: 0.1562 +[2025-07-06 20:04:39] [Rank 0] Group 8 FTA: 0.1562 +[2025-07-06 20:04:39] [Rank 0] Group 9 FTA: 0.1758 
+[2025-07-06 20:04:39] [Rank 0] Group 9 FTA: 0.1758 +[2025-07-06 20:04:39] [Rank 0] Group 10 FTA: 0.1699 +[2025-07-06 20:04:39] [Rank 0] Group 10 FTA: 0.1699 +[2025-07-06 20:04:39] [Rank 0] Group 11 FTA: 0.1641 +[2025-07-06 20:04:39] [Rank 0] Group 11 FTA: 0.1641 +[2025-07-06 20:04:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 20:04:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 20:04:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 20:04:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 20:04:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 20:04:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 20:04:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 20:04:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 20:04:41] [Rank 0] step:1001/10000 train_time:80080ms step_avg:80.00ms +[2025-07-06 20:04:41] [Rank 0] step:1001/10000 train_time:80080ms step_avg:80.00ms +[2025-07-06 20:04:42] [Rank 0] step:1021/10000 train_time:81562ms step_avg:79.88ms +[2025-07-06 20:04:42] [Rank 0] step:1021/10000 train_time:81562ms step_avg:79.88ms +[2025-07-06 20:04:44] [Rank 0] step:1041/10000 train_time:83025ms step_avg:79.76ms +[2025-07-06 20:04:44] [Rank 0] step:1041/10000 train_time:83025ms step_avg:79.76ms +[2025-07-06 20:04:45] [Rank 0] step:1061/10000 train_time:84491ms step_avg:79.63ms +[2025-07-06 20:04:45] [Rank 0] step:1061/10000 train_time:84491ms step_avg:79.63ms +[2025-07-06 20:04:47] [Rank 0] step:1081/10000 train_time:86215ms step_avg:79.75ms +[2025-07-06 20:04:47] [Rank 0] step:1081/10000 train_time:86215ms step_avg:79.75ms +[2025-07-06 20:04:49] [Rank 0] step:1101/10000 train_time:88068ms step_avg:79.99ms +[2025-07-06 20:04:49] [Rank 0] step:1101/10000 train_time:88068ms step_avg:79.99ms +[2025-07-06 20:04:50] [Rank 0] step:1121/10000 train_time:89534ms step_avg:79.87ms +[2025-07-06 20:04:50] [Rank 0] step:1121/10000 train_time:89534ms step_avg:79.87ms +[2025-07-06 20:04:52] [Rank 0] step:1141/10000 train_time:91003ms step_avg:79.76ms +[2025-07-06 20:04:52] [Rank 0] step:1141/10000 train_time:91003ms step_avg:79.76ms +[2025-07-06 20:04:53] [Rank 0] step:1161/10000 train_time:92468ms step_avg:79.65ms +[2025-07-06 20:04:53] [Rank 0] step:1161/10000 train_time:92468ms step_avg:79.65ms +[2025-07-06 20:04:55] [Rank 0] step:1181/10000 train_time:94595ms step_avg:80.10ms +[2025-07-06 20:04:55] [Rank 0] step:1181/10000 train_time:94595ms step_avg:80.10ms +[2025-07-06 20:04:57] [Rank 0] step:1201/10000 train_time:96063ms step_avg:79.99ms +[2025-07-06 20:04:57] [Rank 0] step:1201/10000 train_time:96063ms step_avg:79.99ms +[2025-07-06 20:04:58] [Rank 0] step:1221/10000 train_time:97534ms step_avg:79.88ms +[2025-07-06 20:04:58] [Rank 0] step:1221/10000 train_time:97534ms step_avg:79.88ms +[2025-07-06 
20:05:00] [Rank 0] step:1241/10000 train_time:99002ms step_avg:79.78ms +[2025-07-06 20:05:00] [Rank 0] step:1241/10000 train_time:99002ms step_avg:79.78ms +[2025-07-06 20:05:01] [Rank 0] step:1261/10000 train_time:100527ms step_avg:79.72ms +[2025-07-06 20:05:01] [Rank 0] step:1261/10000 train_time:100527ms step_avg:79.72ms +[2025-07-06 20:05:03] [Rank 0] step:1281/10000 train_time:101984ms step_avg:79.61ms +[2025-07-06 20:05:03] [Rank 0] step:1281/10000 train_time:101984ms step_avg:79.61ms +[2025-07-06 20:05:04] [Rank 0] step:1301/10000 train_time:103459ms step_avg:79.52ms +[2025-07-06 20:05:04] [Rank 0] step:1301/10000 train_time:103459ms step_avg:79.52ms +[2025-07-06 20:05:06] [Rank 0] step:1321/10000 train_time:104930ms step_avg:79.43ms +[2025-07-06 20:05:06] [Rank 0] step:1321/10000 train_time:104930ms step_avg:79.43ms +[2025-07-06 20:05:07] [Rank 0] step:1341/10000 train_time:106404ms step_avg:79.35ms +[2025-07-06 20:05:07] [Rank 0] step:1341/10000 train_time:106404ms step_avg:79.35ms +[2025-07-06 20:05:09] [Rank 0] step:1361/10000 train_time:108518ms step_avg:79.73ms +[2025-07-06 20:05:09] [Rank 0] step:1361/10000 train_time:108518ms step_avg:79.73ms +[2025-07-06 20:05:11] [Rank 0] step:1381/10000 train_time:109992ms step_avg:79.65ms +[2025-07-06 20:05:11] [Rank 0] step:1381/10000 train_time:109992ms step_avg:79.65ms +[2025-07-06 20:05:12] [Rank 0] step:1401/10000 train_time:111465ms step_avg:79.56ms +[2025-07-06 20:05:12] [Rank 0] step:1401/10000 train_time:111465ms step_avg:79.56ms +[2025-07-06 20:05:14] [Rank 0] step:1421/10000 train_time:113005ms step_avg:79.53ms +[2025-07-06 20:05:14] [Rank 0] step:1421/10000 train_time:113005ms step_avg:79.53ms +[2025-07-06 20:05:16] [Rank 0] step:1441/10000 train_time:115327ms step_avg:80.03ms +[2025-07-06 20:05:16] [Rank 0] step:1441/10000 train_time:115327ms step_avg:80.03ms +[2025-07-06 20:05:18] [Rank 0] step:1461/10000 train_time:116861ms step_avg:79.99ms +[2025-07-06 20:05:18] [Rank 0] step:1461/10000 train_time:116861ms step_avg:79.99ms +[2025-07-06 20:05:19] [Rank 0] step:1481/10000 train_time:118334ms step_avg:79.90ms +[2025-07-06 20:05:19] [Rank 0] step:1481/10000 train_time:118334ms step_avg:79.90ms +[2025-07-06 20:05:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 20:05:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 20:05:21] [Rank 0] PRINT: step:1500/10000 train_loss:1.2239 val_loss:1.1871 train_time:119809ms step_avg:79.87ms +[2025-07-06 20:05:21] [Rank 0] PRINT: step:1500/10000 train_loss:1.2239 val_loss:1.1871 train_time:119809ms step_avg:79.87ms +[2025-07-06 20:05:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 20:05:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 20:05:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 20:05:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 20:05:22] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 20:05:22] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 20:10:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 20:10:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 20:10:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 20:10:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 20:10:49] [Rank 0] Total Loss: 4.6830 +[2025-07-06 20:10:49] [Rank 0] Total Loss: 4.6830 +[2025-07-06 20:10:49] [Rank 0] Total FTA: 0.3444 +[2025-07-06 20:10:49] [Rank 0] Total FTA: 0.3444 +[2025-07-06 20:10:49] [Rank 0] Group 0 Loss: 4.9415 +[2025-07-06 20:10:49] [Rank 0] Group 0 Loss: 4.9415 +[2025-07-06 20:10:49] [Rank 0] Group 1 Loss: 4.3169 +[2025-07-06 20:10:49] [Rank 0] Group 1 Loss: 4.3169 +[2025-07-06 20:10:49] [Rank 0] Group 2 Loss: 4.5810 +[2025-07-06 20:10:49] [Rank 0] Group 2 Loss: 4.5810 +[2025-07-06 20:10:49] [Rank 0] Group 3 Loss: 4.6917 +[2025-07-06 20:10:49] [Rank 0] Group 3 Loss: 4.6917 +[2025-07-06 20:10:49] [Rank 0] Group 4 Loss: 4.7282 +[2025-07-06 20:10:49] [Rank 0] Group 4 Loss: 4.7282 +[2025-07-06 20:10:49] [Rank 0] Group 5 Loss: 4.6230 +[2025-07-06 20:10:49] [Rank 0] Group 5 Loss: 4.6230 +[2025-07-06 20:10:49] [Rank 0] Group 6 Loss: 4.6001 +[2025-07-06 20:10:49] [Rank 0] Group 6 Loss: 4.6001 +[2025-07-06 20:10:49] [Rank 0] Group 7 Loss: 4.7044 +[2025-07-06 20:10:49] [Rank 0] Group 7 Loss: 4.7044 +[2025-07-06 20:10:49] [Rank 0] Group 8 Loss: 4.6914 +[2025-07-06 20:10:49] [Rank 0] Group 8 Loss: 4.6914 +[2025-07-06 20:10:49] [Rank 0] Group 9 Loss: 4.6820 +[2025-07-06 20:10:49] [Rank 0] Group 9 Loss: 4.6820 +[2025-07-06 20:10:49] [Rank 0] Group 10 Loss: 4.6750 +[2025-07-06 20:10:49] [Rank 0] Group 10 Loss: 4.6750 +[2025-07-06 20:10:49] [Rank 0] Group 11 Loss: 4.6909 +[2025-07-06 20:10:49] [Rank 0] Group 11 Loss: 4.6909 +[2025-07-06 20:10:49] [Rank 0] Group 0 FTA: 0.3212 +[2025-07-06 20:10:49] [Rank 0] Group 0 FTA: 0.3212 +[2025-07-06 20:10:49] [Rank 0] Group 1 FTA: 0.4740 +[2025-07-06 20:10:49] [Rank 0] Group 1 FTA: 0.4740 +[2025-07-06 20:10:49] [Rank 0] Group 2 FTA: 0.4062 +[2025-07-06 20:10:49] [Rank 0] Group 2 FTA: 0.4062 +[2025-07-06 20:10:49] [Rank 0] Group 3 FTA: 0.2786 +[2025-07-06 20:10:49] [Rank 0] Group 3 FTA: 0.2786 +[2025-07-06 20:10:49] [Rank 0] Group 4 FTA: 0.3594 +[2025-07-06 20:10:49] [Rank 0] Group 4 FTA: 0.3594 +[2025-07-06 20:10:49] [Rank 0] Group 5 FTA: 0.3542 +[2025-07-06 20:10:49] [Rank 0] Group 5 FTA: 0.3542 +[2025-07-06 20:10:49] [Rank 0] Group 6 FTA: 0.2969 +[2025-07-06 20:10:49] [Rank 0] Group 6 FTA: 0.2969 +[2025-07-06 20:10:49] [Rank 0] Group 7 FTA: 0.3464 +[2025-07-06 20:10:49] [Rank 0] Group 7 FTA: 0.3464 +[2025-07-06 20:10:49] [Rank 0] Group 8 FTA: 0.3177 +[2025-07-06 20:10:49] [Rank 0] Group 8 FTA: 0.3177 +[2025-07-06 20:10:49] [Rank 0] Group 9 FTA: 0.3516 +[2025-07-06 20:10:49] [Rank 0] Group 9 FTA: 0.3516 +[2025-07-06 20:10:49] [Rank 0] Group 10 FTA: 0.3340 +[2025-07-06 20:10:49] [Rank 0] Group 10 FTA: 0.3340 +[2025-07-06 20:10:49] [Rank 0] Group 11 FTA: 0.3359 +[2025-07-06 20:10:49] [Rank 0] Group 11 FTA: 0.3359 +[2025-07-06 20:10:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 20:10:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 20:10:50] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 20:10:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 20:10:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 20:10:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 20:10:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 20:10:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 20:10:51] [Rank 0] step:1501/10000 train_time:119831ms step_avg:79.83ms +[2025-07-06 20:10:51] [Rank 0] step:1501/10000 train_time:119831ms step_avg:79.83ms +[2025-07-06 20:10:52] [Rank 0] step:1521/10000 train_time:121298ms step_avg:79.75ms +[2025-07-06 20:10:52] [Rank 0] step:1521/10000 train_time:121298ms step_avg:79.75ms +[2025-07-06 20:10:54] [Rank 0] step:1541/10000 train_time:123410ms step_avg:80.08ms +[2025-07-06 20:10:54] [Rank 0] step:1541/10000 train_time:123410ms step_avg:80.08ms +[2025-07-06 20:10:56] [Rank 0] step:1561/10000 train_time:124874ms step_avg:80.00ms +[2025-07-06 20:10:56] [Rank 0] step:1561/10000 train_time:124874ms step_avg:80.00ms +[2025-07-06 20:10:57] [Rank 0] step:1581/10000 train_time:126339ms step_avg:79.91ms +[2025-07-06 20:10:57] [Rank 0] step:1581/10000 train_time:126339ms step_avg:79.91ms +[2025-07-06 20:10:59] [Rank 0] step:1601/10000 train_time:127808ms step_avg:79.83ms +[2025-07-06 20:10:59] [Rank 0] step:1601/10000 train_time:127808ms step_avg:79.83ms +[2025-07-06 20:11:00] [Rank 0] step:1621/10000 train_time:129530ms step_avg:79.91ms +[2025-07-06 20:11:00] [Rank 0] step:1621/10000 train_time:129530ms step_avg:79.91ms +[2025-07-06 20:11:02] [Rank 0] step:1641/10000 train_time:130980ms step_avg:79.82ms +[2025-07-06 20:11:02] [Rank 0] step:1641/10000 train_time:130980ms step_avg:79.82ms +[2025-07-06 20:11:03] [Rank 0] step:1661/10000 train_time:132448ms step_avg:79.74ms +[2025-07-06 20:11:03] [Rank 0] step:1661/10000 train_time:132448ms step_avg:79.74ms +[2025-07-06 20:11:05] [Rank 0] step:1681/10000 train_time:133919ms step_avg:79.67ms +[2025-07-06 20:11:05] [Rank 0] step:1681/10000 train_time:133919ms step_avg:79.67ms +[2025-07-06 20:11:06] [Rank 0] step:1701/10000 train_time:135386ms step_avg:79.59ms +[2025-07-06 20:11:06] [Rank 0] step:1701/10000 train_time:135386ms step_avg:79.59ms +[2025-07-06 20:11:08] [Rank 0] step:1721/10000 train_time:137522ms step_avg:79.91ms +[2025-07-06 20:11:08] [Rank 0] step:1721/10000 train_time:137522ms step_avg:79.91ms +[2025-07-06 20:11:10] [Rank 0] step:1741/10000 train_time:138990ms step_avg:79.83ms +[2025-07-06 20:11:10] [Rank 0] step:1741/10000 train_time:138990ms step_avg:79.83ms +[2025-07-06 20:11:11] [Rank 0] step:1761/10000 train_time:140461ms step_avg:79.76ms +[2025-07-06 20:11:11] [Rank 0] step:1761/10000 train_time:140461ms step_avg:79.76ms +[2025-07-06 20:11:13] [Rank 0] step:1781/10000 train_time:141932ms step_avg:79.69ms +[2025-07-06 20:11:13] [Rank 0] step:1781/10000 train_time:141932ms step_avg:79.69ms +[2025-07-06 20:11:15] [Rank 0] step:1801/10000 train_time:143401ms step_avg:79.62ms +[2025-07-06 20:11:15] [Rank 0] 
step:1801/10000 train_time:143401ms step_avg:79.62ms +[2025-07-06 20:11:16] [Rank 0] step:1821/10000 train_time:145526ms step_avg:79.92ms +[2025-07-06 20:11:16] [Rank 0] step:1821/10000 train_time:145526ms step_avg:79.92ms +[2025-07-06 20:11:18] [Rank 0] step:1841/10000 train_time:146998ms step_avg:79.85ms +[2025-07-06 20:11:18] [Rank 0] step:1841/10000 train_time:146998ms step_avg:79.85ms +[2025-07-06 20:11:19] [Rank 0] step:1861/10000 train_time:148468ms step_avg:79.78ms +[2025-07-06 20:11:19] [Rank 0] step:1861/10000 train_time:148468ms step_avg:79.78ms +[2025-07-06 20:11:21] [Rank 0] step:1881/10000 train_time:149939ms step_avg:79.71ms +[2025-07-06 20:11:21] [Rank 0] step:1881/10000 train_time:149939ms step_avg:79.71ms +[2025-07-06 20:11:23] [Rank 0] step:1901/10000 train_time:152052ms step_avg:79.99ms +[2025-07-06 20:11:23] [Rank 0] step:1901/10000 train_time:152052ms step_avg:79.99ms +[2025-07-06 20:11:24] [Rank 0] step:1921/10000 train_time:153525ms step_avg:79.92ms +[2025-07-06 20:11:24] [Rank 0] step:1921/10000 train_time:153525ms step_avg:79.92ms +[2025-07-06 20:11:26] [Rank 0] step:1941/10000 train_time:154994ms step_avg:79.85ms +[2025-07-06 20:11:26] [Rank 0] step:1941/10000 train_time:154994ms step_avg:79.85ms +[2025-07-06 20:11:27] [Rank 0] step:1961/10000 train_time:156466ms step_avg:79.79ms +[2025-07-06 20:11:27] [Rank 0] step:1961/10000 train_time:156466ms step_avg:79.79ms +[2025-07-06 20:11:29] [Rank 0] step:1981/10000 train_time:158194ms step_avg:79.86ms +[2025-07-06 20:11:29] [Rank 0] step:1981/10000 train_time:158194ms step_avg:79.86ms +[2025-07-06 20:11:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 20:11:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 20:11:32] [Rank 0] PRINT: step:2000/10000 train_loss:1.1104 val_loss:1.0755 train_time:160053ms step_avg:80.03ms +[2025-07-06 20:11:32] [Rank 0] PRINT: step:2000/10000 train_loss:1.1104 val_loss:1.0755 train_time:160053ms step_avg:80.03ms +[2025-07-06 20:11:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 20:11:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 20:11:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 20:11:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 20:11:32] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 20:11:32] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 20:17:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 20:17:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 20:17:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 20:17:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 20:17:01] [Rank 0] Total Loss: 4.9483 +[2025-07-06 20:17:01] [Rank 0] Total Loss: 4.9483 +[2025-07-06 20:17:01] [Rank 0] Total FTA: 0.5443 +[2025-07-06 20:17:01] [Rank 0] Total FTA: 0.5443 +[2025-07-06 20:17:01] [Rank 0] Group 0 Loss: 5.2204 +[2025-07-06 20:17:01] [Rank 0] Group 0 Loss: 5.2204 +[2025-07-06 20:17:01] [Rank 0] Group 1 Loss: 4.6637 +[2025-07-06 20:17:01] [Rank 0] Group 1 Loss: 4.6637 +[2025-07-06 20:17:01] [Rank 0] Group 2 Loss: 4.8360 +[2025-07-06 20:17:01] [Rank 0] Group 2 Loss: 4.8360 +[2025-07-06 20:17:01] [Rank 0] Group 3 Loss: 5.0300 +[2025-07-06 20:17:01] [Rank 0] Group 3 Loss: 5.0300 +[2025-07-06 20:17:01] [Rank 0] Group 4 Loss: 4.9261 +[2025-07-06 20:17:01] [Rank 0] Group 4 Loss: 4.9261 +[2025-07-06 20:17:01] [Rank 0] Group 5 Loss: 4.8558 +[2025-07-06 20:17:01] [Rank 0] Group 5 Loss: 4.8558 +[2025-07-06 20:17:01] [Rank 0] Group 6 Loss: 4.8641 +[2025-07-06 20:17:01] [Rank 0] Group 6 Loss: 4.8641 +[2025-07-06 20:17:01] [Rank 0] Group 7 Loss: 4.9913 +[2025-07-06 20:17:01] [Rank 0] Group 7 Loss: 4.9913 +[2025-07-06 20:17:01] [Rank 0] Group 8 Loss: 4.9710 +[2025-07-06 20:17:01] [Rank 0] Group 8 Loss: 4.9710 +[2025-07-06 20:17:01] [Rank 0] Group 9 Loss: 4.8814 +[2025-07-06 20:17:01] [Rank 0] Group 9 Loss: 4.8814 +[2025-07-06 20:17:01] [Rank 0] Group 10 Loss: 4.9278 +[2025-07-06 20:17:01] [Rank 0] Group 10 Loss: 4.9278 +[2025-07-06 20:17:01] [Rank 0] Group 11 Loss: 4.9393 +[2025-07-06 20:17:01] [Rank 0] Group 11 Loss: 4.9393 +[2025-07-06 20:17:01] [Rank 0] Group 0 FTA: 0.6515 +[2025-07-06 20:17:01] [Rank 0] Group 0 FTA: 0.6515 +[2025-07-06 20:17:01] [Rank 0] Group 1 FTA: 0.2995 +[2025-07-06 20:17:01] [Rank 0] Group 1 FTA: 0.2995 +[2025-07-06 20:17:01] [Rank 0] Group 2 FTA: 0.6641 +[2025-07-06 20:17:01] [Rank 0] Group 2 FTA: 0.6641 +[2025-07-06 20:17:01] [Rank 0] Group 3 FTA: 0.4349 +[2025-07-06 20:17:01] [Rank 0] Group 3 FTA: 0.4349 +[2025-07-06 20:17:01] [Rank 0] Group 4 FTA: 0.5156 +[2025-07-06 20:17:01] [Rank 0] Group 4 FTA: 0.5156 +[2025-07-06 20:17:01] [Rank 0] Group 5 FTA: 0.5547 +[2025-07-06 20:17:01] [Rank 0] Group 5 FTA: 0.5547 +[2025-07-06 20:17:01] [Rank 0] Group 6 FTA: 0.5469 +[2025-07-06 20:17:01] [Rank 0] Group 6 FTA: 0.5469 +[2025-07-06 20:17:01] [Rank 0] Group 7 FTA: 0.5859 +[2025-07-06 20:17:01] [Rank 0] Group 7 FTA: 0.5859 +[2025-07-06 20:17:01] [Rank 0] Group 8 FTA: 0.5651 +[2025-07-06 20:17:01] [Rank 0] Group 8 FTA: 0.5651 +[2025-07-06 20:17:01] [Rank 0] Group 9 FTA: 0.5703 +[2025-07-06 20:17:01] [Rank 0] Group 9 FTA: 0.5703 +[2025-07-06 20:17:01] [Rank 0] Group 10 FTA: 0.5273 +[2025-07-06 20:17:01] [Rank 0] Group 10 FTA: 0.5273 +[2025-07-06 20:17:01] [Rank 0] Group 11 FTA: 0.5361 +[2025-07-06 20:17:01] [Rank 0] Group 11 FTA: 0.5361 +[2025-07-06 20:17:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 20:17:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 20:17:02] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 20:17:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 20:17:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 20:17:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 20:17:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 20:17:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 20:17:02] [Rank 0] step:2001/10000 train_time:160075ms step_avg:80.00ms +[2025-07-06 20:17:02] [Rank 0] step:2001/10000 train_time:160075ms step_avg:80.00ms +[2025-07-06 20:17:04] [Rank 0] step:2021/10000 train_time:161558ms step_avg:79.94ms +[2025-07-06 20:17:04] [Rank 0] step:2021/10000 train_time:161558ms step_avg:79.94ms +[2025-07-06 20:17:05] [Rank 0] step:2041/10000 train_time:163023ms step_avg:79.87ms +[2025-07-06 20:17:05] [Rank 0] step:2041/10000 train_time:163023ms step_avg:79.87ms +[2025-07-06 20:17:07] [Rank 0] step:2061/10000 train_time:164490ms step_avg:79.81ms +[2025-07-06 20:17:07] [Rank 0] step:2061/10000 train_time:164490ms step_avg:79.81ms +[2025-07-06 20:17:09] [Rank 0] step:2081/10000 train_time:166612ms step_avg:80.06ms +[2025-07-06 20:17:09] [Rank 0] step:2081/10000 train_time:166612ms step_avg:80.06ms +[2025-07-06 20:17:10] [Rank 0] step:2101/10000 train_time:168078ms step_avg:80.00ms +[2025-07-06 20:17:10] [Rank 0] step:2101/10000 train_time:168078ms step_avg:80.00ms +[2025-07-06 20:17:12] [Rank 0] step:2121/10000 train_time:169545ms step_avg:79.94ms +[2025-07-06 20:17:12] [Rank 0] step:2121/10000 train_time:169545ms step_avg:79.94ms +[2025-07-06 20:17:13] [Rank 0] step:2141/10000 train_time:171012ms step_avg:79.87ms +[2025-07-06 20:17:13] [Rank 0] step:2141/10000 train_time:171012ms step_avg:79.87ms +[2025-07-06 20:17:15] [Rank 0] step:2161/10000 train_time:172534ms step_avg:79.84ms +[2025-07-06 20:17:15] [Rank 0] step:2161/10000 train_time:172534ms step_avg:79.84ms +[2025-07-06 20:17:17] [Rank 0] step:2181/10000 train_time:174184ms step_avg:79.86ms +[2025-07-06 20:17:17] [Rank 0] step:2181/10000 train_time:174184ms step_avg:79.86ms +[2025-07-06 20:17:18] [Rank 0] step:2201/10000 train_time:175654ms step_avg:79.81ms +[2025-07-06 20:17:18] [Rank 0] step:2201/10000 train_time:175654ms step_avg:79.81ms +[2025-07-06 20:17:19] [Rank 0] step:2221/10000 train_time:177124ms step_avg:79.75ms +[2025-07-06 20:17:19] [Rank 0] step:2221/10000 train_time:177124ms step_avg:79.75ms +[2025-07-06 20:17:21] [Rank 0] step:2241/10000 train_time:178614ms step_avg:79.70ms +[2025-07-06 20:17:21] [Rank 0] step:2241/10000 train_time:178614ms step_avg:79.70ms +[2025-07-06 20:17:23] [Rank 0] step:2261/10000 train_time:180770ms step_avg:79.95ms +[2025-07-06 20:17:23] [Rank 0] step:2261/10000 train_time:180770ms step_avg:79.95ms +[2025-07-06 20:17:25] [Rank 0] step:2281/10000 train_time:182263ms step_avg:79.90ms +[2025-07-06 20:17:25] [Rank 0] step:2281/10000 train_time:182263ms step_avg:79.90ms +[2025-07-06 20:17:26] [Rank 0] step:2301/10000 train_time:183757ms step_avg:79.86ms +[2025-07-06 20:17:26] [Rank 0] 
step:2301/10000 train_time:183757ms step_avg:79.86ms +[2025-07-06 20:17:28] [Rank 0] step:2321/10000 train_time:185251ms step_avg:79.82ms +[2025-07-06 20:17:28] [Rank 0] step:2321/10000 train_time:185251ms step_avg:79.82ms +[2025-07-06 20:17:30] [Rank 0] step:2341/10000 train_time:187437ms step_avg:80.07ms +[2025-07-06 20:17:30] [Rank 0] step:2341/10000 train_time:187437ms step_avg:80.07ms +[2025-07-06 20:17:31] [Rank 0] step:2361/10000 train_time:188911ms step_avg:80.01ms +[2025-07-06 20:17:31] [Rank 0] step:2361/10000 train_time:188911ms step_avg:80.01ms +[2025-07-06 20:17:33] [Rank 0] step:2381/10000 train_time:190405ms step_avg:79.97ms +[2025-07-06 20:17:33] [Rank 0] step:2381/10000 train_time:190405ms step_avg:79.97ms +[2025-07-06 20:17:34] [Rank 0] step:2401/10000 train_time:191900ms step_avg:79.92ms +[2025-07-06 20:17:34] [Rank 0] step:2401/10000 train_time:191900ms step_avg:79.92ms +[2025-07-06 20:17:36] [Rank 0] step:2421/10000 train_time:193394ms step_avg:79.88ms +[2025-07-06 20:17:36] [Rank 0] step:2421/10000 train_time:193394ms step_avg:79.88ms +[2025-07-06 20:17:37] [Rank 0] step:2441/10000 train_time:194921ms step_avg:79.85ms +[2025-07-06 20:17:37] [Rank 0] step:2441/10000 train_time:194921ms step_avg:79.85ms +[2025-07-06 20:17:39] [Rank 0] step:2461/10000 train_time:196414ms step_avg:79.81ms +[2025-07-06 20:17:39] [Rank 0] step:2461/10000 train_time:196414ms step_avg:79.81ms +[2025-07-06 20:17:40] [Rank 0] step:2481/10000 train_time:197908ms step_avg:79.77ms +[2025-07-06 20:17:40] [Rank 0] step:2481/10000 train_time:197908ms step_avg:79.77ms +[2025-07-06 20:17:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 20:17:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 20:17:43] [Rank 0] PRINT: step:2500/10000 train_loss:0.9843 val_loss:0.9286 train_time:199404ms step_avg:79.76ms +[2025-07-06 20:17:43] [Rank 0] PRINT: step:2500/10000 train_loss:0.9843 val_loss:0.9286 train_time:199404ms step_avg:79.76ms +[2025-07-06 20:17:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 20:17:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 20:17:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 20:17:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 20:17:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 20:17:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 20:23:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 20:23:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 20:23:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 20:23:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 20:23:11] [Rank 0] Total Loss: 5.1817 +[2025-07-06 20:23:11] [Rank 0] Total Loss: 5.1817 +[2025-07-06 20:23:11] [Rank 0] Total FTA: 0.8525 +[2025-07-06 20:23:11] [Rank 0] Total FTA: 0.8525 +[2025-07-06 20:23:11] [Rank 0] Group 0 Loss: 5.4769 +[2025-07-06 20:23:11] [Rank 0] Group 0 Loss: 5.4769 +[2025-07-06 20:23:11] [Rank 0] Group 1 Loss: 5.0050 +[2025-07-06 20:23:11] [Rank 0] Group 1 Loss: 5.0050 +[2025-07-06 20:23:11] [Rank 0] Group 2 Loss: 5.0380 +[2025-07-06 20:23:11] [Rank 0] Group 2 Loss: 5.0380 +[2025-07-06 20:23:11] [Rank 0] Group 3 Loss: 5.2982 +[2025-07-06 20:23:11] [Rank 0] Group 3 Loss: 5.2982 +[2025-07-06 20:23:11] [Rank 0] Group 4 Loss: 5.1253 +[2025-07-06 20:23:11] [Rank 0] Group 4 Loss: 5.1253 +[2025-07-06 20:23:11] [Rank 0] Group 5 Loss: 5.0833 +[2025-07-06 20:23:11] [Rank 0] Group 5 Loss: 5.0833 +[2025-07-06 20:23:11] [Rank 0] Group 6 Loss: 5.0712 +[2025-07-06 20:23:11] [Rank 0] Group 6 Loss: 5.0712 +[2025-07-06 20:23:11] [Rank 0] Group 7 Loss: 5.1905 +[2025-07-06 20:23:11] [Rank 0] Group 7 Loss: 5.1905 +[2025-07-06 20:23:11] [Rank 0] Group 8 Loss: 5.1292 +[2025-07-06 20:23:11] [Rank 0] Group 8 Loss: 5.1292 +[2025-07-06 20:23:11] [Rank 0] Group 9 Loss: 5.1293 +[2025-07-06 20:23:11] [Rank 0] Group 9 Loss: 5.1293 +[2025-07-06 20:23:11] [Rank 0] Group 10 Loss: 5.1778 +[2025-07-06 20:23:11] [Rank 0] Group 10 Loss: 5.1778 +[2025-07-06 20:23:11] [Rank 0] Group 11 Loss: 5.1672 +[2025-07-06 20:23:11] [Rank 0] Group 11 Loss: 5.1672 +[2025-07-06 20:23:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 20:23:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 20:23:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 20:23:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 20:23:11] [Rank 0] Group 2 FTA: 0.8646 +[2025-07-06 20:23:11] [Rank 0] Group 2 FTA: 0.8646 +[2025-07-06 20:23:11] [Rank 0] Group 3 FTA: 0.8490 +[2025-07-06 20:23:11] [Rank 0] Group 3 FTA: 0.8490 +[2025-07-06 20:23:11] [Rank 0] Group 4 FTA: 0.8073 +[2025-07-06 20:23:11] [Rank 0] Group 4 FTA: 0.8073 +[2025-07-06 20:23:11] [Rank 0] Group 5 FTA: 0.7786 +[2025-07-06 20:23:11] [Rank 0] Group 5 FTA: 0.7786 +[2025-07-06 20:23:11] [Rank 0] Group 6 FTA: 0.8073 +[2025-07-06 20:23:11] [Rank 0] Group 6 FTA: 0.8073 +[2025-07-06 20:23:11] [Rank 0] Group 7 FTA: 0.7682 +[2025-07-06 20:23:11] [Rank 0] Group 7 FTA: 0.7682 +[2025-07-06 20:23:11] [Rank 0] Group 8 FTA: 0.7734 +[2025-07-06 20:23:11] [Rank 0] Group 8 FTA: 0.7734 +[2025-07-06 20:23:11] [Rank 0] Group 9 FTA: 0.8398 +[2025-07-06 20:23:11] [Rank 0] Group 9 FTA: 0.8398 +[2025-07-06 20:23:11] [Rank 0] Group 10 FTA: 0.8418 +[2025-07-06 20:23:11] [Rank 0] Group 10 FTA: 0.8418 +[2025-07-06 20:23:11] [Rank 0] Group 11 FTA: 0.8145 +[2025-07-06 20:23:11] [Rank 0] Group 11 FTA: 0.8145 +[2025-07-06 20:23:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 20:23:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 20:23:12] [Rank 0] [✓] Per-Class FTA curve updated 
+[2025-07-06 20:23:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 20:23:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 20:23:13] [Rank 0] step:2501/10000 train_time:199426ms step_avg:79.74ms
+[2025-07-06 20:23:15] [Rank 0] step:2521/10000 train_time:201606ms step_avg:79.97ms
+[2025-07-06 20:23:16] [Rank 0] step:2541/10000 train_time:203075ms step_avg:79.92ms
+[2025-07-06 20:23:18] [Rank 0] step:2561/10000 train_time:204563ms step_avg:79.88ms
+[2025-07-06 20:23:19] [Rank 0] step:2581/10000 train_time:206055ms step_avg:79.84ms
+[2025-07-06 20:23:21] [Rank 0] step:2601/10000 train_time:207544ms step_avg:79.79ms
+[2025-07-06 20:23:23] [Rank 0] step:2621/10000 train_time:209689ms step_avg:80.00ms
+[2025-07-06 20:23:25] [Rank 0] step:2641/10000 train_time:211180ms step_avg:79.96ms
+[2025-07-06 20:23:26] [Rank 0] step:2661/10000 train_time:212672ms step_avg:79.92ms
+[2025-07-06 20:23:28] [Rank 0] step:2681/10000 train_time:214163ms step_avg:79.88ms
+[2025-07-06 20:23:30] [Rank 0] step:2701/10000 train_time:215709ms step_avg:79.86ms
+[2025-07-06 20:23:31] [Rank 0] step:2721/10000 train_time:217798ms step_avg:80.04ms
+[2025-07-06 20:23:33] [Rank 0] step:2741/10000 train_time:219289ms step_avg:80.00ms
+[2025-07-06 20:23:34] [Rank 0] step:2761/10000 train_time:220784ms step_avg:79.97ms
+[2025-07-06 20:23:36] [Rank 0] step:2781/10000 train_time:222277ms step_avg:79.93ms
+[2025-07-06 20:23:38] [Rank 0] step:2801/10000 train_time:224442ms step_avg:80.13ms
+[2025-07-06 20:23:39] [Rank 0] step:2821/10000 train_time:225933ms step_avg:80.09ms
+[2025-07-06 20:23:41] [Rank 0] step:2841/10000 train_time:227428ms step_avg:80.05ms
+[2025-07-06 20:23:42] [Rank 0] step:2861/10000 train_time:228921ms step_avg:80.01ms
+[2025-07-06 20:23:44] [Rank 0] step:2881/10000 train_time:230467ms step_avg:80.00ms
+[2025-07-06 20:23:46] [Rank 0] step:2901/10000 train_time:232552ms step_avg:80.16ms
+[2025-07-06 20:23:47] [Rank 0] step:2921/10000 train_time:234046ms step_avg:80.13ms
+[2025-07-06 20:23:49] [Rank 0] step:2941/10000 train_time:235542ms step_avg:80.09ms
+[2025-07-06 20:23:50] [Rank 0] step:2961/10000 train_time:237040ms step_avg:80.05ms
+[2025-07-06 20:23:52] [Rank 0] step:2981/10000 train_time:238771ms step_avg:80.10ms
+[2025-07-06 20:23:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:23:55] [Rank 0] PRINT: step:3000/10000 train_loss:0.9106 val_loss:0.8878 train_time:240266ms step_avg:80.09ms
+[2025-07-06 20:23:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:23:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:23:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:29:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:29:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:29:19] [Rank 0] Total Loss: 5.1726
+[2025-07-06 20:29:19] [Rank 0] Total FTA: 0.8974
+[2025-07-06 20:29:19] [Rank 0] Group 0 Loss: 5.4436
+[2025-07-06 20:29:19] [Rank 0] Group 1 Loss: 5.0348
+[2025-07-06 20:29:19] [Rank 0] Group 2 Loss: 4.8400
+[2025-07-06 20:29:19] [Rank 0] Group 3 Loss: 5.3194
+[2025-07-06 20:29:19] [Rank 0] Group 4 Loss: 5.1177
+[2025-07-06 20:29:19] [Rank 0] Group 5 Loss: 5.1126
+[2025-07-06 20:29:19] [Rank 0] Group 6 Loss: 5.1038
+[2025-07-06 20:29:19] [Rank 0] Group 7 Loss: 5.1631
+[2025-07-06 20:29:19] [Rank 0] Group 8 Loss: 5.1401
+[2025-07-06 20:29:19] [Rank 0] Group 9 Loss: 5.1730
+[2025-07-06 20:29:19] [Rank 0] Group 10 Loss: 5.1514
+[2025-07-06 20:29:19] [Rank 0] Group 11 Loss: 5.1853
+[2025-07-06 20:29:19] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-06 20:29:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:29:19] [Rank 0] Group 2 FTA: 0.8464
+[2025-07-06 20:29:19] [Rank 0] Group 3 FTA: 0.8958
+[2025-07-06 20:29:19] [Rank 0] Group 4 FTA: 0.8672
+[2025-07-06 20:29:19] [Rank 0] Group 5 FTA: 0.8698
+[2025-07-06 20:29:19] [Rank 0] Group 6 FTA: 0.9297
+[2025-07-06 20:29:19] [Rank 0] Group 7 FTA: 0.9323
+[2025-07-06 20:29:19] [Rank 0] Group 8 FTA: 0.9245
+[2025-07-06 20:29:19] [Rank 0] Group 9 FTA: 0.9258
+[2025-07-06 20:29:19] [Rank 0] Group 10 FTA: 0.8906
+[2025-07-06 20:29:19] [Rank 0] Group 11 FTA: 0.9102
+[2025-07-06 20:29:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 20:29:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 20:29:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 20:29:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 20:29:21] [Rank 0] step:3001/10000 train_time:240288ms step_avg:80.07ms
+[2025-07-06 20:29:22] [Rank 0] step:3021/10000 train_time:241793ms step_avg:80.04ms
+[2025-07-06 20:29:24] [Rank 0] step:3041/10000 train_time:243283ms step_avg:80.00ms
+[2025-07-06 20:29:26] [Rank 0] step:3061/10000 train_time:244824ms step_avg:79.98ms
+[2025-07-06 20:29:28] [Rank 0] step:3081/10000 train_time:246921ms step_avg:80.14ms
+[2025-07-06 20:29:29] [Rank 0] step:3101/10000 train_time:248641ms step_avg:80.18ms
+[2025-07-06 20:29:31] [Rank 0] step:3121/10000 train_time:250292ms step_avg:80.20ms
+[2025-07-06 20:29:32] [Rank 0] step:3141/10000 train_time:251787ms step_avg:80.16ms
+[2025-07-06 20:29:35] [Rank 0] step:3161/10000 train_time:253951ms step_avg:80.34ms
+[2025-07-06 20:29:36] [Rank 0] step:3181/10000 train_time:255441ms step_avg:80.30ms
+[2025-07-06 20:29:38] [Rank 0] step:3201/10000 train_time:256934ms step_avg:80.27ms
+[2025-07-06 20:29:39] [Rank 0] step:3221/10000 train_time:258428ms step_avg:80.23ms
+[2025-07-06 20:29:41] [Rank 0] step:3241/10000 train_time:259972ms step_avg:80.21ms
+[2025-07-06 20:29:42] [Rank 0] step:3261/10000 train_time:261651ms step_avg:80.24ms
+[2025-07-06 20:29:44] [Rank 0] step:3281/10000 train_time:263144ms step_avg:80.20ms
+[2025-07-06 20:29:45] [Rank 0] step:3301/10000 train_time:264640ms step_avg:80.17ms
+[2025-07-06 20:29:47] [Rank 0] step:3321/10000 train_time:266134ms step_avg:80.14ms
+[2025-07-06 20:29:49] [Rank 0] step:3341/10000 train_time:267864ms step_avg:80.17ms
+[2025-07-06 20:29:50] [Rank 0] step:3361/10000 train_time:269357ms step_avg:80.14ms
+[2025-07-06 20:29:52] [Rank 0] step:3381/10000 train_time:270852ms step_avg:80.11ms
+[2025-07-06 20:29:53] [Rank 0] step:3401/10000 train_time:272350ms step_avg:80.08ms
+[2025-07-06 20:29:55] [Rank 0] step:3421/10000 train_time:273846ms step_avg:80.05ms
+[2025-07-06 20:29:57] [Rank 0] step:3441/10000 train_time:275985ms step_avg:80.20ms
+[2025-07-06 20:29:58] [Rank 0] step:3461/10000 train_time:277482ms step_avg:80.17ms
+[2025-07-06 20:30:00] [Rank 0] step:3481/10000 train_time:278980ms step_avg:80.14ms
+[2025-07-06 20:30:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:30:02] [Rank 0] PRINT: step:3500/10000 train_loss:0.8874 val_loss:0.8785 train_time:280478ms step_avg:80.14ms
+[2025-07-06 20:30:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:30:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:30:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:35:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:35:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:35:27] [Rank 0] Total Loss: 5.2755
+[2025-07-06 20:35:27] [Rank 0] Total FTA: 0.8475
+[2025-07-06 20:35:27] [Rank 0] Group 0 Loss: 5.4602
+[2025-07-06 20:35:27] [Rank 0] Group 1 Loss: 5.2236
+[2025-07-06 20:35:27] [Rank 0] Group 2 Loss: 4.9954
+[2025-07-06 20:35:27] [Rank 0] Group 3 Loss: 5.3203
+[2025-07-06 20:35:27] [Rank 0] Group 4 Loss: 5.2775
+[2025-07-06 20:35:27] [Rank 0] Group 5 Loss: 5.2575
+[2025-07-06 20:35:27] [Rank 0] Group 6 Loss: 5.1879
+[2025-07-06 20:35:27] [Rank 0] Group 7 Loss: 5.2672
+[2025-07-06 20:35:27] [Rank 0] Group 8 Loss: 5.3252
+[2025-07-06 20:35:27] [Rank 0] Group 9 Loss: 5.2506
+[2025-07-06 20:35:27] [Rank 0] Group 10 Loss: 5.2920
+[2025-07-06 20:35:27] [Rank 0] Group 11 Loss: 5.2658
+[2025-07-06 20:35:27] [Rank 0] Group 0 FTA: 0.5137
+[2025-07-06 20:35:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:35:27] [Rank 0] Group 2 FTA: 0.7396
+[2025-07-06 20:35:27] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-06 20:35:27] [Rank 0] Group 4 FTA: 0.9479
+[2025-07-06 20:35:27] [Rank 0] Group 5 FTA: 0.9010
+[2025-07-06 20:35:27] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-06 20:35:27] [Rank 0] Group 7 FTA: 0.9010
+[2025-07-06 20:35:27] [Rank 0] Group 8 FTA: 0.8802
+[2025-07-06 20:35:27] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-06 20:35:27] [Rank 0] Group 10 FTA: 0.9082
+[2025-07-06 20:35:27] [Rank 0] Group 11 FTA: 0.9150
+[2025-07-06 20:35:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 20:35:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 20:35:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 20:35:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 20:35:29] [Rank 0] step:3501/10000 train_time:280500ms step_avg:80.12ms
+[2025-07-06 20:35:31] [Rank 0] step:3521/10000 train_time:282669ms step_avg:80.28ms
+[2025-07-06 20:35:32] [Rank 0] step:3541/10000 train_time:284155ms step_avg:80.25ms
+[2025-07-06 20:35:34] [Rank 0] step:3561/10000 train_time:285644ms step_avg:80.21ms
+[2025-07-06 20:35:35] [Rank 0] step:3581/10000 train_time:287133ms step_avg:80.18ms
+[2025-07-06 20:35:37] [Rank 0] step:3601/10000 train_time:288625ms step_avg:80.15ms
+[2025-07-06 20:35:39] [Rank 0] step:3621/10000 train_time:290779ms step_avg:80.30ms
+[2025-07-06 20:35:40] [Rank 0] step:3641/10000 train_time:292270ms step_avg:80.27ms
+[2025-07-06 20:35:42] [Rank 0] step:3661/10000 train_time:293763ms step_avg:80.24ms
+[2025-07-06 20:35:43] [Rank 0] step:3681/10000 train_time:295255ms step_avg:80.21ms
+[2025-07-06 20:35:46] [Rank 0] step:3701/10000 train_time:297390ms step_avg:80.35ms
+[2025-07-06 20:35:47] [Rank 0] step:3721/10000 train_time:299020ms step_avg:80.36ms
+[2025-07-06 20:35:49] [Rank 0] step:3741/10000 train_time:300596ms step_avg:80.35ms
+[2025-07-06 20:35:50] [Rank 0] step:3761/10000 train_time:302185ms step_avg:80.35ms
+[2025-07-06 20:35:52] [Rank 0] step:3781/10000 train_time:303680ms step_avg:80.32ms
+[2025-07-06 20:35:54] [Rank 0] step:3801/10000 train_time:305410ms step_avg:80.35ms
+[2025-07-06 20:35:55] [Rank 0] step:3821/10000 train_time:306904ms step_avg:80.32ms
+[2025-07-06 20:35:57] [Rank 0] step:3841/10000 train_time:308398ms step_avg:80.29ms
+[2025-07-06 20:35:58] [Rank 0] step:3861/10000 train_time:309893ms step_avg:80.26ms
+[2025-07-06 20:36:00] [Rank 0] step:3881/10000 train_time:311632ms step_avg:80.30ms
+[2025-07-06 20:36:01] [Rank 0] step:3901/10000 train_time:313128ms step_avg:80.27ms
+[2025-07-06 20:36:03] [Rank 0] step:3921/10000 train_time:314629ms step_avg:80.24ms
+[2025-07-06 20:36:04] [Rank 0] step:3941/10000 train_time:316128ms step_avg:80.22ms
+[2025-07-06 20:36:06] [Rank 0] step:3961/10000 train_time:317884ms step_avg:80.25ms
+[2025-07-06 20:36:08] [Rank 0] step:3981/10000 train_time:319778ms step_avg:80.33ms
+[2025-07-06 20:36:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:36:10] [Rank 0] PRINT: step:4000/10000 train_loss:0.8795 val_loss:0.8739 train_time:321277ms step_avg:80.32ms
+[2025-07-06 20:36:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:36:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:36:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:41:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:41:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:41:41] [Rank 0] Total Loss: 5.2369
+[2025-07-06 20:41:41] [Rank 0] Total FTA: 0.9013
+[2025-07-06 20:41:41] [Rank 0] Group 0 Loss: 5.5215
+[2025-07-06 20:41:41] [Rank 0] Group 1 Loss: 5.0850
+[2025-07-06 20:41:41] [Rank 0] Group 2 Loss: 4.9646
+[2025-07-06 20:41:41] [Rank 0] Group 3 Loss: 5.2863
+[2025-07-06 20:41:41] [Rank 0] Group 4 Loss: 5.1963
+[2025-07-06 20:41:41] [Rank 0] Group 5 Loss: 5.2350
+[2025-07-06 20:41:41] [Rank 0] Group 6 Loss: 5.1112
+[2025-07-06 20:41:41] [Rank 0] Group 7 Loss: 5.2581
+[2025-07-06 20:41:41] [Rank 0] Group 8 Loss: 5.2251
+[2025-07-06 20:41:41] [Rank 0] Group 9 Loss: 5.2255
+[2025-07-06 20:41:41] [Rank 0] Group 10 Loss: 5.2342
+[2025-07-06 20:41:41] [Rank 0] Group 11 Loss: 5.2272
+[2025-07-06 20:41:41] [Rank 0] Group 0 FTA: 0.8492
+[2025-07-06 20:41:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:41:41] [Rank 0] Group 2 FTA: 0.8307
+[2025-07-06 20:41:41] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-06 20:41:41] [Rank 0] Group 4 FTA: 0.8359
+[2025-07-06 20:41:41] [Rank 0] Group 5 FTA: 0.8646
+[2025-07-06 20:41:41] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-06 20:41:41] [Rank 0] Group 7 FTA: 0.8958
+[2025-07-06 20:41:41] [Rank 0] Group 8 FTA: 0.9193
+[2025-07-06 20:41:41] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-06 20:41:41] [Rank 0] Group 10 FTA: 0.9355
+[2025-07-06 20:41:41] [Rank 0] Group 11 FTA: 0.9092
+[2025-07-06 20:41:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 20:41:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 20:41:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 20:41:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 20:41:43] [Rank 0] step:4001/10000 train_time:321299ms step_avg:80.30ms
+[2025-07-06 20:41:44] [Rank 0] step:4021/10000 train_time:322797ms step_avg:80.28ms
+[2025-07-06 20:41:46] [Rank 0] step:4041/10000 train_time:324286ms step_avg:80.25ms
+[2025-07-06 20:41:48] [Rank 0] step:4061/10000 train_time:326418ms step_avg:80.38ms
+[2025-07-06 20:41:49] [Rank 0] step:4081/10000 train_time:327906ms step_avg:80.35ms
+[2025-07-06 20:41:51] [Rank 0] step:4101/10000 train_time:329398ms step_avg:80.32ms
+[2025-07-06 20:41:52] [Rank 0] step:4121/10000 train_time:330891ms step_avg:80.29ms
+[2025-07-06 20:41:54] [Rank 0] step:4141/10000 train_time:332635ms step_avg:80.33ms
+[2025-07-06 20:41:55] [Rank 0] step:4161/10000 train_time:334106ms step_avg:80.29ms
+[2025-07-06 20:41:57] [Rank 0] step:4181/10000 train_time:335598ms step_avg:80.27ms
+[2025-07-06 20:41:58] [Rank 0] step:4201/10000 train_time:337091ms step_avg:80.24ms
+[2025-07-06 20:42:00] [Rank 0] step:4221/10000 train_time:338588ms step_avg:80.22ms
+[2025-07-06 20:42:02] [Rank 0] step:4241/10000 train_time:340726ms step_avg:80.34ms
+[2025-07-06 20:42:04] [Rank 0] step:4261/10000 train_time:342217ms step_avg:80.31ms
+[2025-07-06 20:42:05] [Rank 0] step:4281/10000 train_time:343710ms step_avg:80.29ms
+[2025-07-06 20:42:07] [Rank 0] step:4301/10000 train_time:345204ms step_avg:80.26ms
+[2025-07-06 20:42:08] [Rank 0] step:4321/10000 train_time:346973ms step_avg:80.30ms
+[2025-07-06 20:42:10] [Rank 0] step:4341/10000 train_time:348785ms step_avg:80.35ms
+[2025-07-06 20:42:12] [Rank 0] step:4361/10000 train_time:350278ms step_avg:80.32ms
+[2025-07-06 20:42:13] [Rank 0] step:4381/10000 train_time:351772ms step_avg:80.29ms
+[2025-07-06 20:42:15] [Rank 0] step:4401/10000 train_time:353268ms step_avg:80.27ms
+[2025-07-06 20:42:17] [Rank 0] step:4421/10000 train_time:355413ms step_avg:80.39ms
+[2025-07-06 20:42:18] [Rank 0] step:4441/10000 train_time:356905ms step_avg:80.37ms
+[2025-07-06 20:42:20] [Rank 0] step:4461/10000 train_time:358401ms step_avg:80.34ms
+[2025-07-06 20:42:21] [Rank 0] step:4481/10000 train_time:359897ms step_avg:80.32ms
+[2025-07-06 20:42:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:42:24] [Rank 0] PRINT: step:4500/10000 train_loss:0.8746 val_loss:0.8703 train_time:361392ms step_avg:80.31ms
+[2025-07-06 20:42:24] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:42:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:42:24] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:47:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:47:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:47:49] [Rank 0] Total Loss: 5.4283
+[2025-07-06 20:47:49] [Rank 0] Total FTA: 0.9134
+[2025-07-06 20:47:49] [Rank 0] Group 0 Loss: 5.7593
+[2025-07-06 20:47:49] [Rank 0] Group 1 Loss: 5.2282
+[2025-07-06 20:47:49] [Rank 0] Group 2 Loss: 5.4349
+[2025-07-06 20:47:49] [Rank 0] Group 3 Loss: 5.4724
+[2025-07-06 20:47:49] [Rank 0] Group 4 Loss: 5.3754
+[2025-07-06 20:47:49] [Rank 0] Group 5 Loss: 5.3494
+[2025-07-06 20:47:49] [Rank 0] Group 6 Loss: 5.3578
+[2025-07-06 20:47:49] [Rank 0] Group 7 Loss: 5.3586
+[2025-07-06 20:47:49] [Rank 0] Group 8 Loss: 5.3788
+[2025-07-06 20:47:49] [Rank 0] Group 9 Loss: 5.3898
+[2025-07-06 20:47:50] [Rank 0] Group 10 Loss: 5.3781
+[2025-07-06 20:47:50] [Rank 0] Group 11 Loss: 5.3910
+[2025-07-06 20:47:50] [Rank 0] Group 0 FTA: 0.8492
+[2025-07-06 20:47:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:47:50] [Rank 0] Group 2 FTA: 0.8203
+[2025-07-06 20:47:50] [Rank 0] Group 3 FTA: 0.9583
+[2025-07-06 20:47:50] [Rank 0] Group 4 FTA: 0.9609
+[2025-07-06 20:47:50] [Rank 0] Group 5 FTA: 0.9115
+[2025-07-06 20:47:50] [Rank 0] Group 6 FTA: 0.9141
+[2025-07-06 20:47:50] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-06 20:47:50] [Rank 0] Group 8 FTA: 0.9010
+[2025-07-06 20:47:50] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-06 20:47:50] [Rank 0] Group 10 FTA: 0.9219
+[2025-07-06 20:47:50] [Rank 0] Group 11 FTA: 0.9180
+[2025-07-06 20:47:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 20:47:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 20:47:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 20:47:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 20:47:52] [Rank 0] step:4501/10000 train_time:361729ms step_avg:80.37ms
+[2025-07-06 20:47:53] [Rank 0] step:4521/10000 train_time:363611ms step_avg:80.43ms
+[2025-07-06 20:47:55] [Rank 0] step:4541/10000 train_time:365097ms step_avg:80.40ms
+[2025-07-06 20:47:56] [Rank 0] step:4561/10000 train_time:366586ms step_avg:80.37ms
+[2025-07-06 20:47:58] [Rank 0] step:4581/10000 train_time:368078ms step_avg:80.35ms
+[2025-07-06 20:48:00] [Rank 0] step:4601/10000 train_time:370226ms step_avg:80.47ms
+[2025-07-06 20:48:01] [Rank 0] step:4621/10000 train_time:371715ms step_avg:80.44ms
+[2025-07-06 20:48:03] [Rank 0] step:4641/10000 train_time:373209ms step_avg:80.42ms
+[2025-07-06 20:48:04] [Rank 0] step:4661/10000 train_time:374703ms step_avg:80.39ms
+[2025-07-06 20:48:07] [Rank 0] step:4681/10000 train_time:376247ms step_avg:80.38ms
+[2025-07-06 20:48:08] [Rank 0] step:4701/10000 train_time:378334ms step_avg:80.48ms
+[2025-07-06 20:48:09] [Rank 0] step:4721/10000 train_time:379827ms step_avg:80.45ms
+[2025-07-06 20:48:11] [Rank 0] step:4741/10000 train_time:381321ms step_avg:80.43ms
+[2025-07-06 20:48:12] [Rank 0] step:4761/10000 train_time:382813ms step_avg:80.41ms
+[2025-07-06 20:48:14] [Rank 0] step:4781/10000 train_time:384544ms step_avg:80.43ms
+[2025-07-06 20:48:16] [Rank 0] step:4801/10000 train_time:386034ms step_avg:80.41ms
+[2025-07-06 20:48:17] [Rank 0] step:4821/10000 train_time:387528ms step_avg:80.38ms
+[2025-07-06 20:48:19] [Rank 0] step:4841/10000 train_time:389021ms step_avg:80.36ms
+[2025-07-06 20:48:20] [Rank 0] step:4861/10000 train_time:390570ms step_avg:80.35ms
+[2025-07-06 20:48:22] [Rank 0] step:4881/10000 train_time:392142ms step_avg:80.34ms
+[2025-07-06 20:48:23] [Rank 0] step:4901/10000 train_time:393637ms step_avg:80.32ms
+[2025-07-06 20:48:25] [Rank 0] step:4921/10000 train_time:395131ms step_avg:80.29ms
+[2025-07-06 20:48:26] [Rank 0] step:4941/10000 train_time:396687ms step_avg:80.28ms
+[2025-07-06 20:48:29] [Rank 0] step:4961/10000 train_time:398993ms step_avg:80.43ms
+[2025-07-06 20:48:30] [Rank 0] step:4981/10000 train_time:400488ms step_avg:80.40ms
+[2025-07-06 20:48:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:48:33] [Rank 0] PRINT: step:5000/10000 train_loss:0.8710 val_loss:0.8691 train_time:401982ms step_avg:80.40ms
+[2025-07-06 20:48:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:48:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:48:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 20:53:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 20:53:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 20:53:59] [Rank 0] Total Loss: 5.3933
+[2025-07-06 20:53:59] [Rank 0] Total FTA: 0.9409
+[2025-07-06 20:53:59] [Rank 0] Group 0 Loss: 5.5978
+[2025-07-06 20:53:59] [Rank 0] Group 1 Loss: 5.3116
+[2025-07-06 20:53:59] [Rank 0] Group 2 Loss: 5.0489
+[2025-07-06 20:53:59] [Rank 0] Group 3 Loss: 5.4513
+[2025-07-06 20:53:59] [Rank 0] Group 4 Loss: 5.3952
+[2025-07-06 20:53:59] [Rank 0] Group 5 Loss: 5.3651
+[2025-07-06 20:53:59] [Rank 0] Group 6 Loss: 5.2972
+[2025-07-06 20:53:59] [Rank 0] Group 7 Loss: 5.3783
+[2025-07-06 20:53:59] [Rank 0] Group 8 Loss: 5.3636
+[2025-07-06 20:53:59] [Rank 0] Group 9 Loss: 5.3943
+[2025-07-06 20:53:59] [Rank 0] Group 10 Loss: 5.4101
+[2025-07-06 20:53:59] [Rank 0] Group 11 Loss: 5.4317
+[2025-07-06 20:53:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 20:53:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 20:53:59] [Rank 0] Group 2 FTA: 0.8958
+[2025-07-06 20:53:59] [Rank 0] Group 3 FTA: 0.9688
+[2025-07-06 20:53:59] [Rank 0] Group 4 FTA: 0.9688
+[2025-07-06 20:53:59] [Rank 0] Group 5 FTA: 0.9141
+[2025-07-06 20:53:59] [Rank 0] Group 6 FTA: 0.8906
+[2025-07-06 20:53:59] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-06 20:53:59] [Rank 0] Group 8 FTA: 0.9167
+[2025-07-06 20:53:59] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-06 20:53:59] [Rank 0] Group 10 FTA: 0.9160
+[2025-07-06 20:53:59] [Rank 0] Group 11 FTA: 0.9248
+[2025-07-06 20:53:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 20:54:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 20:54:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 20:54:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 20:54:00] [Rank 0] step:5001/10000 train_time:402004ms step_avg:80.38ms
+[2025-07-06 20:54:02] [Rank 0] step:5021/10000 train_time:403492ms step_avg:80.36ms
+[2025-07-06 20:54:04] [Rank 0] step:5041/10000 train_time:405243ms step_avg:80.39ms
+[2025-07-06 20:54:06] [Rank 0] step:5061/10000 train_time:407125ms step_avg:80.44ms
+[2025-07-06 20:54:07] [Rank 0] step:5081/10000 train_time:408614ms step_avg:80.42ms
+[2025-07-06 20:54:09] [Rank 0] step:5101/10000 train_time:410104ms step_avg:80.40ms
+[2025-07-06 20:54:10] [Rank 0] step:5121/10000 train_time:411594ms step_avg:80.37ms
+[2025-07-06 20:54:12] [Rank 0] step:5141/10000 train_time:413737ms step_avg:80.48ms
+[2025-07-06 20:54:14] [Rank 0] step:5161/10000 train_time:415228ms step_avg:80.45ms
+[2025-07-06 20:54:15] [Rank 0] step:5181/10000 train_time:416719ms step_avg:80.43ms
+[2025-07-06 20:54:17] [Rank 0] step:5201/10000 train_time:418215ms step_avg:80.41ms
+[2025-07-06 20:54:18] [Rank 0] step:5221/10000 train_time:419967ms step_avg:80.44ms
+[2025-07-06 20:54:20] [Rank 0] step:5241/10000 train_time:421538ms step_avg:80.43ms
+[2025-07-06 20:54:21] [Rank 0] step:5261/10000 train_time:423030ms step_avg:80.41ms
+[2025-07-06 20:54:23] [Rank 0] step:5281/10000 train_time:424524ms step_avg:80.39ms
+[2025-07-06 20:54:24] [Rank 0] step:5301/10000 train_time:426020ms step_avg:80.37ms
+[2025-07-06 20:54:27] [Rank 0] step:5321/10000 train_time:428172ms step_avg:80.47ms
+[2025-07-06 20:54:28] [Rank 0] step:5341/10000 train_time:429666ms step_avg:80.45ms
+[2025-07-06 20:54:30] [Rank 0] step:5361/10000 train_time:431161ms step_avg:80.43ms
+[2025-07-06 20:54:31] [Rank 0] step:5381/10000 train_time:432656ms step_avg:80.40ms
+[2025-07-06 20:54:33] [Rank 0] step:5401/10000 train_time:434204ms step_avg:80.39ms
+[2025-07-06 20:54:35] [Rank 0] step:5421/10000 train_time:436310ms step_avg:80.49ms
+[2025-07-06 20:54:36] [Rank 0] step:5441/10000 train_time:437806ms step_avg:80.46ms
+[2025-07-06 20:54:38] [Rank 0] step:5461/10000 train_time:439299ms step_avg:80.44ms
+[2025-07-06 20:54:39] [Rank 0] step:5481/10000 train_time:440795ms step_avg:80.42ms
+[2025-07-06 20:54:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 20:54:42] [Rank 0] PRINT: step:5500/10000 train_loss:0.8683 val_loss:0.8677 train_time:442955ms step_avg:80.54ms
+[2025-07-06 20:54:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 20:54:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 20:54:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:00:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:00:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:00:07] [Rank 0] Total Loss: 5.4532
+[2025-07-06 21:00:07] [Rank 0] Total FTA: 0.9462
+[2025-07-06 21:00:07] [Rank 0] Group 0 Loss: 5.6072
+[2025-07-06 21:00:07] [Rank 0] Group 1 Loss: 5.3011
+[2025-07-06 21:00:07] [Rank 0] Group 2 Loss: 5.2307
+[2025-07-06 21:00:07] [Rank 0] Group 3 Loss: 5.6432
+[2025-07-06 21:00:07] [Rank 0] Group 4 Loss: 5.4443
+[2025-07-06 21:00:07] [Rank 0] Group 5 Loss: 5.3939
+[2025-07-06 21:00:07] [Rank 0] Group 6 Loss: 5.3494
+[2025-07-06 21:00:07] [Rank 0] Group 7 Loss: 5.4895
+[2025-07-06 21:00:07] [Rank 0] Group 8 Loss: 5.4882
+[2025-07-06 21:00:07] [Rank 0] Group 9 Loss: 5.4113
+[2025-07-06 21:00:07] [Rank 0] Group 10 Loss: 5.4222
+[2025-07-06 21:00:07] [Rank 0] Group 11 Loss: 5.4707
+[2025-07-06 21:00:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:00:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:00:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:00:07] [Rank 0] Group 3 FTA: 0.9505
+[2025-07-06 21:00:07] [Rank 0] Group 4 FTA: 0.9453
+[2025-07-06 21:00:07] [Rank 0] Group 5 FTA: 0.9557
+[2025-07-06 21:00:07] [Rank 0] Group 6 FTA: 0.9193
+[2025-07-06 21:00:07] [Rank 0] Group 7 FTA: 0.9323
+[2025-07-06 21:00:07] [Rank 0] Group 8 FTA: 0.9036
+[2025-07-06 21:00:07] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-06 21:00:07] [Rank 0] Group 10 FTA: 0.9141
+[2025-07-06 21:00:07] [Rank 0] Group 11 FTA: 0.9102
+[2025-07-06 21:00:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 21:00:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 21:00:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 21:00:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 21:00:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 21:00:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 21:00:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 21:00:09] [Rank 0] step:5501/10000 train_time:442976ms step_avg:80.53ms +[2025-07-06 21:00:09] [Rank 0] step:5501/10000 train_time:442976ms step_avg:80.53ms +[2025-07-06 21:00:11] [Rank 0] step:5521/10000 train_time:444490ms step_avg:80.51ms +[2025-07-06 21:00:11] [Rank 0] step:5521/10000 train_time:444490ms step_avg:80.51ms +[2025-07-06 21:00:12] [Rank 0] step:5541/10000 train_time:445978ms step_avg:80.49ms +[2025-07-06 21:00:12] [Rank 0] step:5541/10000 train_time:445978ms step_avg:80.49ms +[2025-07-06 21:00:13] [Rank 0] step:5561/10000 train_time:447467ms step_avg:80.47ms +[2025-07-06 21:00:13] [Rank 0] step:5561/10000 train_time:447467ms step_avg:80.47ms +[2025-07-06 21:00:16] [Rank 0] step:5581/10000 train_time:448957ms step_avg:80.44ms +[2025-07-06 21:00:16] [Rank 0] step:5581/10000 train_time:448957ms step_avg:80.44ms +[2025-07-06 21:00:17] [Rank 0] step:5601/10000 train_time:451107ms step_avg:80.54ms +[2025-07-06 21:00:17] [Rank 0] step:5601/10000 train_time:451107ms step_avg:80.54ms +[2025-07-06 21:00:19] [Rank 0] step:5621/10000 train_time:452597ms step_avg:80.52ms +[2025-07-06 21:00:19] [Rank 0] step:5621/10000 train_time:452597ms step_avg:80.52ms +[2025-07-06 21:00:20] [Rank 0] step:5641/10000 train_time:454088ms step_avg:80.50ms +[2025-07-06 21:00:20] [Rank 0] step:5641/10000 train_time:454088ms step_avg:80.50ms +[2025-07-06 21:00:22] [Rank 0] step:5661/10000 train_time:455579ms step_avg:80.48ms +[2025-07-06 21:00:22] [Rank 0] step:5661/10000 train_time:455579ms step_avg:80.48ms +[2025-07-06 21:00:23] [Rank 0] step:5681/10000 train_time:457308ms step_avg:80.50ms +[2025-07-06 21:00:23] [Rank 0] step:5681/10000 train_time:457308ms step_avg:80.50ms +[2025-07-06 21:00:25] [Rank 0] step:5701/10000 train_time:458799ms step_avg:80.48ms +[2025-07-06 21:00:25] [Rank 0] step:5701/10000 train_time:458799ms step_avg:80.48ms +[2025-07-06 21:00:26] [Rank 0] step:5721/10000 train_time:460293ms step_avg:80.46ms +[2025-07-06 21:00:26] [Rank 0] step:5721/10000 train_time:460293ms step_avg:80.46ms +[2025-07-06 21:00:28] [Rank 0] step:5741/10000 train_time:461787ms step_avg:80.44ms +[2025-07-06 21:00:28] [Rank 0] step:5741/10000 train_time:461787ms step_avg:80.44ms +[2025-07-06 21:00:30] [Rank 0] step:5761/10000 train_time:463331ms step_avg:80.43ms +[2025-07-06 21:00:30] [Rank 0] step:5761/10000 train_time:463331ms step_avg:80.43ms +[2025-07-06 21:00:31] [Rank 0] step:5781/10000 train_time:465416ms step_avg:80.51ms +[2025-07-06 21:00:31] [Rank 0] step:5781/10000 train_time:465416ms step_avg:80.51ms +[2025-07-06 21:00:33] [Rank 0] step:5801/10000 train_time:466907ms step_avg:80.49ms +[2025-07-06 21:00:33] [Rank 0] 
step:5801/10000 train_time:466907ms step_avg:80.49ms +[2025-07-06 21:00:34] [Rank 0] step:5821/10000 train_time:468401ms step_avg:80.47ms +[2025-07-06 21:00:34] [Rank 0] step:5821/10000 train_time:468401ms step_avg:80.47ms +[2025-07-06 21:00:36] [Rank 0] step:5841/10000 train_time:469899ms step_avg:80.45ms +[2025-07-06 21:00:36] [Rank 0] step:5841/10000 train_time:469899ms step_avg:80.45ms +[2025-07-06 21:00:38] [Rank 0] step:5861/10000 train_time:472062ms step_avg:80.54ms +[2025-07-06 21:00:38] [Rank 0] step:5861/10000 train_time:472062ms step_avg:80.54ms +[2025-07-06 21:00:40] [Rank 0] step:5881/10000 train_time:473557ms step_avg:80.52ms +[2025-07-06 21:00:40] [Rank 0] step:5881/10000 train_time:473557ms step_avg:80.52ms +[2025-07-06 21:00:41] [Rank 0] step:5901/10000 train_time:475053ms step_avg:80.50ms +[2025-07-06 21:00:41] [Rank 0] step:5901/10000 train_time:475053ms step_avg:80.50ms +[2025-07-06 21:00:43] [Rank 0] step:5921/10000 train_time:476551ms step_avg:80.48ms +[2025-07-06 21:00:43] [Rank 0] step:5921/10000 train_time:476551ms step_avg:80.48ms +[2025-07-06 21:00:45] [Rank 0] step:5941/10000 train_time:478728ms step_avg:80.58ms +[2025-07-06 21:00:45] [Rank 0] step:5941/10000 train_time:478728ms step_avg:80.58ms +[2025-07-06 21:00:46] [Rank 0] step:5961/10000 train_time:480202ms step_avg:80.56ms +[2025-07-06 21:00:46] [Rank 0] step:5961/10000 train_time:480202ms step_avg:80.56ms +[2025-07-06 21:00:48] [Rank 0] step:5981/10000 train_time:481697ms step_avg:80.54ms +[2025-07-06 21:00:48] [Rank 0] step:5981/10000 train_time:481697ms step_avg:80.54ms +[2025-07-06 21:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:00:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:00:50] [Rank 0] PRINT: step:6000/10000 train_loss:0.8662 val_loss:0.8671 train_time:483194ms step_avg:80.53ms +[2025-07-06 21:00:50] [Rank 0] PRINT: step:6000/10000 train_loss:0.8662 val_loss:0.8671 train_time:483194ms step_avg:80.53ms +[2025-07-06 21:00:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:00:50] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:00:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 21:00:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 21:00:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:06:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:06:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:06:18] [Rank 0] Total Loss: 5.4759
+[2025-07-06 21:06:18] [Rank 0] Total FTA: 0.9173
+[2025-07-06 21:06:18] [Rank 0] Group 0 Loss: 5.5032
+[2025-07-06 21:06:18] [Rank 0] Group 1 Loss: 5.3375
+[2025-07-06 21:06:18] [Rank 0] Group 2 Loss: 5.3631
+[2025-07-06 21:06:18] [Rank 0] Group 3 Loss: 5.6301
+[2025-07-06 21:06:18] [Rank 0] Group 4 Loss: 5.4248
+[2025-07-06 21:06:18] [Rank 0] Group 5 Loss: 5.4441
+[2025-07-06 21:06:18] [Rank 0] Group 6 Loss: 5.4050
+[2025-07-06 21:06:18] [Rank 0] Group 7 Loss: 5.5045
+[2025-07-06 21:06:18] [Rank 0] Group 8 Loss: 5.4732
+[2025-07-06 21:06:18] [Rank 0] Group 9 Loss: 5.4631
+[2025-07-06 21:06:18] [Rank 0] Group 10 Loss: 5.5294
+[2025-07-06 21:06:18] [Rank 0] Group 11 Loss: 5.5164
+[2025-07-06 21:06:18] [Rank 0] Group 0 FTA: 0.8466
+[2025-07-06 21:06:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:06:18] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:06:18] [Rank 0] Group 3 FTA: 0.7969
+[2025-07-06 21:06:18] [Rank 0] Group 4 FTA: 0.9349
+[2025-07-06 21:06:18] [Rank 0] Group 5 FTA: 0.9453
+[2025-07-06 21:06:18] [Rank 0] Group 6 FTA: 0.9453
+[2025-07-06 21:06:18] [Rank 0] Group 7 FTA: 0.9271
+[2025-07-06 21:06:18] [Rank 0] Group 8 FTA: 0.9062
+[2025-07-06 21:06:18] [Rank 0] Group 9 FTA: 0.8945
+[2025-07-06 21:06:18] [Rank 0] Group 10 FTA: 0.9238
+[2025-07-06 21:06:18] [Rank 0] Group 11 FTA: 0.9287
+[2025-07-06 21:06:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 21:06:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 21:06:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 21:06:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 21:06:19] [Rank 0] step:6001/10000 train_time:483216ms step_avg:80.52ms
+[2025-07-06 21:06:21] [Rank 0] step:6021/10000 train_time:484704ms step_avg:80.50ms
+[2025-07-06 21:06:23] [Rank 0] step:6041/10000 train_time:486542ms step_avg:80.54ms
+[2025-07-06 21:06:24] [Rank 0] step:6061/10000 train_time:488163ms step_avg:80.54ms
+[2025-07-06 21:06:26] [Rank 0] step:6081/10000 train_time:489651ms step_avg:80.52ms
+[2025-07-06 21:06:27] [Rank 0] step:6101/10000 train_time:491141ms step_avg:80.50ms
+[2025-07-06 21:06:29] [Rank 0] step:6121/10000 train_time:493317ms step_avg:80.59ms
+[2025-07-06 21:06:31] [Rank 0] step:6141/10000 train_time:494786ms step_avg:80.57ms
+[2025-07-06 21:06:32] [Rank 0] step:6161/10000 train_time:496279ms step_avg:80.55ms
+[2025-07-06 21:06:34] [Rank 0] step:6181/10000 train_time:497771ms step_avg:80.53ms
+[2025-07-06 21:06:35] [Rank 0] step:6201/10000 train_time:499264ms step_avg:80.51ms
+[2025-07-06 21:06:37] [Rank 0] step:6221/10000 train_time:500996ms step_avg:80.53ms
+[2025-07-06 21:06:38] [Rank 0] step:6241/10000 train_time:502487ms step_avg:80.51ms
+[2025-07-06 21:06:40] [Rank 0] step:6261/10000 train_time:503981ms step_avg:80.50ms
+[2025-07-06 21:06:41] [Rank 0] step:6281/10000 train_time:505476ms step_avg:80.48ms
+[2025-07-06 21:06:43] [Rank 0] step:6301/10000 train_time:507229ms step_avg:80.50ms
+[2025-07-06 21:06:45] [Rank 0] step:6321/10000 train_time:508703ms step_avg:80.48ms
+[2025-07-06 21:06:46] [Rank 0] step:6341/10000 train_time:510201ms step_avg:80.46ms
+[2025-07-06 21:06:48] [Rank 0] step:6361/10000 train_time:511698ms step_avg:80.44ms
+[2025-07-06 21:06:49] [Rank 0] step:6381/10000 train_time:513194ms step_avg:80.43ms
+[2025-07-06 21:06:51] [Rank 0] step:6401/10000 train_time:515354ms step_avg:80.51ms
+[2025-07-06 21:06:53] [Rank 0] step:6421/10000 train_time:516848ms step_avg:80.49ms
+[2025-07-06 21:06:54] [Rank 0] step:6441/10000 train_time:518345ms step_avg:80.48ms
+[2025-07-06 21:06:56] [Rank 0] step:6461/10000 train_time:519842ms step_avg:80.46ms
+[2025-07-06 21:06:58] [Rank 0] step:6481/10000 train_time:521393ms step_avg:80.45ms
+[2025-07-06 21:06:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:07:00] [Rank 0] PRINT: step:6500/10000 train_loss:0.8649 val_loss:0.8651 train_time:523498ms step_avg:80.54ms
+[2025-07-06 21:07:00] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:07:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:07:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:12:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:12:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:12:27] [Rank 0] Total Loss: 5.5070
+[2025-07-06 21:12:27] [Rank 0] Total FTA: 0.9505
+[2025-07-06 21:12:27] [Rank 0] Group 0 Loss: 5.6529
+[2025-07-06 21:12:27] [Rank 0] Group 1 Loss: 5.2754
+[2025-07-06 21:12:27] [Rank 0] Group 2 Loss: 5.3990
+[2025-07-06 21:12:27] [Rank 0] Group 3 Loss: 5.7335
+[2025-07-06 21:12:27] [Rank 0] Group 4 Loss: 5.5287
+[2025-07-06 21:12:27] [Rank 0] Group 5 Loss: 5.4659
+[2025-07-06 21:12:27] [Rank 0] Group 6 Loss: 5.4237
+[2025-07-06 21:12:27] [Rank 0] Group 7 Loss: 5.4836
+[2025-07-06 21:12:27] [Rank 0] Group 8 Loss: 5.4892
+[2025-07-06 21:12:27] [Rank 0] Group 9 Loss: 5.4535
+[2025-07-06 21:12:27] [Rank 0] Group 10 Loss: 5.5300
+[2025-07-06 21:12:27] [Rank 0] Group 11 Loss: 5.4956
+[2025-07-06 21:12:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:12:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:12:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:12:27] [Rank 0] Group 3 FTA: 0.9714
+[2025-07-06 21:12:27] [Rank 0] Group 4 FTA: 0.9661
+[2025-07-06 21:12:27] [Rank 0] Group 5 FTA: 0.9453
+[2025-07-06 21:12:27] [Rank 0] Group 6 FTA: 0.9453
+[2025-07-06 21:12:27] [Rank 0] Group 7 FTA: 0.9193
+[2025-07-06 21:12:27] [Rank 0] Group 8 FTA: 0.8984
+[2025-07-06 21:12:27] [Rank 0] Group 9 FTA: 0.9453
+[2025-07-06 21:12:27] [Rank 0] Group 10 FTA: 0.9238
+[2025-07-06 21:12:27] [Rank 0] Group 11 FTA: 0.9121
+[2025-07-06 21:12:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 21:12:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 21:12:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 21:12:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 21:12:29] [Rank 0] step:6501/10000 train_time:523520ms step_avg:80.53ms
+[2025-07-06 21:12:30] [Rank 0] step:6521/10000 train_time:525019ms step_avg:80.51ms
+[2025-07-06 21:12:32] [Rank 0] step:6541/10000 train_time:526509ms step_avg:80.49ms
+[2025-07-06 21:12:33] [Rank 0] step:6561/10000 train_time:527996ms step_avg:80.47ms
+[2025-07-06 21:12:36] [Rank 0] step:6581/10000 train_time:530141ms step_avg:80.56ms
+[2025-07-06 21:12:37] [Rank 0] step:6601/10000 train_time:531628ms step_avg:80.54ms
+[2025-07-06 21:12:38] [Rank 0] step:6621/10000 train_time:533117ms step_avg:80.52ms
+[2025-07-06 21:12:40] [Rank 0] step:6641/10000 train_time:534608ms step_avg:80.50ms
+[2025-07-06 21:12:42] [Rank 0] step:6661/10000 train_time:536606ms step_avg:80.56ms
+[2025-07-06 21:12:44] [Rank 0] step:6681/10000 train_time:538597ms step_avg:80.62ms
+[2025-07-06 21:12:45] [Rank 0] step:6701/10000 train_time:540087ms step_avg:80.60ms
+[2025-07-06 21:12:47] [Rank 0] step:6721/10000 train_time:541580ms step_avg:80.58ms
+[2025-07-06 21:12:48] [Rank 0] step:6741/10000 train_time:543070ms step_avg:80.56ms
+[2025-07-06 21:12:51] [Rank 0] step:6761/10000 train_time:545214ms step_avg:80.64ms
+[2025-07-06 21:12:52] [Rank 0] step:6781/10000 train_time:546705ms step_avg:80.62ms
+[2025-07-06 21:12:54] [Rank 0] step:6801/10000 train_time:548195ms step_avg:80.61ms
+[2025-07-06 21:12:55] [Rank 0] step:6821/10000 train_time:549687ms step_avg:80.59ms
+[2025-07-06 21:12:57] [Rank 0] step:6841/10000 train_time:551181ms step_avg:80.57ms
+[2025-07-06 21:12:59] [Rank 0] step:6861/10000 train_time:553325ms step_avg:80.65ms
+[2025-07-06 21:13:00] [Rank 0] step:6881/10000 train_time:554819ms step_avg:80.63ms
+[2025-07-06 21:13:02] [Rank 0] step:6901/10000 train_time:556314ms step_avg:80.61ms
+[2025-07-06 21:13:03] [Rank 0] step:6921/10000 train_time:557808ms step_avg:80.60ms
+[2025-07-06 21:13:05] [Rank 0] step:6941/10000 train_time:559966ms step_avg:80.68ms
+[2025-07-06 21:13:07] [Rank 0] step:6961/10000 train_time:561460ms step_avg:80.66ms
+[2025-07-06 21:13:08] [Rank 0] step:6981/10000 train_time:562957ms step_avg:80.64ms
+[2025-07-06 21:13:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:13:11] [Rank 0] PRINT: step:7000/10000 train_loss:0.8636 val_loss:0.8637 train_time:564453ms step_avg:80.64ms
+[2025-07-06 21:13:11] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:13:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:13:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:18:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:18:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:18:39] [Rank 0] Total Loss: 5.4727
+[2025-07-06 21:18:39] [Rank 0] Total FTA: 0.9276
+[2025-07-06 21:18:39] [Rank 0] Group 0 Loss: 5.6692
+[2025-07-06 21:18:39] [Rank 0] Group 1 Loss: 5.2906
+[2025-07-06 21:18:39] [Rank 0] Group 2 Loss: 5.2060
+[2025-07-06 21:18:39] [Rank 0] Group 3 Loss: 5.6123
+[2025-07-06 21:18:39] [Rank 0] Group 4 Loss: 5.4844
+[2025-07-06 21:18:39] [Rank 0] Group 5 Loss: 5.4142
+[2025-07-06 21:18:39] [Rank 0] Group 6 Loss: 5.4209
+[2025-07-06 21:18:39] [Rank 0] Group 7 Loss: 5.4447
+[2025-07-06 21:18:39] [Rank 0] Group 8 Loss: 5.4465
+[2025-07-06 21:18:39] [Rank 0] Group 9 Loss: 5.4818
+[2025-07-06 21:18:39] [Rank 0] Group 10 Loss: 5.4762
+[2025-07-06 21:18:39] [Rank 0] Group 11 Loss: 5.4944
+[2025-07-06 21:18:39] [Rank 0] Group 0 FTA: 0.8114
+[2025-07-06 21:18:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:18:39] [Rank 0] Group 2 FTA: 0.9245
+[2025-07-06 21:18:39] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 21:18:39] [Rank 0] Group 4 FTA: 0.9297
+[2025-07-06 21:18:39] [Rank 0] Group 5 FTA: 0.9740
+[2025-07-06 21:18:39] [Rank 0] Group 6 FTA: 0.9635
+[2025-07-06 21:18:39] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-06 21:18:39] [Rank 0] Group 8 FTA: 0.9245
+[2025-07-06 21:18:39] [Rank 0] Group 9 FTA: 0.9180
+[2025-07-06 21:18:39] [Rank 0] Group 10 FTA: 0.9375
+[2025-07-06 21:18:39] [Rank 0] Group 11 FTA: 0.9238
+[2025-07-06 21:18:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 21:18:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 21:18:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 21:18:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 21:18:40] [Rank 0] step:7001/10000 train_time:564475ms step_avg:80.63ms
+[2025-07-06 21:18:42] [Rank 0] step:7021/10000 train_time:565977ms step_avg:80.61ms
+[2025-07-06 21:18:44] [Rank 0] step:7041/10000 train_time:568136ms step_avg:80.69ms
+[2025-07-06 21:18:45] [Rank 0] step:7061/10000 train_time:569624ms step_avg:80.67ms
+[2025-07-06 21:18:47] [Rank 0] step:7081/10000 train_time:571115ms step_avg:80.65ms
+[2025-07-06 21:18:48] [Rank 0] step:7101/10000 train_time:572606ms step_avg:80.64ms
+[2025-07-06 21:18:51] [Rank 0] step:7121/10000 train_time:574750ms step_avg:80.71ms
+[2025-07-06 21:18:52] [Rank 0] step:7141/10000 train_time:576238ms step_avg:80.69ms
+[2025-07-06 21:18:53] [Rank 0] step:7161/10000 train_time:577729ms step_avg:80.68ms
+[2025-07-06 21:18:55] [Rank 0] step:7181/10000 train_time:579220ms step_avg:80.66ms
+[2025-07-06 21:18:57] [Rank 0] step:7201/10000 train_time:580766ms step_avg:80.65ms
+[2025-07-06 21:18:59] [Rank 0] step:7221/10000 train_time:582865ms step_avg:80.72ms
+[2025-07-06 21:19:00] [Rank 0] step:7241/10000 train_time:584357ms step_avg:80.70ms
+[2025-07-06 21:19:02] [Rank 0] step:7261/10000 train_time:585980ms step_avg:80.70ms
+[2025-07-06 21:19:03] [Rank 0] step:7281/10000 train_time:587531ms step_avg:80.69ms
+[2025-07-06 21:19:05] [Rank 0] step:7301/10000 train_time:589362ms step_avg:80.72ms
+[2025-07-06 21:19:07] [Rank 0] step:7321/10000 train_time:590856ms step_avg:80.71ms
+[2025-07-06 21:19:08] [Rank 0] step:7341/10000 train_time:592349ms step_avg:80.69ms
+[2025-07-06 21:19:10] [Rank 0] step:7361/10000 train_time:593842ms step_avg:80.67ms
+[2025-07-06 21:19:11] [Rank 0] step:7381/10000 train_time:595390ms step_avg:80.67ms
+[2025-07-06 21:19:13] [Rank 0] step:7401/10000 train_time:597061ms step_avg:80.67ms
+[2025-07-06 21:19:14] [Rank 0] step:7421/10000 train_time:598552ms step_avg:80.66ms
+[2025-07-06 21:19:16] [Rank 0] step:7441/10000 train_time:600046ms step_avg:80.64ms
+[2025-07-06 21:19:17] [Rank 0] step:7461/10000 train_time:601540ms step_avg:80.62ms
+[2025-07-06 21:19:19] [Rank 0] step:7481/10000 train_time:603678ms step_avg:80.69ms
+[2025-07-06 21:19:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:19:22] [Rank 0] PRINT: step:7500/10000 train_loss:0.8623 val_loss:0.8628 train_time:605171ms step_avg:80.69ms
+[2025-07-06 21:19:22] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:19:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:19:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:24:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:24:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:24:50] [Rank 0] Total Loss: 5.4552
+[2025-07-06 21:24:50] [Rank 0] Total FTA: 0.9586
+[2025-07-06 21:24:50] [Rank 0] Group 0 Loss: 5.7035
+[2025-07-06 21:24:50] [Rank 0] Group 1 Loss: 5.2738
+[2025-07-06 21:24:50] [Rank 0] Group 2 Loss: 5.2479
+[2025-07-06 21:24:50] [Rank 0] Group 3 Loss: 5.5048
+[2025-07-06 21:24:50] [Rank 0] Group 4 Loss: 5.4070
+[2025-07-06 21:24:50] [Rank 0] Group 5 Loss: 5.4094
+[2025-07-06 21:24:50] [Rank 0] Group 6 Loss: 5.3395
+[2025-07-06 21:24:50] [Rank 0] Group 7 Loss: 5.4661
+[2025-07-06 21:24:50] [Rank 0] Group 8 Loss: 5.4466
+[2025-07-06 21:24:50] [Rank 0] Group 9 Loss: 5.4773
+[2025-07-06 21:24:50] [Rank 0] Group 10 Loss: 5.4468
+[2025-07-06 21:24:50] [Rank 0] Group 11 Loss: 5.4725
+[2025-07-06 21:24:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:24:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:24:50] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:24:50] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-06 21:24:50] [Rank 0] Group 4 FTA: 0.8906
+[2025-07-06 21:24:50] [Rank 0] Group 5 FTA: 0.9766
+[2025-07-06 21:24:50] [Rank 0] Group 6 FTA: 0.9505
+[2025-07-06 21:24:50] [Rank 0] Group 7 FTA: 0.9375
+[2025-07-06 21:24:50] [Rank 0] Group 8 FTA: 0.9271
+[2025-07-06 21:24:50] [Rank 0] Group 9 FTA: 0.9609
+[2025-07-06 21:24:50] [Rank 0] Group 10 FTA: 0.9492
+[2025-07-06 21:24:50] [Rank 0] Group 11 FTA: 0.9434
+[2025-07-06 21:24:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 21:24:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 21:24:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 21:24:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 21:24:52] [Rank 0] step:7501/10000 train_time:605192ms step_avg:80.68ms
+[2025-07-06 21:24:53] [Rank 0] step:7521/10000 train_time:606694ms step_avg:80.67ms
+[2025-07-06 21:24:55] [Rank 0] step:7541/10000 train_time:608181ms step_avg:80.65ms
+[2025-07-06 21:24:57] [Rank 0] step:7561/10000 train_time:610350ms step_avg:80.72ms
+[2025-07-06 21:24:58] [Rank 0] step:7581/10000 train_time:611822ms step_avg:80.70ms
+[2025-07-06 21:25:00] [Rank 0] step:7601/10000 train_time:613311ms step_avg:80.69ms
+[2025-07-06 21:25:01] [Rank 0] step:7621/10000 train_time:614800ms step_avg:80.67ms
+[2025-07-06 21:25:03] [Rank 0] step:7641/10000 train_time:616291ms step_avg:80.66ms
+[2025-07-06 21:25:05] [Rank 0] step:7661/10000 train_time:618433ms step_avg:80.72ms
+[2025-07-06 21:25:07] [Rank 0] step:7681/10000 train_time:619922ms step_avg:80.71ms
+[2025-07-06 21:25:08] [Rank 0] step:7701/10000 train_time:621414ms step_avg:80.69ms
+[2025-07-06 21:25:10] [Rank 0] step:7721/10000 train_time:622907ms step_avg:80.68ms
+[2025-07-06 21:25:12] [Rank 0] step:7741/10000 train_time:625069ms step_avg:80.75ms
+[2025-07-06 21:25:13] [Rank 0] step:7761/10000 train_time:626542ms step_avg:80.73ms
+[2025-07-06 21:25:15] [Rank 0] step:7781/10000 train_time:628035ms step_avg:80.71ms
+[2025-07-06 21:25:16] [Rank 0] step:7801/10000 train_time:629528ms step_avg:80.70ms
+[2025-07-06 21:25:18] [Rank 0] step:7821/10000 train_time:631021ms step_avg:80.68ms
+[2025-07-06 21:25:20] [Rank 0] step:7841/10000 train_time:633189ms step_avg:80.75ms
+[2025-07-06 21:25:22] [Rank 0] step:7861/10000 train_time:634927ms step_avg:80.77ms
+[2025-07-06 21:25:23] [Rank 0] step:7881/10000 train_time:636422ms step_avg:80.75ms
+[2025-07-06 21:25:25] [Rank 0] step:7901/10000 train_time:637978ms step_avg:80.75ms
+[2025-07-06 21:25:27] [Rank 0] step:7921/10000 train_time:640140ms step_avg:80.82ms
+[2025-07-06 21:25:28] [Rank 0] step:7941/10000 train_time:641614ms step_avg:80.80ms
+[2025-07-06 21:25:30] [Rank 0] step:7961/10000 train_time:643108ms step_avg:80.78ms
+[2025-07-06 21:25:31] [Rank 0] step:7981/10000 train_time:644602ms step_avg:80.77ms
+[2025-07-06 21:25:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:25:34] [Rank 0] PRINT: step:8000/10000 train_loss:0.8612 val_loss:0.8624 train_time:646095ms step_avg:80.76ms
+[2025-07-06 21:25:34] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:25:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:25:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 21:30:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 21:30:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 21:30:59] [Rank 0] Total Loss: 5.5117
+[2025-07-06 21:30:59] [Rank 0] Total FTA: 0.9641
+[2025-07-06 21:30:59] [Rank 0] Group 0 Loss: 5.6985
+[2025-07-06 21:30:59] [Rank 0] Group 1 Loss: 5.3856
+[2025-07-06 21:30:59] [Rank 0] Group 2 Loss: 5.2713
+[2025-07-06 21:30:59] [Rank 0] Group 3 Loss: 5.6055
+[2025-07-06 21:30:59] [Rank 0] Group 4 Loss: 5.4901
+[2025-07-06 21:30:59] [Rank 0] Group 5 Loss: 5.4257
+[2025-07-06 21:30:59] [Rank 0] Group 6 Loss: 5.4121
+[2025-07-06 21:30:59] [Rank 0] Group 7 Loss: 5.5208
+[2025-07-06 21:30:59] [Rank 0] Group 8 Loss: 5.5346
+[2025-07-06 21:30:59] [Rank 0] Group 9 Loss: 5.5915
+[2025-07-06 21:30:59] [Rank 0] Group 10 Loss: 5.4754
+[2025-07-06 21:30:59] [Rank 0] Group 11 Loss: 5.5378
+[2025-07-06 21:30:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 21:31:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 21:31:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 21:31:00] [Rank 0] Group 3 FTA: 0.9115
+[2025-07-06 21:31:00] [Rank 0] Group 4 FTA: 0.9583
+[2025-07-06 21:31:00] [Rank 0] Group 5 FTA: 0.9635
+[2025-07-06 21:31:00] [Rank 0] Group 6 FTA: 0.9818
+[2025-07-06 21:31:00] [Rank 0] Group 7 FTA: 0.9531
+[2025-07-06 21:31:00] [Rank 0] Group 8 FTA: 0.9453
+[2025-07-06 21:31:00] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-06 21:31:00] [Rank 0] Group 10 FTA: 0.9414
+[2025-07-06 21:31:00] [Rank 0] Group 11 FTA: 0.9473
+[2025-07-06 21:31:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-06 21:31:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-06 21:31:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-06 21:31:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-06 21:31:01] [Rank 0] step:8001/10000 train_time:646116ms step_avg:80.75ms
+[2025-07-06 21:31:03] [Rank 0] step:8021/10000 train_time:647942ms step_avg:80.78ms
+[2025-07-06 21:31:05] [Rank 0] step:8041/10000 train_time:649431ms step_avg:80.76ms
+[2025-07-06 21:31:06] [Rank 0] step:8061/10000 train_time:650919ms step_avg:80.75ms
+[2025-07-06 21:31:07] [Rank 0] step:8081/10000 train_time:652410ms step_avg:80.73ms
+[2025-07-06 21:31:09] [Rank 0] step:8101/10000 train_time:654158ms step_avg:80.75ms
+[2025-07-06 21:31:11] [Rank 0] step:8121/10000 train_time:655728ms step_avg:80.74ms
+[2025-07-06 21:31:12] [Rank 0] step:8141/10000 train_time:657219ms step_avg:80.73ms
+[2025-07-06 21:31:14] [Rank 0] step:8161/10000 train_time:658710ms step_avg:80.71ms
+[2025-07-06 21:31:15] [Rank 0] step:8181/10000 train_time:660202ms step_avg:80.70ms
+[2025-07-06 21:31:17] [Rank 0] step:8201/10000 train_time:662351ms step_avg:80.76ms
+[2025-07-06 21:31:19] [Rank 0] step:8221/10000 train_time:663841ms step_avg:80.75ms
+[2025-07-06 21:31:20] [Rank 0] step:8241/10000 train_time:665334ms step_avg:80.73ms
+[2025-07-06 21:31:22] [Rank 0] step:8261/10000 train_time:666826ms step_avg:80.72ms
+[2025-07-06 21:31:24] [Rank 0] step:8281/10000 train_time:668371ms step_avg:80.71ms
+[2025-07-06 21:31:26] [Rank 0] step:8301/10000 train_time:670461ms step_avg:80.77ms
+[2025-07-06 21:31:27] [Rank 0] step:8321/10000 train_time:671953ms step_avg:80.75ms
+[2025-07-06 21:31:29] [Rank 0] step:8341/10000 train_time:673448ms step_avg:80.74ms
+[2025-07-06 21:31:30] [Rank 0] step:8361/10000 train_time:674942ms step_avg:80.73ms
+[2025-07-06 21:31:32] [Rank 0] step:8381/10000 train_time:677104ms step_avg:80.79ms
+[2025-07-06 21:31:34] [Rank 0] step:8401/10000 train_time:678597ms step_avg:80.78ms
+[2025-07-06 21:31:35] [Rank 0] step:8421/10000 train_time:680093ms step_avg:80.76ms
+[2025-07-06 21:31:37] [Rank 0] step:8441/10000 train_time:681589ms step_avg:80.75ms
+[2025-07-06 21:31:39] [Rank 0] step:8461/10000 train_time:683772ms step_avg:80.81ms
+[2025-07-06 21:31:41] [Rank 0] step:8481/10000 train_time:685462ms step_avg:80.82ms
+[2025-07-06 21:31:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 21:31:43] [Rank 0] PRINT: step:8500/10000 train_loss:0.8603 val_loss:0.8619 train_time:687004ms step_avg:80.82ms
+[2025-07-06 21:31:43] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 21:31:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 21:31:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 21:31:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 21:37:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 21:37:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 21:37:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 21:37:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 21:37:11] [Rank 0] Total Loss: 5.5688 +[2025-07-06 21:37:11] [Rank 0] Total Loss: 5.5688 +[2025-07-06 21:37:11] [Rank 0] Total FTA: 0.9631 +[2025-07-06 21:37:11] [Rank 0] Total FTA: 0.9631 +[2025-07-06 21:37:11] [Rank 0] Group 0 Loss: 5.8493 +[2025-07-06 21:37:11] [Rank 0] Group 0 Loss: 5.8493 +[2025-07-06 21:37:11] [Rank 0] Group 1 Loss: 5.4948 +[2025-07-06 21:37:11] [Rank 0] Group 1 Loss: 5.4948 +[2025-07-06 21:37:11] [Rank 0] Group 2 Loss: 5.4671 +[2025-07-06 21:37:11] [Rank 0] Group 2 Loss: 5.4671 +[2025-07-06 21:37:11] [Rank 0] Group 3 Loss: 5.5851 +[2025-07-06 21:37:11] [Rank 0] Group 3 Loss: 5.5851 +[2025-07-06 21:37:11] [Rank 0] Group 4 Loss: 5.5635 +[2025-07-06 21:37:11] [Rank 0] Group 4 Loss: 5.5635 +[2025-07-06 21:37:11] [Rank 0] Group 5 Loss: 5.4916 +[2025-07-06 21:37:11] [Rank 0] Group 5 Loss: 5.4916 +[2025-07-06 21:37:11] [Rank 0] Group 6 Loss: 5.4499 +[2025-07-06 21:37:11] [Rank 0] Group 6 Loss: 5.4499 +[2025-07-06 21:37:11] [Rank 0] Group 7 Loss: 5.5546 +[2025-07-06 21:37:11] [Rank 0] Group 7 Loss: 5.5546 +[2025-07-06 21:37:11] [Rank 0] Group 8 Loss: 5.5790 +[2025-07-06 21:37:11] [Rank 0] Group 8 Loss: 5.5790 +[2025-07-06 21:37:11] [Rank 0] Group 9 Loss: 5.5882 +[2025-07-06 21:37:11] [Rank 0] Group 9 Loss: 5.5882 +[2025-07-06 21:37:11] [Rank 0] Group 10 Loss: 5.5325 +[2025-07-06 21:37:11] [Rank 0] Group 10 Loss: 5.5325 +[2025-07-06 21:37:11] [Rank 0] Group 11 Loss: 5.5083 +[2025-07-06 21:37:11] [Rank 0] Group 11 Loss: 5.5083 +[2025-07-06 21:37:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 21:37:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 21:37:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 21:37:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 21:37:11] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 21:37:11] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 21:37:11] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 21:37:11] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 21:37:11] [Rank 0] Group 4 FTA: 0.9870 +[2025-07-06 21:37:11] [Rank 0] Group 4 FTA: 0.9870 +[2025-07-06 21:37:11] [Rank 0] Group 5 FTA: 0.9453 +[2025-07-06 21:37:11] [Rank 0] Group 5 FTA: 0.9453 +[2025-07-06 21:37:11] [Rank 0] Group 6 FTA: 0.9453 +[2025-07-06 21:37:11] [Rank 0] Group 6 FTA: 0.9453 +[2025-07-06 21:37:11] [Rank 0] Group 7 FTA: 0.9427 +[2025-07-06 21:37:11] [Rank 0] Group 7 FTA: 0.9427 +[2025-07-06 21:37:11] [Rank 0] Group 8 FTA: 0.9323 +[2025-07-06 21:37:11] [Rank 0] Group 8 FTA: 0.9323 +[2025-07-06 21:37:11] [Rank 0] Group 9 FTA: 0.9258 +[2025-07-06 21:37:11] [Rank 0] Group 9 FTA: 0.9258 +[2025-07-06 21:37:11] [Rank 0] Group 10 FTA: 0.9453 +[2025-07-06 21:37:11] [Rank 0] Group 10 FTA: 0.9453 +[2025-07-06 21:37:11] [Rank 0] Group 11 FTA: 0.9355 +[2025-07-06 21:37:11] [Rank 0] Group 11 FTA: 0.9355 +[2025-07-06 21:37:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 21:37:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 21:37:12] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 21:37:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 21:37:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 21:37:13] [Rank 0] step:8501/10000 train_time:687026ms step_avg:80.82ms +[2025-07-06 21:37:14] [Rank 0] step:8521/10000 train_time:688511ms step_avg:80.80ms +[2025-07-06 21:37:16] [Rank 0] step:8541/10000 train_time:689998ms step_avg:80.79ms +[2025-07-06 21:37:18] [Rank 0] step:8561/10000 train_time:692153ms step_avg:80.85ms +[2025-07-06 21:37:19] [Rank 0] step:8581/10000 train_time:693641ms step_avg:80.83ms +[2025-07-06 21:37:21] [Rank 0] step:8601/10000 train_time:695130ms step_avg:80.82ms +[2025-07-06 21:37:22] [Rank 0] step:8621/10000 train_time:696620ms step_avg:80.80ms +[2025-07-06 21:37:25] [Rank 0] step:8641/10000 train_time:698368ms step_avg:80.82ms +[2025-07-06 21:37:26] [Rank 0] step:8661/10000 train_time:700263ms step_avg:80.85ms +[2025-07-06 21:37:28] [Rank 0] step:8681/10000 train_time:701755ms step_avg:80.84ms +[2025-07-06 21:37:29] [Rank 0] step:8701/10000 train_time:703246ms step_avg:80.82ms +[2025-07-06 21:37:31] [Rank 0] step:8721/10000 train_time:704741ms step_avg:80.81ms +[2025-07-06 21:37:33] [Rank 0] step:8741/10000 train_time:706908ms step_avg:80.87ms +[2025-07-06 21:37:34] [Rank 0] step:8761/10000 train_time:708399ms step_avg:80.86ms +[2025-07-06 21:37:36] [Rank 0] step:8781/10000 train_time:709894ms step_avg:80.84ms +[2025-07-06 21:37:37] [Rank 0] step:8801/10000 train_time:711388ms step_avg:80.83ms 
+[2025-07-06 21:37:39] [Rank 0] step:8821/10000 train_time:713137ms step_avg:80.85ms +[2025-07-06 21:37:41] [Rank 0] step:8841/10000 train_time:715018ms step_avg:80.88ms +[2025-07-06 21:37:42] [Rank 0] step:8861/10000 train_time:716512ms step_avg:80.86ms +[2025-07-06 21:37:44] [Rank 0] step:8881/10000 train_time:718007ms step_avg:80.85ms +[2025-07-06 21:37:45] [Rank 0] step:8901/10000 train_time:719504ms step_avg:80.83ms +[2025-07-06 21:37:48] [Rank 0] step:8921/10000 train_time:721671ms step_avg:80.90ms +[2025-07-06 21:37:49] [Rank 0] step:8941/10000 train_time:723165ms step_avg:80.88ms +[2025-07-06 21:37:51] [Rank 0] step:8961/10000 train_time:724659ms step_avg:80.87ms +[2025-07-06 21:37:52] [Rank 0] step:8981/10000 train_time:726154ms step_avg:80.85ms +[2025-07-06 21:37:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:37:54] [Rank 0] PRINT: step:9000/10000 train_loss:0.8595 val_loss:0.8617 train_time:727650ms step_avg:80.85ms +[2025-07-06 21:37:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:37:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 21:37:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 21:43:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 21:43:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 21:43:22] [Rank 0] Total Loss: 5.5337 +[2025-07-06 21:43:22] [Rank 0] Total FTA: 0.9741 +[2025-07-06 21:43:22] [Rank 0] Group 0 Loss: 5.8199 +[2025-07-06 21:43:22] [Rank 0] Group 1 Loss: 5.3870 +[2025-07-06 21:43:22] [Rank 0] Group 2 Loss: 5.4382 +[2025-07-06 21:43:22] [Rank 0] Group 3 Loss: 5.5447 +[2025-07-06 21:43:22] [Rank 0] Group 4 Loss: 5.4883 +[2025-07-06 21:43:22] [Rank 0] Group 5 Loss: 5.4750 +[2025-07-06 21:43:22] [Rank 0] Group 6 Loss: 5.4205 +[2025-07-06 21:43:22] [Rank 0] Group 7 Loss: 5.5177 +[2025-07-06 21:43:22] [Rank 0] Group 8 Loss: 5.5223 +[2025-07-06 21:43:22] [Rank 0] Group 9 Loss: 5.4919 +[2025-07-06 21:43:22] [Rank 0] Group 10 Loss: 5.5102 +[2025-07-06 21:43:22] [Rank 0] Group 11 Loss: 5.5192 +[2025-07-06 21:43:22] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 21:43:22] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 21:43:22] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 21:43:22] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 21:43:22] [Rank 0] Group 4 FTA: 0.9896 +[2025-07-06 21:43:22] [Rank 0] Group 5 FTA: 0.9531 +[2025-07-06 21:43:22] [Rank 0] Group 6 FTA: 0.9661 +[2025-07-06 21:43:22] [Rank 0] Group 7 FTA: 0.9609 +[2025-07-06 21:43:22] [Rank 0] Group 8 FTA: 0.9661 +[2025-07-06 21:43:22] [Rank 0] Group 9 FTA: 0.9609 +[2025-07-06 21:43:22] [Rank 0] Group 10 FTA: 0.9648 +[2025-07-06 21:43:22] [Rank 0] Group 11 FTA: 0.9463 +[2025-07-06 21:43:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 21:43:23] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 21:43:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 21:43:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 21:43:24] [Rank 0] step:9001/10000 train_time:727782ms step_avg:80.86ms +[2025-07-06 21:43:25] [Rank 0] step:9021/10000 train_time:729884ms step_avg:80.91ms +[2025-07-06 21:43:27] [Rank 0] step:9041/10000 train_time:731372ms step_avg:80.89ms +[2025-07-06 21:43:28] [Rank 0] step:9061/10000 train_time:732861ms step_avg:80.88ms +[2025-07-06 21:43:30] [Rank 0] step:9081/10000 train_time:734352ms step_avg:80.87ms +[2025-07-06 21:43:32] [Rank 0] step:9101/10000 train_time:736076ms step_avg:80.88ms +[2025-07-06 21:43:33] [Rank 0] step:9121/10000 train_time:737567ms step_avg:80.86ms +[2025-07-06 21:43:35] [Rank 0] step:9141/10000 train_time:739059ms step_avg:80.85ms +[2025-07-06 21:43:36] [Rank 0] step:9161/10000 train_time:740549ms step_avg:80.84ms +[2025-07-06 21:43:38] [Rank 0] step:9181/10000 train_time:742041ms step_avg:80.82ms +[2025-07-06 21:43:40] [Rank 0] step:9201/10000 train_time:744198ms step_avg:80.88ms +[2025-07-06 21:43:41] [Rank 0] step:9221/10000 train_time:745689ms step_avg:80.87ms +[2025-07-06 21:43:43] [Rank 0] step:9241/10000 train_time:747181ms step_avg:80.86ms +[2025-07-06 21:43:44] [Rank 0] step:9261/10000 train_time:748675ms step_avg:80.84ms +[2025-07-06 21:43:46] [Rank 0] step:9281/10000 train_time:750407ms step_avg:80.85ms +[2025-07-06 21:43:47] [Rank 0] step:9301/10000 train_time:751899ms step_avg:80.84ms 
+[2025-07-06 21:43:49] [Rank 0] step:9321/10000 train_time:753393ms step_avg:80.83ms +[2025-07-06 21:43:50] [Rank 0] step:9341/10000 train_time:754888ms step_avg:80.81ms +[2025-07-06 21:43:53] [Rank 0] step:9361/10000 train_time:757055ms step_avg:80.87ms +[2025-07-06 21:43:54] [Rank 0] step:9381/10000 train_time:758528ms step_avg:80.86ms +[2025-07-06 21:43:56] [Rank 0] step:9401/10000 train_time:760024ms step_avg:80.85ms +[2025-07-06 21:43:57] [Rank 0] step:9421/10000 train_time:761520ms step_avg:80.83ms +[2025-07-06 21:43:59] [Rank 0] step:9441/10000 train_time:763016ms step_avg:80.82ms +[2025-07-06 21:44:01] [Rank 0] step:9461/10000 train_time:765172ms step_avg:80.88ms +[2025-07-06 21:44:02] [Rank 0] step:9481/10000 train_time:766666ms step_avg:80.86ms +[2025-07-06 21:44:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:44:05] [Rank 0] PRINT: step:9500/10000 train_loss:0.8586 val_loss:0.8613 train_time:768162ms step_avg:80.86ms +[2025-07-06 21:44:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:44:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 21:44:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 21:49:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 21:49:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 21:49:32] [Rank 0] Total Loss: 5.6120 +[2025-07-06 21:49:32] [Rank 0] Total FTA: 0.9748 +[2025-07-06 21:49:32] [Rank 0] Group 0 Loss: 5.8907 +[2025-07-06 21:49:32] [Rank 0] Group 1 Loss: 5.5132 +[2025-07-06 21:49:32] [Rank 0] Group 2 Loss: 5.5320 +[2025-07-06 21:49:32] [Rank 0] Group 3 Loss: 5.6770 +[2025-07-06 21:49:32] [Rank 0] Group 4 Loss: 5.4573 +[2025-07-06 21:49:32] [Rank 0] Group 5 Loss: 5.4998 +[2025-07-06 21:49:32] [Rank 0] Group 6 Loss: 5.4662 +[2025-07-06 21:49:32] [Rank 0] Group 7 Loss: 5.6172 +[2025-07-06 21:49:32] [Rank 0] Group 8 Loss: 5.6044 +[2025-07-06 21:49:32] [Rank 0] Group 9 Loss: 5.6049 +[2025-07-06 21:49:32] [Rank 0] Group 10 Loss: 5.5789 +[2025-07-06 21:49:32] [Rank 0] Group 11 Loss: 5.6195 +[2025-07-06 21:49:32] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 21:49:32] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 21:49:32] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 21:49:32] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 21:49:32] [Rank 0] Group 4 FTA: 0.9688 +[2025-07-06 21:49:32] [Rank 0] Group 5 FTA: 0.9427 +[2025-07-06 21:49:32] [Rank 0] Group 6 FTA: 0.9922 +[2025-07-06 21:49:32] [Rank 0] Group 7 FTA: 0.9635 +[2025-07-06 21:49:32] [Rank 0] Group 8 FTA: 0.9401 +[2025-07-06 21:49:32] [Rank 0] Group 9 FTA: 0.9375 +[2025-07-06 21:49:32] [Rank 0] Group 10 FTA: 0.9785 +[2025-07-06 21:49:32] [Rank 0] Group 11 FTA: 0.9600 +[2025-07-06 21:49:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 21:49:33] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 21:49:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 21:49:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 21:49:34] [Rank 0] step:9501/10000 train_time:768184ms step_avg:80.85ms +[2025-07-06 21:49:35] [Rank 0] step:9521/10000 train_time:769751ms step_avg:80.85ms +[2025-07-06 21:49:38] [Rank 0] step:9541/10000 train_time:771474ms step_avg:80.86ms +[2025-07-06 21:49:39] [Rank 0] step:9561/10000 train_time:773670ms step_avg:80.92ms +[2025-07-06 21:49:41] [Rank 0] step:9581/10000 train_time:775158ms step_avg:80.91ms +[2025-07-06 21:49:42] [Rank 0] step:9601/10000 train_time:776649ms step_avg:80.89ms +[2025-07-06 21:49:44] [Rank 0] step:9621/10000 train_time:778137ms step_avg:80.88ms +[2025-07-06 21:49:46] [Rank 0] step:9641/10000 train_time:780290ms step_avg:80.93ms +[2025-07-06 21:49:47] [Rank 0] step:9661/10000 train_time:781778ms step_avg:80.92ms +[2025-07-06 21:49:49] [Rank 0] step:9681/10000 train_time:783271ms step_avg:80.91ms +[2025-07-06 21:49:50] [Rank 0] step:9701/10000 train_time:784759ms step_avg:80.89ms +[2025-07-06 21:49:52] [Rank 0] step:9721/10000 train_time:786251ms step_avg:80.88ms +[2025-07-06 21:49:54] [Rank 0] step:9741/10000 train_time:787977ms step_avg:80.89ms +[2025-07-06 21:49:55] [Rank 0] step:9761/10000 train_time:789470ms step_avg:80.88ms +[2025-07-06 21:49:57] [Rank 0] step:9781/10000 train_time:790961ms step_avg:80.87ms +[2025-07-06 21:49:58] [Rank 0] step:9801/10000 train_time:792452ms step_avg:80.85ms 
+[2025-07-06 21:50:00] [Rank 0] step:9821/10000 train_time:794593ms step_avg:80.91ms +[2025-07-06 21:50:02] [Rank 0] step:9841/10000 train_time:796085ms step_avg:80.89ms +[2025-07-06 21:50:03] [Rank 0] step:9861/10000 train_time:797577ms step_avg:80.88ms +[2025-07-06 21:50:05] [Rank 0] step:9881/10000 train_time:799072ms step_avg:80.87ms +[2025-07-06 21:50:07] [Rank 0] step:9901/10000 train_time:800568ms step_avg:80.86ms +[2025-07-06 21:50:08] [Rank 0] step:9921/10000 train_time:802705ms step_avg:80.91ms +[2025-07-06 21:50:10] [Rank 0] step:9941/10000 train_time:804201ms step_avg:80.90ms +[2025-07-06 21:50:11] [Rank 0] step:9961/10000 train_time:805697ms step_avg:80.89ms +[2025-07-06 21:50:13] [Rank 0] step:9981/10000 train_time:807196ms step_avg:80.87ms +[2025-07-06 21:50:15] [Rank 0] step:10000/10000 train_time:809272ms step_avg:80.93ms +[2025-07-06 21:50:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 21:50:16] [Rank 0] PRINT: step:10000/10000 train_loss:0.8578 val_loss:0.8612 train_time:809353ms step_avg:80.94ms +[2025-07-06 21:50:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 21:50:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 21:50:16] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 21:55:42] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 21:55:42] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 21:55:42] [Rank 0] Total Loss: 5.6234 +[2025-07-06 21:55:42] [Rank 0] Total FTA: 0.9805 +[2025-07-06 21:55:42] [Rank 0] Group 0 Loss: 5.9137 +[2025-07-06 21:55:42] [Rank 0] Group 1 Loss: 5.5581 +[2025-07-06 21:55:42] [Rank 0] Group 2 Loss: 5.6018 +[2025-07-06 21:55:42] [Rank 0] Group 3 Loss: 5.6887 +[2025-07-06 21:55:42] [Rank 0] Group 4 Loss: 5.5411 +[2025-07-06 21:55:42] [Rank 0] Group 5 Loss: 5.4799 +[2025-07-06 21:55:42] [Rank 0] Group 6 Loss: 5.4625 +[2025-07-06 21:55:42] [Rank 0] Group 7 Loss: 5.5690 +[2025-07-06 21:55:42] [Rank 0] Group 8 Loss: 5.5859 +[2025-07-06 21:55:42] [Rank 0] Group 9 Loss: 5.6120 +[2025-07-06 21:55:42] [Rank 0] Group 10 Loss: 5.5956 +[2025-07-06 21:55:42] [Rank 0] Group 11 Loss: 5.6094 +[2025-07-06 21:55:42] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-06 21:55:42] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 21:55:42] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 21:55:42] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 21:55:42] [Rank 0] Group 4 FTA: 0.9896 +[2025-07-06 21:55:42] [Rank 0] Group 5 FTA: 0.9766 +[2025-07-06 21:55:42] [Rank 0] Group 6 FTA: 0.9792 +[2025-07-06 21:55:42] [Rank 0] Group 7 FTA: 0.9505 +[2025-07-06 21:55:42] [Rank 0] Group 8 FTA: 0.9792 +[2025-07-06 21:55:42] [Rank 0] Group 9 FTA: 0.9688 +[2025-07-06 21:55:42] [Rank 0] Group 10 FTA: 0.9766 +[2025-07-06 21:55:42] [Rank 0] Group 11 FTA: 0.9590 +[2025-07-06 21:55:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-06 21:55:43] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-06 21:55:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-06 21:55:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-06 21:55:43] [Rank 0] step:10001/10000 train_time:809374ms step_avg:80.93ms +[2025-07-06 21:55:43] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 21:55:43 2025 --- +[2025-07-06 21:55:43] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10356 MiB diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/config.json b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/config.json new file mode 100644 index 0000000000000000000000000000000000000000..420b14c7f03c726b29572f8e6ae9103c355333ec --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 49, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e926ca86-5d58-4540-a69c-9117bc141d07", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..40dabcc9e15607dc8c1dad9759f59a0c6d0172d9 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:511515b0f7c82d26a6bd5393431daa7d8dcb3215cffbf6741f087a1ec028d3d0 +size 401167 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..d7355ce0f337bde688f90d5f0c3412147ffa1688 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1cd8e17631d801e9aafd8c641ac3708dded3e4ae66a8f7e32c0d71b8dfc68b7b +size 366081 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png 
b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..ea4043695c67d5b0cf7e64441cd0bb2044dc8d6b --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6806bcc0ff7a34ec6fa68f02743060bbf66d1a7444db0889d123dccde284f69f +size 112299 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..5e0e1bd357496696aa7b7c68bfaed7e6357c2fcd --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dad7a9295fa375b064dfdc58b233a9a44cb88c946055ee7165a730ad3b7f1577 +size 109209 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/training_log_e926ca86-5d58-4540-a69c-9117bc141d07.txt b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/training_log_e926ca86-5d58-4540-a69c-9117bc141d07.txt new file mode 100644 index 0000000000000000000000000000000000000000..3f5f2ed039c445e8f5714b9098267b7d69b837d6 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/training_log_e926ca86-5d58-4540-a69c-9117bc141d07.txt @@ -0,0 +1,5144 @@ +[2025-07-09 06:28:26] [Rank 0] PRINT: --- Script Start: Wed Jul 9 06:28:26 2025 --- +[2025-07-09 06:28:26] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-09 06:28:26] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-09 06:28:26] [Rank 0] PRINT: Using fixed seed: 49 +[2025-07-09 06:28:26] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49 +[2025-07-09 06:28:26] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as 
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # cycling over the shard files enables multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn); " + "5: All Adam (No Muon, all applicable matrices to Adam); " + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); " + "7: Muon(VO Attn, MLP)/Adam(QK Attn); " + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
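# Illustrative sketch, not part of the logged script: the f-string above fixes the
# run-directory layout that every artifact in this diff lives under (config.json,
# training_log_*.txt, the four curve PNGs). A minimal, self-contained example of the
# naming scheme, using this run's cli_args values (mode 0, "qkvo", lr 0.002, seed 49)
# and assuming POSIX-style paths.
from pathlib import Path

mode, param, lr, seed = 0, "qkvo", 0.002, 49  # values mirror this run's config.json
run_dir = Path("logs_bios/qa_0704") / f"mode_{mode}_param_{param}_lr_{lr}_seed_{seed}"
assert str(run_dir) == "logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49"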
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append each message to the log file exactly once (logfile may be None) + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-09 06:28:26] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
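+# Note on run gating: the absolute run_dir_path built above is immediately superseded
+# by the relative base_log_dir construction below. run_flag is computed on every rank
+# from that relative path *before* the master process creates the directory, so a
+# pre-existing directory for this exact (optimizer_mode, parameterization, adam_lr,
+# seed) combination makes every rank see run_flag == False, and the entire
+# "if run_flag:" training block further down is skipped, making re-launches idempotent.
+# E.g. running with --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.0001
+# --seed 42 resolves to logs_bios/qa_0704/mode_0_param_qkvo_lr_0.0001_seed_42.
+# This assumes all ranks share a filesystem and the same working directory; otherwise
+# ranks could disagree on run_flag.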
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # Number of samples contributing to loss
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # Number of samples contributing to FTA
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (single forward pass, reused for loss and FTA) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # If the model returns a (loss, logits) tuple even with target_seq=None,
+            # keep the logits (the last element); indexing [0] would pick up the loss
+            # instead. This matches the result[-1] convention used in
+            # compute_first_token_accuracy.
+            if isinstance(logits, tuple): logits = logits[-1]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function for per-class or total metric histories keyed by training step"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    # The history dicts are keyed by training step, so label the x-axis accordingly
+    # (plot_loss_curves below uses the same convention).
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
+
+    # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    if exp_args.model_parameterization == "qkvo":
+        print0("PRINT: Collecting parameters for optimizers...", console=True)
+        head_params = [model.lm_head.weight]
+        embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+        # Granular collection for attention and MLP parts
+        attn_q_params = []
+        attn_k_params = []
+        attn_v_params = []
+        attn_o_params = []  # W_O from c_proj
+        mlp_fc_params = []
+        mlp_proj_params = []
+
+        for block_module in model.blocks:
+            if block_module.attn is not None:
+                # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+                if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+                else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+                if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+                else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+                else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+                attn_o_params.append(block_module.attn.c_proj.weight)
+            if block_module.mlp is not None:
+                mlp_fc_params.append(block_module.mlp.c_fc.weight)
+                mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+        # Combine into logical groups for experiments
+        attn_qk_group = attn_q_params + attn_k_params
+        attn_vo_group = attn_v_params + attn_o_params
+        all_attn_matrices = attn_qk_group + attn_vo_group
+        mlp_w1_group = mlp_fc_params
+        mlp_w2_group = mlp_proj_params
+        all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+        # Scalar parameters (all others not explicitly grouped as matrices)
+        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+        for p_scalar in scalar_params:  # Sanity check
+            if p_scalar.ndim >= 2:
+                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+        # Determine parameter distribution based on optimizer_mode
+        muon_params_target_list = []
+        adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+        adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+        current_optimizer_mode = exp_args.optimizer_mode
+        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+        if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+            print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+            muon_params_target_list = all_attn_matrices + all_mlp_matrices
+            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+        elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_qk_group
+            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+        elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group
+            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+        elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_attn_matrices
+            adam_matrix_target_list = all_mlp_matrices
+        elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_mlp_matrices
+            adam_matrix_target_list = all_attn_matrices
+        elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = []
+            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+        elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = mlp_w2_group
+            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+        elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + all_mlp_matrices
+            adam_matrix_target_list = attn_qk_group
+        elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + mlp_w2_group
+            adam_matrix_target_list = attn_qk_group + mlp_w1_group
+        else:
+            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
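+
+        # Quick invariant sketch (illustrative, assuming the group lists above
+        # are flat lists of nn.Parameter): whichever mode is selected, the Muon
+        # and Adam matrix lists should be disjoint and together cover every
+        # attention/MLP matrix.
+        muon_ids = {id(p) for p in muon_params_target_list}
+        adam_ids = {id(p) for p in adam_matrix_target_list}
+        assert muon_ids.isdisjoint(adam_ids), "a matrix was assigned to both optimizers"
+        assert muon_ids | adam_ids == {id(p) for p in all_attn_matrices + all_mlp_matrices}, \
+            "some attention/MLP matrix is not covered by either optimizer"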
+
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    elif exp_args.model_parameterization == "whole":
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+        scalar_params = [p for p in model.parameters() if p.ndim < 2]
+        head_params = [model.lm_head.weight]
+
+        # init the optimizer(s)
+        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+        optimizers = [optimizer1, optimizer2]
+
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["initial_lr"] = group["lr"]
+
+    # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+    def get_lr(step: int):
+        x = step / args.num_iterations  # progress in training
+        # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+        # --- MODIFICATION: Adjust assert for LR schedule ---
+        if not (0 <= x <= 1):  # Allow x=1 for the last step
+            x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+            # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+        if x < 1 - args.cooldown_frac:
+            return 1.0
+        else:
+            # Ensure cooldown_frac is not zero to avoid division by zero
+            w = (1 - x) / max(args.cooldown_frac, 1e-9)
+            return w * 1.0 + (1 - w) * 0.1
+
+    # attention window size schedule (KEEP AS IS)
+    def next_multiple_of_n(v: float | int, *, n: int):
+        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+    @lru_cache(1)
+    def get_window_size_blocks_helper(window_size: int):
+        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+    def get_window_size_blocks(step: int):
+        x = step / args.num_iterations  # progress in training
+        # --- MODIFICATION: Adjust assert for window size schedule ---
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0)  # Clamp x
+
+        # Ensure window_size is at least 128
+        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+        return get_window_size_blocks_helper(window_size)
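+
+    # Worked values for the two schedules above, under this run's settings
+    # (num_iterations=10000, cooldown_frac=0.8 from the logged config):
+    #   get_lr(1000)  -> x=0.1 < 0.2, so 1.0 (stable phase)
+    #   get_lr(6000)  -> x=0.6, w=(1-0.6)/0.8=0.5, so 0.5*1.0 + 0.5*0.1 = 0.55
+    #   get_lr(10000) -> x=1.0, w=0.0, so 0.1 (end of cooldown)
+    #   get_window_size_blocks(0)     -> window 128 tokens  -> 1 block
+    #   get_window_size_blocks(5000)  -> 1728*0.5=864 -> 896 tokens -> 7 blocks
+    #   get_window_size_blocks(10000) -> 1728 -> 1792 tokens -> 14 blocks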
+
+    print0("PRINT: Compiling model with TorchInductor...", console=True)
+    # Use 'model' for compilation, not 'model_compiled' before it's defined
+    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+    print0("PRINT: Model compilation complete.", console=True)
+
+    ########################################
+    #            Warmup kernels            #
+    ########################################
+    print0("PRINT: Starting warmup...", console=True)
+    warmup_steps = 10
+    initial_state = dict(
+        model=copy.deepcopy(model_compiled.state_dict()),
+        optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+    )
+
+    for i in range(warmup_steps):
+        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+        loss.backward()
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+        for opt in optimizers:
+            opt.step()
+        model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+    del initial_state
+    print0("PRINT: Warmup complete.", console=True)
+    torch.cuda.synchronize()
+
+    ########################################
+    #      Training and validation         #
+    ########################################
+    print0("PRINT: Starting training...", console=True)
+    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+    train_loss_sum = torch.zeros(1, device=device)
+    train_step_count = torch.zeros(1, device=device)
+    training_time_ms = 0
+    torch.cuda.synchronize()
+    t0 = time.perf_counter()
+    train_steps = args.num_iterations
+
+    if master_process:
+        tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+    for step in range(train_steps + 1):
+        last_step = (step == train_steps)
+
+        # --------- VALIDATION SECTION ---------
+        if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+            torch.cuda.synchronize()
+            if step > 0:
+                current_run_time = 1000 * (time.perf_counter() - t0)
+                training_time_ms += current_run_time
+
+            model_compiled.eval()
+            val_batch_size = world_size * args.val_seq_len
+            if args.val_tokens % val_batch_size != 0:
+                print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+            val_num_steps = args.val_tokens // val_batch_size
+            val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+            val_loss_sum = torch.zeros(1, device=device)
+            actual_val_steps = 0
+
+            with torch.no_grad():
+                for val_i in range(val_num_steps):
+                    try:
+                        inputs, targets = next(val_loader)
+                        loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                        val_loss_sum += loss_val
+                        actual_val_steps += 1
+                    except StopIteration:
+                        print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                        break
+
+            if actual_val_steps > 0:
+                val_loss_avg = val_loss_sum / actual_val_steps
+            else:
+                val_loss_avg = torch.tensor(float('nan'), device=device)
+                print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+            del val_loader
+            dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+            if train_step_count > 0:
+                avg_train_loss = train_loss_sum / train_step_count
+                dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+                avg_train_loss = avg_train_loss.item()
+            else:
+                avg_train_loss = float('nan')
+
+            avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+            # first_token_acc = 0.0
+            # ft_correct = 0
+            # ft_total = 0
+            # if master_process and ft_tokenizer is not None:
+            #     try:
+            #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+            #             model_for_inference, ft_tokenizer, device, num_samples=1000
+            #         )
+            #     except Exception as e:
+            #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+            # if world_size > 1:
+            #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+            #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+            #     ft_total_tensor = torch.tensor(ft_total, device=device)
+            #     dist.broadcast(ft_acc_tensor, 0)
+            #     dist.broadcast(ft_correct_tensor, 0)
+            #     dist.broadcast(ft_total_tensor, 0)
+            #     first_token_acc = ft_acc_tensor.item()
+            #     ft_correct = int(ft_correct_tensor.item())
+            #     ft_total = int(ft_total_tensor.item())
+
+            avg_train_loss = float(avg_train_loss)
+            if step == 0:
+                print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+            else:
+                print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+            if master_process and step > 0:
+                selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+                class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+                model_for_inference.load_state_dict(model.state_dict())
+
+                eval_results = run_detailed_evaluation(
+                    model=model_for_inference,
+                    tokenizer=tokenizer_for_eval,
+                    qa_data_path=QA_JSONL_PATH,
+                    device=device,
+                    m_val=M_FOR_POWERLAW,
+                    class_to_group_map=class_to_group_map,
+                    num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                )
+
+                print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+                print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+                print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+                for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+                for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+                current_step_str = str(step)
+                history['total_loss'][current_step_str] = eval_results['total_loss']
+                history['total_acc'][current_step_str] = eval_results['total_acc']
+                for group_id, loss in eval_results['per_class_loss'].items():
+                    history['per_class_loss'][group_id][current_step_str] = loss
+                for group_id, acc in eval_results['per_class_acc'].items():
+                    history['per_class_acc'][group_id][current_step_str] = acc
+
+                plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+                plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+                plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+                plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+            if world_size > 1:
+                dist.barrier()
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-09 06:28:26] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-09 06:28:26] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-09 06:28:27] [Rank 0] PRINT: Constructing model...
+[2025-07-09 06:28:29] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-09 06:28:29] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-09 06:28:29] [Rank 0] PRINT: Testing model forward function:
+[2025-07-09 06:28:30] [Rank 0] PRINT: Model test - Result type:
+[2025-07-09 06:28:30] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-09 06:28:30] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-09 06:28:30] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-09 06:28:30] [Rank 0] PRINT: Model returns:
+[2025-07-09 06:28:30] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-09 06:28:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-09 06:28:30] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-09 06:28:30] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-09 06:28:30] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-09 06:28:30] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-09 06:28:30] [Rank 0] PRINT: Model compilation complete.
+[2025-07-09 06:28:30] [Rank 0] PRINT: Starting warmup...
+[2025-07-09 06:30:17] [Rank 0] PRINT: Warmup complete.
+[2025-07-09 06:30:17] [Rank 0] PRINT: Starting training...
+[2025-07-09 06:30:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:30:24] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-09 06:30:26] [Rank 0] step:21/10000 train_time:1652ms step_avg:78.64ms
+[2025-07-09 06:30:28] [Rank 0] step:41/10000 train_time:3106ms step_avg:75.75ms
+[2025-07-09 06:30:29] [Rank 0] step:61/10000 train_time:4561ms step_avg:74.76ms
+[2025-07-09 06:30:30] [Rank 0] step:81/10000 train_time:6016ms step_avg:74.28ms
+[2025-07-09 06:30:33] [Rank 0] step:101/10000 train_time:8147ms step_avg:80.66ms
+[2025-07-09 06:30:34] [Rank 0] step:121/10000 train_time:9604ms step_avg:79.37ms
+[2025-07-09 06:30:36] [Rank 0] step:141/10000 train_time:11221ms step_avg:79.58ms
+[2025-07-09 06:30:37] [Rank 0] step:161/10000 train_time:12683ms step_avg:78.78ms
+[2025-07-09 06:30:39] [Rank 0] step:181/10000 train_time:14199ms step_avg:78.45ms
+[2025-07-09 06:30:40] [Rank 0] step:201/10000 train_time:15798ms step_avg:78.60ms
+[2025-07-09 06:30:42] [Rank 0] step:221/10000 train_time:17259ms step_avg:78.10ms
+[2025-07-09 06:30:43] [Rank 0] step:241/10000 train_time:18722ms step_avg:77.69ms
+[2025-07-09 06:30:45] [Rank 0] step:261/10000 train_time:20187ms step_avg:77.35ms
+[2025-07-09 06:30:47] [Rank 0] step:281/10000 train_time:22319ms step_avg:79.43ms
+[2025-07-09 06:30:48] [Rank 0] step:301/10000 train_time:23783ms step_avg:79.01ms
+[2025-07-09 06:30:50] [Rank 0] step:321/10000 train_time:25249ms step_avg:78.66ms
+[2025-07-09 06:30:51] [Rank 0] step:341/10000 train_time:26714ms step_avg:78.34ms
+[2025-07-09 06:30:53] [Rank 0] step:361/10000 train_time:28178ms step_avg:78.06ms
+[2025-07-09 06:30:55] [Rank 0] step:381/10000 train_time:30287ms step_avg:79.49ms
+[2025-07-09 06:30:56] [Rank 0] step:401/10000 train_time:31752ms step_avg:79.18ms
+[2025-07-09 06:30:58] [Rank 0] step:421/10000 train_time:33219ms step_avg:78.90ms
+[2025-07-09 06:30:59] [Rank 0] step:441/10000 train_time:34688ms step_avg:78.66ms
+[2025-07-09 06:31:01] [Rank 0] step:461/10000 train_time:36818ms step_avg:79.87ms
+[2025-07-09 06:31:03] [Rank 0] step:481/10000 train_time:38284ms step_avg:79.59ms
+[2025-07-09 06:31:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:31:05] [Rank 0] PRINT: step:500/10000 train_loss:3.3890 val_loss:1.6154 train_time:39751ms step_avg:79.50ms
+[2025-07-09 06:31:05] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 06:31:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 06:31:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:36:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:36:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:36:22] [Rank 0] Total Loss: 4.3088
+[2025-07-09 06:36:22] [Rank 0] Total FTA: 0.0863
+[2025-07-09 06:36:22] [Rank 0] Group 0 Loss: 4.6282
+[2025-07-09 06:36:22] [Rank 0] Group 1 Loss: 4.2186
+[2025-07-09 06:36:22] [Rank 0] Group 2 Loss: 4.1195
+[2025-07-09 06:36:22] [Rank 0] Group 3 Loss: 4.2842
+[2025-07-09 06:36:22] [Rank 0] Group 4 Loss: 4.3032
+[2025-07-09 06:36:22] [Rank 0] Group 5 Loss: 4.2357
+[2025-07-09 06:36:22] [Rank 0] Group 6 Loss: 4.2283
+[2025-07-09 06:36:22] [Rank 0] Group 7 Loss: 4.2727
+[2025-07-09 06:36:22] [Rank 0] Group 8 Loss: 4.2895
+[2025-07-09 06:36:22] [Rank 0] Group 9 Loss: 4.2554
+[2025-07-09 06:36:22] [Rank 0] Group 10 Loss: 4.2903
+[2025-07-09 06:36:22] [Rank 0] Group 11 Loss: 4.2860
+[2025-07-09 06:36:22] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-09 06:36:22] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 06:36:22] [Rank 0] Group 2 FTA: 0.0521
+[2025-07-09 06:36:22] [Rank 0] Group 3 FTA: 0.0703
+[2025-07-09 06:36:22] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-09 06:36:22] [Rank 0] Group 5 FTA: 0.1016
+[2025-07-09 06:36:22] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-09 06:36:22] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-09 06:36:22] [Rank 0] Group 8 FTA: 0.1016
+[2025-07-09 06:36:22] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-09 06:36:22] [Rank 0] Group 10 FTA: 0.1016
+[2025-07-09 06:36:22] [Rank 0] Group 11 FTA: 0.0713
+[2025-07-09 06:36:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 06:36:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 06:36:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 06:36:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 06:36:23] [Rank 0] step:501/10000 train_time:39771ms step_avg:79.38ms
+[2025-07-09 06:36:25] [Rank 0] step:521/10000 train_time:41223ms step_avg:79.12ms
+[2025-07-09 06:36:27] [Rank 0] step:541/10000 train_time:43386ms step_avg:80.20ms
+[2025-07-09 06:36:28] [Rank 0] step:561/10000 train_time:44808ms step_avg:79.87ms
+[2025-07-09 06:36:30] [Rank 0] step:581/10000 train_time:46266ms step_avg:79.63ms
+[2025-07-09 06:36:31] [Rank 0] step:601/10000 train_time:47722ms step_avg:79.40ms
+[2025-07-09 06:36:33] [Rank 0] step:621/10000 train_time:49181ms step_avg:79.20ms
+[2025-07-09 06:36:35] [Rank 0] step:641/10000 train_time:50876ms step_avg:79.37ms
+[2025-07-09 06:36:36] [Rank 0] step:661/10000 train_time:52334ms step_avg:79.17ms
+[2025-07-09 06:36:37] [Rank 0] step:681/10000 train_time:53794ms step_avg:78.99ms
+[2025-07-09 06:36:39] [Rank 0] step:701/10000 train_time:55255ms step_avg:78.82ms
+[2025-07-09 06:36:41] [Rank 0] step:721/10000 train_time:56714ms step_avg:78.66ms
+[2025-07-09 06:36:42] [Rank 0] step:741/10000 train_time:58827ms step_avg:79.39ms
+[2025-07-09 06:36:44] [Rank 0] step:761/10000 train_time:60297ms step_avg:79.23ms
+[2025-07-09 06:36:45] [Rank 0] step:781/10000 train_time:61768ms step_avg:79.09ms
+[2025-07-09 06:36:47] [Rank 0] step:801/10000 train_time:63237ms step_avg:78.95ms
+[2025-07-09 06:36:49] [Rank 0] step:821/10000 train_time:64946ms step_avg:79.11ms
+[2025-07-09 06:36:50] [Rank 0] step:841/10000 train_time:66414ms step_avg:78.97ms
+[2025-07-09 06:36:52] [Rank 0] step:861/10000 train_time:67882ms step_avg:78.84ms
+[2025-07-09 06:36:53] [Rank 0] step:881/10000 train_time:69355ms step_avg:78.72ms
+[2025-07-09 06:36:55] [Rank 0] step:901/10000 train_time:71110ms step_avg:78.92ms
+[2025-07-09 06:36:56] [Rank 0] step:921/10000 train_time:72608ms step_avg:78.84ms
+[2025-07-09 06:36:58] [Rank 0] step:941/10000 train_time:74137ms step_avg:78.79ms
+[2025-07-09 06:36:59] [Rank 0] step:961/10000 train_time:75605ms step_avg:78.67ms
+[2025-07-09 06:37:01] [Rank 0] step:981/10000 train_time:77076ms step_avg:78.57ms
+[2025-07-09 06:37:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:37:04] [Rank 0] PRINT: step:1000/10000 train_loss:1.4641 val_loss:1.3214 train_time:79189ms step_avg:79.19ms
+[2025-07-09 06:37:04] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 06:37:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 06:37:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:42:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:42:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:42:22] [Rank 0] Total Loss: 4.3764
+[2025-07-09 06:42:22] [Rank 0] Total FTA: 0.1683
+[2025-07-09 06:42:22] [Rank 0] Group 0 Loss: 4.4726
+[2025-07-09 06:42:22] [Rank 0] Group 1 Loss: 4.2061
+[2025-07-09 06:42:22] [Rank 0] Group 2 Loss: 4.1096
+[2025-07-09 06:42:22] [Rank 0] Group 3 Loss: 4.4217
+[2025-07-09 06:42:22] [Rank 0] Group 4 Loss: 4.3903
+[2025-07-09 06:42:22] [Rank 0] Group 5 Loss: 4.3605
+[2025-07-09 06:42:22] [Rank 0] Group 6 Loss: 4.2993
+[2025-07-09 06:42:22] [Rank 0] Group 7 Loss: 4.4474
+[2025-07-09 06:42:23] [Rank 0] Group 8 Loss: 4.4144
+[2025-07-09 06:42:23] [Rank 0] Group 9 Loss: 4.3539
+[2025-07-09 06:42:23] [Rank 0] Group 10 Loss: 4.4486
+[2025-07-09 06:42:23] [Rank 0] Group 11 Loss: 4.4094
+[2025-07-09 06:42:23] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-09 06:42:23] [Rank 0] Group 1 FTA: 0.3385
+[2025-07-09 06:42:23] [Rank 0] Group 2 FTA: 0.1458
+[2025-07-09 06:42:23] [Rank 0] Group 3 FTA: 0.1380
+[2025-07-09 06:42:23] [Rank 0] Group 4 FTA: 0.1276
+[2025-07-09 06:42:23] [Rank 0] Group 5 FTA: 0.1536
+[2025-07-09 06:42:23] [Rank 0] Group 6 FTA: 0.1823
+[2025-07-09 06:42:23] [Rank 0] Group 7 FTA: 0.1641
+[2025-07-09 06:42:23] [Rank 0] Group 8 FTA: 0.1589
+[2025-07-09 06:42:23] [Rank 0] Group 9 FTA: 0.1367
+[2025-07-09 06:42:23] [Rank 0] Group 10 FTA: 0.1699
+[2025-07-09 06:42:23] [Rank 0] Group 11 FTA: 0.1553
+[2025-07-09 06:42:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 06:42:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 06:42:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 06:42:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 06:42:24] [Rank 0] step:1001/10000 train_time:79209ms step_avg:79.13ms
+[2025-07-09 06:42:25] [Rank 0] step:1021/10000 train_time:80681ms step_avg:79.02ms
+[2025-07-09 06:42:27] [Rank 0] step:1041/10000 train_time:82144ms step_avg:78.91ms
+[2025-07-09 06:42:28] [Rank 0] step:1061/10000 train_time:83607ms step_avg:78.80ms
+[2025-07-09 06:42:30] [Rank 0] step:1081/10000 train_time:85138ms step_avg:78.76ms
+[2025-07-09 06:42:32] [Rank 0] step:1101/10000 train_time:87186ms step_avg:79.19ms
+[2025-07-09 06:42:33] [Rank 0] step:1121/10000 train_time:88652ms step_avg:79.08ms
+[2025-07-09 06:42:35] [Rank 0] step:1141/10000 train_time:90116ms step_avg:78.98ms
+[2025-07-09 06:42:36] [Rank 0] step:1161/10000 train_time:91581ms step_avg:78.88ms
+[2025-07-09 06:42:38] [Rank 0] step:1181/10000 train_time:93716ms step_avg:79.35ms
+[2025-07-09 06:42:40] [Rank 0] step:1201/10000 train_time:95181ms step_avg:79.25ms
+[2025-07-09 06:42:41] [Rank 0] step:1221/10000 train_time:96647ms step_avg:79.15ms
+[2025-07-09 06:42:43] [Rank 0] step:1241/10000 train_time:98117ms step_avg:79.06ms
+[2025-07-09 06:42:45] [Rank 0] step:1261/10000 train_time:99585ms step_avg:78.97ms
+[2025-07-09 06:42:46] [Rank 0] step:1281/10000 train_time:101714ms step_avg:79.40ms
+[2025-07-09 06:42:48] [Rank 0] step:1301/10000 train_time:103183ms step_avg:79.31ms
+[2025-07-09 06:42:49] [Rank 0] step:1321/10000 train_time:104654ms step_avg:79.22ms
+[2025-07-09 06:42:51] [Rank 0] step:1341/10000 train_time:106124ms step_avg:79.14ms
+[2025-07-09 06:42:53] [Rank 0] step:1361/10000 train_time:108244ms step_avg:79.53ms
+[2025-07-09 06:42:54] [Rank 0] step:1381/10000 train_time:109715ms step_avg:79.45ms
+[2025-07-09 06:42:56] [Rank 0] step:1401/10000 train_time:111188ms step_avg:79.36ms
+[2025-07-09 06:42:57] [Rank 0] step:1421/10000 train_time:112659ms step_avg:79.28ms
+[2025-07-09 06:43:00] [Rank 0] step:1441/10000 train_time:114131ms step_avg:79.20ms
+[2025-07-09 06:43:01] [Rank 0] step:1461/10000 train_time:116247ms step_avg:79.57ms
+[2025-07-09 06:43:02] [Rank 0] step:1481/10000 train_time:117722ms step_avg:79.49ms
+[2025-07-09 06:43:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:43:05] [Rank 0] PRINT: step:1500/10000 train_loss:1.2254 val_loss:1.1942 train_time:119193ms step_avg:79.46ms
+[2025-07-09 06:43:05] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 06:43:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 06:43:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:48:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:48:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:48:25] [Rank 0] Total Loss: 4.6181
+[2025-07-09 06:48:25] [Rank 0] Total FTA: 0.3274
+[2025-07-09 06:48:25] [Rank 0] Group 0 Loss: 4.9021
+[2025-07-09 06:48:25] [Rank 0] Group 1 Loss: 4.4704
+[2025-07-09 06:48:25] [Rank 0] Group 2 Loss: 4.4421
+[2025-07-09 06:48:25] [Rank 0] Group 3 Loss: 4.7105
+[2025-07-09 06:48:25] [Rank 0] Group 4 Loss: 4.6085
+[2025-07-09 06:48:25] [Rank 0] Group 5 Loss: 4.5462
+[2025-07-09 06:48:25] [Rank 0] Group 6 Loss: 4.4965
+[2025-07-09 06:48:25] [Rank 0] Group 7 Loss: 4.6436
+[2025-07-09 06:48:25] [Rank 0] Group 8 Loss: 4.5899
+[2025-07-09 06:48:25] [Rank 0] Group 9 Loss: 4.5160
+[2025-07-09 06:48:25] [Rank 0] Group 10 Loss: 4.5903
+[2025-07-09 06:48:25] [Rank 0] Group 11 Loss: 4.6078
+[2025-07-09 06:48:25] [Rank 0] Group 0 FTA: 0.3329
+[2025-07-09 06:48:25] [Rank 0] Group 1 FTA: 0.4271
+[2025-07-09 06:48:25] [Rank 0] Group 2 FTA: 0.4219
+[2025-07-09 06:48:25] [Rank 0] Group 3 FTA: 0.2292
+[2025-07-09 06:48:25] [Rank 0] Group 4 FTA: 0.1328
+[2025-07-09 06:48:25] [Rank 0] Group 5 FTA: 0.3594
+[2025-07-09 06:48:25] [Rank 0] Group 6 FTA: 0.2995
+[2025-07-09 06:48:25] [Rank 0] Group 7 FTA: 0.3698
+[2025-07-09 06:48:25] [Rank 0] Group 8 FTA: 0.3464
+[2025-07-09 06:48:25] [Rank 0] Group 9 FTA: 0.3320
+[2025-07-09 06:48:25] [Rank 0] Group 10 FTA: 0.3594
+[2025-07-09 06:48:25] [Rank 0] Group 11 FTA: 0.3184
+[2025-07-09 06:48:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 06:48:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 06:48:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 06:48:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 06:48:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 06:48:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 06:48:26] [Rank 0] step:1501/10000 train_time:119215ms step_avg:79.42ms +[2025-07-09 06:48:26] [Rank 0] step:1501/10000 train_time:119215ms step_avg:79.42ms +[2025-07-09 06:48:28] [Rank 0] step:1521/10000 train_time:120691ms step_avg:79.35ms +[2025-07-09 06:48:28] [Rank 0] step:1521/10000 train_time:120691ms step_avg:79.35ms +[2025-07-09 06:48:30] [Rank 0] step:1541/10000 train_time:122823ms step_avg:79.70ms +[2025-07-09 06:48:30] [Rank 0] step:1541/10000 train_time:122823ms step_avg:79.70ms +[2025-07-09 06:48:32] [Rank 0] step:1561/10000 train_time:124510ms step_avg:79.76ms +[2025-07-09 06:48:32] [Rank 0] step:1561/10000 train_time:124510ms step_avg:79.76ms +[2025-07-09 06:48:33] [Rank 0] step:1581/10000 train_time:126139ms step_avg:79.78ms +[2025-07-09 06:48:33] [Rank 0] step:1581/10000 train_time:126139ms step_avg:79.78ms +[2025-07-09 06:48:35] [Rank 0] step:1601/10000 train_time:127605ms step_avg:79.70ms +[2025-07-09 06:48:35] [Rank 0] step:1601/10000 train_time:127605ms step_avg:79.70ms +[2025-07-09 06:48:37] [Rank 0] step:1621/10000 train_time:129342ms step_avg:79.79ms +[2025-07-09 06:48:37] [Rank 0] step:1621/10000 train_time:129342ms step_avg:79.79ms +[2025-07-09 06:48:38] [Rank 0] step:1641/10000 train_time:130776ms step_avg:79.69ms +[2025-07-09 06:48:38] [Rank 0] step:1641/10000 train_time:130776ms step_avg:79.69ms +[2025-07-09 06:48:39] [Rank 0] step:1661/10000 train_time:132244ms step_avg:79.62ms +[2025-07-09 06:48:39] [Rank 0] step:1661/10000 train_time:132244ms step_avg:79.62ms +[2025-07-09 06:48:41] [Rank 0] step:1681/10000 train_time:133711ms step_avg:79.54ms +[2025-07-09 06:48:41] [Rank 0] step:1681/10000 train_time:133711ms step_avg:79.54ms +[2025-07-09 06:48:42] [Rank 0] step:1701/10000 train_time:135179ms step_avg:79.47ms +[2025-07-09 06:48:42] [Rank 0] step:1701/10000 train_time:135179ms step_avg:79.47ms +[2025-07-09 06:48:44] [Rank 0] step:1721/10000 train_time:136882ms step_avg:79.54ms +[2025-07-09 06:48:44] [Rank 0] step:1721/10000 train_time:136882ms step_avg:79.54ms +[2025-07-09 06:48:46] [Rank 0] step:1741/10000 train_time:138351ms step_avg:79.47ms +[2025-07-09 06:48:46] [Rank 0] step:1741/10000 train_time:138351ms step_avg:79.47ms +[2025-07-09 06:48:47] [Rank 0] step:1761/10000 train_time:139822ms step_avg:79.40ms +[2025-07-09 06:48:47] [Rank 0] step:1761/10000 train_time:139822ms step_avg:79.40ms +[2025-07-09 06:48:48] [Rank 0] step:1781/10000 train_time:141291ms step_avg:79.33ms +[2025-07-09 06:48:48] [Rank 0] step:1781/10000 train_time:141291ms step_avg:79.33ms +[2025-07-09 06:48:50] [Rank 0] step:1801/10000 train_time:142813ms step_avg:79.30ms +[2025-07-09 06:48:50] [Rank 0] 
step:1801/10000 train_time:142813ms step_avg:79.30ms +[2025-07-09 06:48:52] [Rank 0] step:1821/10000 train_time:144467ms step_avg:79.33ms +[2025-07-09 06:48:52] [Rank 0] step:1821/10000 train_time:144467ms step_avg:79.33ms +[2025-07-09 06:48:53] [Rank 0] step:1841/10000 train_time:145939ms step_avg:79.27ms +[2025-07-09 06:48:53] [Rank 0] step:1841/10000 train_time:145939ms step_avg:79.27ms +[2025-07-09 06:48:55] [Rank 0] step:1861/10000 train_time:147408ms step_avg:79.21ms +[2025-07-09 06:48:55] [Rank 0] step:1861/10000 train_time:147408ms step_avg:79.21ms +[2025-07-09 06:48:56] [Rank 0] step:1881/10000 train_time:148879ms step_avg:79.15ms +[2025-07-09 06:48:56] [Rank 0] step:1881/10000 train_time:148879ms step_avg:79.15ms +[2025-07-09 06:48:58] [Rank 0] step:1901/10000 train_time:151015ms step_avg:79.44ms +[2025-07-09 06:48:58] [Rank 0] step:1901/10000 train_time:151015ms step_avg:79.44ms +[2025-07-09 06:49:00] [Rank 0] step:1921/10000 train_time:152486ms step_avg:79.38ms +[2025-07-09 06:49:00] [Rank 0] step:1921/10000 train_time:152486ms step_avg:79.38ms +[2025-07-09 06:49:01] [Rank 0] step:1941/10000 train_time:153958ms step_avg:79.32ms +[2025-07-09 06:49:01] [Rank 0] step:1941/10000 train_time:153958ms step_avg:79.32ms +[2025-07-09 06:49:03] [Rank 0] step:1961/10000 train_time:155430ms step_avg:79.26ms +[2025-07-09 06:49:03] [Rank 0] step:1961/10000 train_time:155430ms step_avg:79.26ms +[2025-07-09 06:49:05] [Rank 0] step:1981/10000 train_time:157159ms step_avg:79.33ms +[2025-07-09 06:49:05] [Rank 0] step:1981/10000 train_time:157159ms step_avg:79.33ms +[2025-07-09 06:49:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 06:49:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 06:49:07] [Rank 0] PRINT: step:2000/10000 train_loss:1.1158 val_loss:1.0912 train_time:159020ms step_avg:79.51ms +[2025-07-09 06:49:07] [Rank 0] PRINT: step:2000/10000 train_loss:1.1158 val_loss:1.0912 train_time:159020ms step_avg:79.51ms +[2025-07-09 06:49:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 06:49:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 06:49:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-09 06:49:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-09 06:49:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:54:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:54:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:54:27] [Rank 0] Total Loss: 5.0451
+[2025-07-09 06:54:27] [Rank 0] Total FTA: 0.5264
+[2025-07-09 06:54:27] [Rank 0] Group 0 Loss: 5.3939
+[2025-07-09 06:54:27] [Rank 0] Group 1 Loss: 4.8982
+[2025-07-09 06:54:27] [Rank 0] Group 2 Loss: 4.7529
+[2025-07-09 06:54:27] [Rank 0] Group 3 Loss: 5.1375
+[2025-07-09 06:54:27] [Rank 0] Group 4 Loss: 4.9817
+[2025-07-09 06:54:27] [Rank 0] Group 5 Loss: 4.9877
+[2025-07-09 06:54:27] [Rank 0] Group 6 Loss: 4.9680
+[2025-07-09 06:54:27] [Rank 0] Group 7 Loss: 5.0320
+[2025-07-09 06:54:27] [Rank 0] Group 8 Loss: 5.0329
+[2025-07-09 06:54:27] [Rank 0] Group 9 Loss: 4.9843
+[2025-07-09 06:54:27] [Rank 0] Group 10 Loss: 5.0071
+[2025-07-09 06:54:27] [Rank 0] Group 11 Loss: 5.0311
+[2025-07-09 06:54:27] [Rank 0] Group 0 FTA: 0.5098
+[2025-07-09 06:54:27] [Rank 0] Group 1 FTA: 0.3620
+[2025-07-09 06:54:27] [Rank 0] Group 2 FTA: 0.4896
+[2025-07-09 06:54:27] [Rank 0] Group 3 FTA: 0.4635
+[2025-07-09 06:54:27] [Rank 0] Group 4 FTA: 0.5417
+[2025-07-09 06:54:27] [Rank 0] Group 5 FTA: 0.6068
+[2025-07-09 06:54:27] [Rank 0] Group 6 FTA: 0.5286
+[2025-07-09 06:54:27] [Rank 0] Group 7 FTA: 0.5625
+[2025-07-09 06:54:27] [Rank 0] Group 8 FTA: 0.5573
+[2025-07-09 06:54:27] [Rank 0] Group 9 FTA: 0.5820
+[2025-07-09 06:54:27] [Rank 0] Group 10 FTA: 0.5117
+[2025-07-09 06:54:27] [Rank 0] Group 11 FTA: 0.5693
+[2025-07-09 06:54:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 06:54:28] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 06:54:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 06:54:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 06:54:29] [Rank 0] step:2001/10000 train_time:159041ms step_avg:79.48ms
+[2025-07-09 06:54:30] [Rank 0] step:2021/10000 train_time:160506ms step_avg:79.42ms
+[2025-07-09 06:54:32] [Rank 0] step:2041/10000 train_time:161971ms step_avg:79.36ms
+[2025-07-09 06:54:33] [Rank 0] step:2061/10000 train_time:163432ms step_avg:79.30ms
+[2025-07-09 06:54:35] [Rank 0] step:2081/10000 train_time:165571ms step_avg:79.56ms
+[2025-07-09 06:54:37] [Rank 0] step:2101/10000 train_time:167033ms step_avg:79.50ms
+[2025-07-09 06:54:38] [Rank 0] step:2121/10000 train_time:168498ms step_avg:79.44ms
+[2025-07-09 06:54:40] [Rank 0] step:2141/10000 train_time:169966ms step_avg:79.39ms
+[2025-07-09 06:54:42] [Rank 0] step:2161/10000 train_time:171431ms step_avg:79.33ms
+[2025-07-09 06:54:43] [Rank 0] step:2181/10000 train_time:173137ms step_avg:79.38ms
+[2025-07-09 06:54:44] [Rank 0] step:2201/10000 train_time:174605ms step_avg:79.33ms
+[2025-07-09 06:54:46] [Rank 0] step:2221/10000 train_time:176072ms step_avg:79.28ms
+[2025-07-09 06:54:47] [Rank 0] step:2241/10000 train_time:177563ms step_avg:79.23ms
+[2025-07-09 06:54:50] [Rank 0] step:2261/10000 train_time:179868ms step_avg:79.55ms
+[2025-07-09 06:54:51] [Rank 0] step:2281/10000 train_time:181430ms step_avg:79.54ms
+[2025-07-09 06:54:53] [Rank 0] step:2301/10000 train_time:183070ms step_avg:79.56ms
+[2025-07-09 06:54:54] [Rank 0] step:2321/10000 train_time:184564ms step_avg:79.52ms
+[2025-07-09 06:54:57] [Rank 0] step:2341/10000 train_time:186313ms step_avg:79.59ms
+[2025-07-09 06:54:58] [Rank 0] step:2361/10000 train_time:188221ms step_avg:79.72ms
+[2025-07-09 06:55:00] [Rank 0] step:2381/10000 train_time:189715ms step_avg:79.68ms
+[2025-07-09 06:55:01] [Rank 0] step:2401/10000 train_time:191209ms step_avg:79.64ms
+[2025-07-09 06:55:03] [Rank 0] step:2421/10000 train_time:192707ms step_avg:79.60ms
+[2025-07-09 06:55:05] [Rank 0] step:2441/10000 train_time:194863ms step_avg:79.83ms
+[2025-07-09 06:55:06] [Rank 0] step:2461/10000 train_time:196358ms step_avg:79.79ms
+[2025-07-09 06:55:08] [Rank 0] step:2481/10000 train_time:197854ms step_avg:79.75ms
+[2025-07-09 06:55:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:55:10] [Rank 0] PRINT: step:2500/10000 train_loss:0.9927 val_loss:0.9320 train_time:199349ms step_avg:79.74ms
+[2025-07-09 06:55:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 06:55:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 06:55:10] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:00:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:00:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:00:30] [Rank 0] Total Loss: 5.0856
+[2025-07-09 07:00:30] [Rank 0] Total FTA: 0.8358
+[2025-07-09 07:00:30] [Rank 0] Group 0 Loss: 5.2478
+[2025-07-09 07:00:30] [Rank 0] Group 1 Loss: 5.0361
+[2025-07-09 07:00:30] [Rank 0] Group 2 Loss: 4.9220
+[2025-07-09 07:00:30] [Rank 0] Group 3 Loss: 5.1613
+[2025-07-09 07:00:30] [Rank 0] Group 4 Loss: 5.1107
+[2025-07-09 07:00:30] [Rank 0] Group 5 Loss: 5.0253
+[2025-07-09 07:00:30] [Rank 0] Group 6 Loss: 4.9481
+[2025-07-09 07:00:30] [Rank 0] Group 7 Loss: 5.1128
+[2025-07-09 07:00:30] [Rank 0] Group 8 Loss: 5.0466
+[2025-07-09 07:00:30] [Rank 0] Group 9 Loss: 5.0651
+[2025-07-09 07:00:30] [Rank 0] Group 10 Loss: 5.0940
+[2025-07-09 07:00:30] [Rank 0] Group 11 Loss: 5.0855
+[2025-07-09 07:00:30] [Rank 0] Group 0 FTA: 0.8218
+[2025-07-09 07:00:30] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 07:00:30] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 07:00:30] [Rank 0] Group 3 FTA: 0.7448
+[2025-07-09 07:00:30] [Rank 0] Group 4 FTA: 0.8411
+[2025-07-09 07:00:30] [Rank 0] Group 5 FTA: 0.8568
+[2025-07-09 07:00:30] [Rank 0] Group 6 FTA: 0.8177
+[2025-07-09 07:00:30] [Rank 0] Group 7 FTA: 0.7708
+[2025-07-09 07:00:30] [Rank 0] Group 8 FTA: 0.7969
+[2025-07-09 07:00:30] [Rank 0] Group 9 FTA: 0.7734
+[2025-07-09 07:00:30] [Rank 0] Group 10 FTA: 0.8281
+[2025-07-09 07:00:30] [Rank 0] Group 11 FTA: 0.8125
+[2025-07-09 07:00:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:00:31] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 07:00:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:00:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:00:31] [Rank 0] step:2501/10000 train_time:199369ms step_avg:79.72ms
+[2025-07-09 07:00:33] [Rank 0] step:2521/10000 train_time:200875ms step_avg:79.68ms
+[2025-07-09 07:00:35] [Rank 0] step:2541/10000 train_time:203029ms step_avg:79.90ms
+[2025-07-09 07:00:36] [Rank 0] step:2561/10000 train_time:204514ms step_avg:79.86ms
+[2025-07-09 07:00:38] [Rank 0] step:2581/10000 train_time:206002ms step_avg:79.81ms
+[2025-07-09 07:00:39] [Rank 0] step:2601/10000 train_time:207493ms step_avg:79.77ms
+[2025-07-09 07:00:42] [Rank 0] step:2621/10000 train_time:209646ms step_avg:79.99ms
+[2025-07-09 07:00:43] [Rank 0] step:2641/10000 train_time:211132ms step_avg:79.94ms
+[2025-07-09 07:00:44] [Rank 0] step:2661/10000 train_time:212622ms step_avg:79.90ms
+[2025-07-09 07:00:46] [Rank 0] step:2681/10000 train_time:214113ms step_avg:79.86ms
+[2025-07-09 07:00:48] [Rank 0] step:2701/10000 train_time:215604ms step_avg:79.82ms
+[2025-07-09 07:00:49] [Rank 0] step:2721/10000 train_time:217331ms step_avg:79.87ms
+[2025-07-09 07:00:51] [Rank 0] step:2741/10000 train_time:218824ms step_avg:79.83ms
+[2025-07-09 07:00:52] [Rank 0] step:2761/10000 train_time:220317ms step_avg:79.80ms
+[2025-07-09 07:00:54] [Rank 0] step:2781/10000 train_time:221812ms step_avg:79.76ms
+[2025-07-09 07:00:55] [Rank 0] step:2801/10000 train_time:223541ms step_avg:79.81ms
+[2025-07-09 07:00:57] [Rank 0] step:2821/10000 train_time:225036ms step_avg:79.77ms
+[2025-07-09 07:00:58] [Rank 0] step:2841/10000 train_time:226529ms step_avg:79.74ms
+[2025-07-09 07:01:00] [Rank 0] step:2861/10000 train_time:228024ms step_avg:79.70ms
+[2025-07-09 07:01:02] [Rank 0] step:2881/10000 train_time:229520ms step_avg:79.67ms
+[2025-07-09 07:01:04] [Rank 0] step:2901/10000 train_time:231675ms step_avg:79.86ms
+[2025-07-09 07:01:05] [Rank 0] step:2921/10000 train_time:233169ms step_avg:79.83ms
+[2025-07-09 07:01:07] [Rank 0] step:2941/10000 train_time:234665ms step_avg:79.79ms
+[2025-07-09 07:01:08] [Rank 0] step:2961/10000 train_time:236161ms step_avg:79.76ms
+[2025-07-09 07:01:10] [Rank 0] step:2981/10000 train_time:238009ms step_avg:79.84ms
+[2025-07-09 07:01:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:01:12] [Rank 0] PRINT: step:3000/10000 train_loss:0.9103 val_loss:0.8901 train_time:239660ms step_avg:79.89ms
+[2025-07-09 07:01:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:01:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:01:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:06:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:06:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:06:33] [Rank 0] Total Loss: 5.2896
+[2025-07-09 07:06:33] [Rank 0] Total FTA: 0.9109
+[2025-07-09 07:06:33] [Rank 0] Group 0 Loss: 5.5460
+[2025-07-09 07:06:33] [Rank 0] Group 1 Loss: 5.4307
+[2025-07-09 07:06:33] [Rank 0] Group 2 Loss: 5.1381
+[2025-07-09 07:06:33] [Rank 0] Group 3 Loss: 5.3910
+[2025-07-09 07:06:33] [Rank 0] Group 4 Loss: 5.2285
+[2025-07-09 07:06:33] [Rank 0] Group 5 Loss: 5.2082
+[2025-07-09 07:06:33] [Rank 0] Group 6 Loss: 5.1326
+[2025-07-09 07:06:33] [Rank 0] Group 7 Loss: 5.2955
+[2025-07-09 07:06:33] [Rank 0] Group 8 Loss: 5.2271
+[2025-07-09 07:06:33] [Rank 0] Group 9 Loss: 5.2506
+[2025-07-09 07:06:33] [Rank 0] Group 10 Loss: 5.2140
+[2025-07-09 07:06:33] [Rank 0] Group 11 Loss: 5.2443
+[2025-07-09 07:06:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 07:06:33] [Rank 0] Group 1 FTA: 0.8464
+[2025-07-09 07:06:33] [Rank 0] Group 2 FTA: 0.9089
+[2025-07-09 07:06:33] [Rank 0] Group 3 FTA: 0.9479
+[2025-07-09 07:06:33] [Rank 0] Group 4 FTA: 0.9141
+[2025-07-09 07:06:33] [Rank 0] Group 5 FTA: 0.9531
+[2025-07-09 07:06:33] [Rank 0] Group 6 FTA: 0.8750
+[2025-07-09 07:06:33] [Rank 0] Group 7 FTA: 0.8646
+[2025-07-09 07:06:33] [Rank 0] Group 8 FTA: 0.9010
+[2025-07-09 07:06:33] [Rank 0] Group 9 FTA: 0.8359
+[2025-07-09 07:06:33] [Rank 0] Group 10 FTA: 0.9160
+[2025-07-09 07:06:33] [Rank 0] Group 11 FTA: 0.8887
+[2025-07-09 07:06:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:06:34] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 07:06:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:06:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:06:34] [Rank 0] step:3001/10000 train_time:239680ms step_avg:79.87ms
+[2025-07-09 07:06:36] [Rank 0] step:3021/10000 train_time:241165ms step_avg:79.83ms
+[2025-07-09 07:06:37] [Rank 0] step:3041/10000 train_time:242652ms step_avg:79.79ms
+[2025-07-09 07:06:39] [Rank 0] step:3061/10000 train_time:244398ms step_avg:79.84ms
+[2025-07-09 07:06:41] [Rank 0] step:3081/10000 train_time:246271ms step_avg:79.93ms
+[2025-07-09 07:06:42] [Rank 0] step:3101/10000 train_time:247761ms step_avg:79.90ms
+[2025-07-09 07:06:44] [Rank 0] step:3121/10000 train_time:249250ms step_avg:79.86ms
+[2025-07-09 07:06:45] [Rank 0] step:3141/10000 train_time:250742ms step_avg:79.83ms
+[2025-07-09 07:06:48] [Rank 0] step:3161/10000 train_time:252885ms step_avg:80.00ms
+[2025-07-09 07:06:49] [Rank 0] step:3181/10000 train_time:254376ms step_avg:79.97ms
+[2025-07-09 07:06:51] [Rank 0] step:3201/10000 train_time:255866ms step_avg:79.93ms
+[2025-07-09 07:06:52] [Rank 0] step:3221/10000 train_time:257358ms step_avg:79.90ms
+[2025-07-09 07:06:54] [Rank 0] step:3241/10000 train_time:258850ms step_avg:79.87ms
+[2025-07-09 07:06:56] [Rank 0] step:3261/10000 train_time:260996ms step_avg:80.04ms
+[2025-07-09 07:06:57] [Rank 0] step:3281/10000 train_time:262487ms step_avg:80.00ms
+[2025-07-09 07:06:59] [Rank 0] step:3301/10000 train_time:263981ms step_avg:79.97ms
+[2025-07-09 07:07:00] [Rank 0] step:3321/10000 train_time:265474ms step_avg:79.94ms
+[2025-07-09 07:07:02] [Rank 0] step:3341/10000 train_time:267607ms step_avg:80.10ms
+[2025-07-09 07:07:04] [Rank 0] step:3361/10000 train_time:269100ms step_avg:80.07ms
+[2025-07-09 07:07:05] [Rank 0] step:3381/10000 train_time:270593ms step_avg:80.03ms
+[2025-07-09 07:07:07] [Rank 0] step:3401/10000 train_time:272087ms step_avg:80.00ms
+[2025-07-09 07:07:09] [Rank 0] step:3421/10000 train_time:273838ms step_avg:80.05ms
+[2025-07-09 07:07:10] [Rank 0] step:3441/10000 train_time:275716ms step_avg:80.13ms
+[2025-07-09 07:07:12] [Rank 0] step:3461/10000 train_time:277211ms step_avg:80.10ms
+[2025-07-09 07:07:13] [Rank 0] step:3481/10000 train_time:278704ms step_avg:80.06ms
+[2025-07-09 07:07:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:07:16] [Rank 0] PRINT: step:3500/10000 train_loss:0.8877 val_loss:0.8810 train_time:280198ms step_avg:80.06ms
+[2025-07-09 07:07:16] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:07:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:07:16] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:12:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:12:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:12:35] [Rank 0] Total Loss: 5.3142
+[2025-07-09 07:12:35] [Rank 0] Total FTA: 0.9006
+[2025-07-09 07:12:35] [Rank 0] Group 0 Loss: 5.4878
+[2025-07-09 07:12:35] [Rank 0] Group 1 Loss: 5.4244
+[2025-07-09 07:12:35] [Rank 0] Group 2 Loss: 4.9356
+[2025-07-09 07:12:35] [Rank 0] Group 3 Loss: 5.4392
+[2025-07-09 07:12:35] [Rank 0] Group 4 Loss: 5.2822
+[2025-07-09 07:12:35] [Rank 0] Group 5 Loss: 5.2746
+[2025-07-09 07:12:35] [Rank 0] Group 6 Loss: 5.1995
+[2025-07-09 07:12:35] [Rank 0] Group 7 Loss: 5.3486
+[2025-07-09 07:12:35] [Rank 0] Group 8 Loss: 5.3179
+[2025-07-09 07:12:35] [Rank 0] Group 9 Loss: 5.2739
+[2025-07-09 07:12:35] [Rank 0] Group 10 Loss: 5.3106
+[2025-07-09 07:12:35] [Rank 0] Group 11 Loss: 5.3053
+[2025-07-09 07:12:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 07:12:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 07:12:35] [Rank 0] Group 2 FTA: 0.8073
+[2025-07-09 07:12:35] [Rank 0] Group 3 FTA: 0.8203
+[2025-07-09 07:12:35] [Rank 0] Group 4 FTA: 0.9297
+[2025-07-09 07:12:35] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-09 07:12:35] [Rank 0] Group 6 FTA: 0.8516
+[2025-07-09 07:12:35] [Rank 0] Group 7 FTA: 0.9089
+[2025-07-09 07:12:35] [Rank 0] Group 8 FTA: 0.8464
+[2025-07-09 07:12:35] [Rank 0] Group 9 FTA: 0.8984
+[2025-07-09 07:12:35] [Rank 0] Group 10 FTA: 0.8672
+[2025-07-09 07:12:35] [Rank 0] Group 11 FTA: 0.8809
+[2025-07-09 07:12:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:12:36] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 07:12:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:12:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:12:37] [Rank 0] step:3501/10000 train_time:280218ms step_avg:80.04ms
+[2025-07-09 07:12:39] [Rank 0] step:3521/10000 train_time:282369ms step_avg:80.20ms
+[2025-07-09 07:12:40] [Rank 0] step:3541/10000 train_time:283854ms step_avg:80.16ms
+[2025-07-09 07:12:42] [Rank 0] step:3561/10000 train_time:285342ms step_avg:80.13ms
+[2025-07-09 07:12:43] [Rank 0] step:3581/10000 train_time:286829ms step_avg:80.10ms
+[2025-07-09 07:12:45] [Rank 0] step:3601/10000 train_time:288382ms step_avg:80.08ms
+[2025-07-09 07:12:47] [Rank 0] step:3621/10000 train_time:290111ms step_avg:80.12ms
+[2025-07-09 07:12:48] [Rank 0] step:3641/10000 train_time:291709ms step_avg:80.12ms
+[2025-07-09 07:12:50] [Rank 0] step:3661/10000 train_time:293200ms step_avg:80.09ms
+[2025-07-09 07:12:51] [Rank 0] step:3681/10000 train_time:294691ms step_avg:80.06ms
+[2025-07-09 07:12:53] [Rank 0] step:3701/10000 train_time:296415ms step_avg:80.09ms
+[2025-07-09 07:12:54] [Rank 0] step:3721/10000 train_time:297908ms step_avg:80.06ms
+[2025-07-09 07:12:56] [Rank 0] step:3741/10000 train_time:299401ms step_avg:80.03ms
+[2025-07-09 07:12:57] [Rank 0] step:3761/10000 train_time:300893ms step_avg:80.00ms
+[2025-07-09 07:13:00] [Rank 0] step:3781/10000 train_time:302385ms step_avg:79.97ms
+[2025-07-09 07:13:01] [Rank 0] step:3801/10000 train_time:304528ms step_avg:80.12ms
+[2025-07-09 07:13:03] [Rank 0] step:3821/10000 train_time:306021ms step_avg:80.09ms
+[2025-07-09 07:13:04] [Rank 0] step:3841/10000 train_time:307516ms step_avg:80.06ms
+[2025-07-09 07:13:06] [Rank 0] step:3861/10000 train_time:309010ms step_avg:80.03ms
+[2025-07-09 07:13:08] [Rank 0] step:3881/10000 train_time:311174ms step_avg:80.18ms
+[2025-07-09 07:13:09] [Rank 0] step:3901/10000 train_time:312665ms step_avg:80.15ms
+[2025-07-09 07:13:11] [Rank 0] step:3921/10000 train_time:314159ms step_avg:80.12ms
+[2025-07-09 07:13:12] [Rank 0] step:3941/10000 train_time:315654ms step_avg:80.09ms
+[2025-07-09 07:13:14] [Rank 0] step:3961/10000 train_time:317148ms step_avg:80.07ms
+[2025-07-09 07:13:16] [Rank 0] step:3981/10000 train_time:319280ms step_avg:80.20ms
+[2025-07-09 07:13:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:13:18] [Rank 0] PRINT: step:4000/10000 train_loss:0.8795 val_loss:0.8744 train_time:320774ms step_avg:80.19ms
+[2025-07-09 07:13:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:13:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:13:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:18:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:18:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:18:35] [Rank 0] Total Loss: 5.3266
+[2025-07-09 07:18:35] [Rank 0] Total FTA: 0.9176
+[2025-07-09 07:18:35] [Rank 0] Group 0 Loss: 5.5774
+[2025-07-09 07:18:35] [Rank 0] Group 1 Loss: 5.3744
+[2025-07-09 07:18:35] [Rank 0] Group 2 Loss: 5.0884
+[2025-07-09 07:18:35] [Rank 0] Group 3 Loss: 5.4201
+[2025-07-09 07:18:35] [Rank 0] Group 4 Loss: 5.3261
+[2025-07-09 07:18:35] [Rank 0] Group 5 Loss: 5.2114
+[2025-07-09 07:18:35] [Rank 0] Group 6 Loss: 5.1512
+[2025-07-09 07:18:35] [Rank 0] Group 7 Loss: 5.3621
+[2025-07-09 07:18:35] [Rank 0] Group 8 Loss: 5.2943
+[2025-07-09 07:18:35] [Rank 0] Group 9 Loss: 5.2507
+[2025-07-09 07:18:35] [Rank 0] Group 10 Loss: 5.2888
+[2025-07-09 07:18:35] [Rank 0] Group 11 Loss: 5.3204
+[2025-07-09 07:18:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 07:18:35] [Rank 0] Group 1 FTA: 0.6719
+[2025-07-09 07:18:35] [Rank 0] Group 2 FTA: 0.8958
+[2025-07-09 07:18:35] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-09 07:18:35] [Rank 0] Group 4 FTA: 0.9375
+[2025-07-09 07:18:35] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-09 07:18:35] [Rank 0] Group 6 FTA: 0.9297
+[2025-07-09 07:18:35] [Rank 0] Group 7 FTA: 0.9115
+[2025-07-09 07:18:35] [Rank 0] Group 8 FTA: 0.9141
+[2025-07-09 07:18:35] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-09 07:18:35] [Rank 0] Group 10 FTA: 0.9258
+[2025-07-09 07:18:35] [Rank 0] Group 11 FTA: 0.9199
+[2025-07-09 07:18:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:18:36] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 07:18:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:18:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:18:37] [Rank 0] step:4001/10000 train_time:320795ms step_avg:80.18ms
+[2025-07-09 07:18:38] [Rank 0] step:4021/10000 train_time:322299ms step_avg:80.15ms
+[2025-07-09 07:18:40] [Rank 0] step:4041/10000 train_time:323787ms step_avg:80.13ms
+[2025-07-09 07:18:42] [Rank 0] step:4061/10000 train_time:325925ms step_avg:80.26ms
+[2025-07-09 07:18:43] [Rank 0] step:4081/10000 train_time:327413ms step_avg:80.23ms
+[2025-07-09 07:18:45] [Rank 0] step:4101/10000 train_time:328901ms step_avg:80.20ms
+[2025-07-09 07:18:46] [Rank 0] step:4121/10000 train_time:330390ms step_avg:80.17ms
+[2025-07-09 07:18:48] [Rank 0] step:4141/10000 train_time:331931ms step_avg:80.16ms
+[2025-07-09 07:18:50] [Rank 0] step:4161/10000 train_time:334035ms step_avg:80.28ms
+[2025-07-09 07:18:51] [Rank 0] step:4181/10000 train_time:335524ms step_avg:80.25ms
+[2025-07-09 07:18:53] [Rank 0] step:4201/10000 train_time:337016ms step_avg:80.22ms
+[2025-07-09 07:18:54] [Rank 0] step:4221/10000 train_time:338507ms step_avg:80.20ms
+[2025-07-09 07:18:56] [Rank 0] step:4241/10000 train_time:340236ms step_avg:80.23ms
+[2025-07-09 07:18:58] [Rank 0] step:4261/10000 train_time:341729ms step_avg:80.20ms
+[2025-07-09 07:18:59] [Rank 0] step:4281/10000 train_time:343219ms step_avg:80.17ms
+[2025-07-09 07:19:01] [Rank 0] step:4301/10000 train_time:344713ms step_avg:80.15ms
+[2025-07-09 07:19:03] [Rank 0] step:4321/10000 train_time:346206ms step_avg:80.12ms
+[2025-07-09 07:19:04] [Rank 0] step:4341/10000 train_time:348493ms step_avg:80.28ms
+[2025-07-09 07:19:06] [Rank 0] step:4361/10000 train_time:350045ms step_avg:80.27ms
+[2025-07-09 07:19:08] [Rank 0] step:4381/10000 train_time:351655ms step_avg:80.27ms
+[2025-07-09 07:19:09] [Rank 0] step:4401/10000 train_time:353150ms step_avg:80.24ms
+[2025-07-09 07:19:11] [Rank 0] step:4421/10000 train_time:355305ms step_avg:80.37ms
+[2025-07-09 07:19:13] [Rank 0] step:4441/10000 train_time:356798ms step_avg:80.34ms
+[2025-07-09 07:19:14] [Rank 0] step:4461/10000 train_time:358295ms step_avg:80.32ms
+[2025-07-09 07:19:16] [Rank 0] step:4481/10000 train_time:359788ms step_avg:80.29ms
+[2025-07-09 07:19:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:19:18] [Rank 0] PRINT: step:4500/10000 train_loss:0.8746 val_loss:0.8718 train_time:361283ms step_avg:80.29ms
+[2025-07-09 07:19:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:19:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:19:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:24:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:24:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:24:35] [Rank 0] Total Loss: 5.3688
+[2025-07-09 07:24:35] [Rank 0] Total FTA: 0.8814
+[2025-07-09 07:24:35] [Rank 0] Group 0 Loss: 5.6487
+[2025-07-09 07:24:35] [Rank 0] Group 1 Loss: 5.3717
+[2025-07-09 07:24:35] [Rank 0] Group 2 Loss: 5.0139
+[2025-07-09 07:24:35] [Rank 0] Group 3 Loss: 5.4471
+[2025-07-09 07:24:35] [Rank 0] Group 4 Loss: 5.3492
+[2025-07-09 07:24:35] [Rank 0] Group 5 Loss: 5.2834
+[2025-07-09 07:24:35] [Rank 0] Group 6 Loss: 5.2120
+[2025-07-09 07:24:35] [Rank 0] Group 7 Loss: 5.4284
+[2025-07-09 07:24:35] [Rank 0] Group 8 Loss: 5.3457
+[2025-07-09 07:24:35] [Rank 0] Group 9 Loss: 5.3399
+[2025-07-09 07:24:35] [Rank 0] Group 10 Loss: 5.3501
+[2025-07-09 07:24:35] [Rank 0] Group 11 Loss: 5.3625
+[2025-07-09 07:24:35] [Rank 0] Group 0 FTA: 0.8336
+[2025-07-09 07:24:35] [Rank 0] Group 1 FTA: 0.8333
+[2025-07-09 07:24:35] [Rank 0] Group 2 FTA: 0.9062
+[2025-07-09 07:24:35] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-09 07:24:35] [Rank 0] Group 4 FTA: 0.8177
+[2025-07-09 07:24:35] [Rank 0] Group 5 FTA: 0.9193
+[2025-07-09 07:24:35] [Rank 0] Group 6 FTA: 0.8646
+[2025-07-09 07:24:35] [Rank 0] Group 7 FTA: 0.9167
+[2025-07-09 07:24:35] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-09 07:24:35] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-09 07:24:35] [Rank 0] Group 10 FTA: 0.8867
+[2025-07-09 07:24:35] [Rank 0] Group 11 FTA: 0.8818
+[2025-07-09 07:24:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:24:36] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 07:24:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:24:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:24:37] [Rank 0] step:4501/10000 train_time:361311ms step_avg:80.27ms
+[2025-07-09 07:24:38] [Rank 0] step:4521/10000 train_time:363496ms step_avg:80.40ms
+[2025-07-09 07:24:40] [Rank 0] step:4541/10000 train_time:364981ms step_avg:80.37ms
+[2025-07-09 07:24:41] [Rank 0] step:4561/10000 train_time:366467ms step_avg:80.35ms
+[2025-07-09 07:24:43] [Rank 0] step:4581/10000 train_time:367955ms step_avg:80.32ms
+[2025-07-09 07:24:45] [Rank 0] step:4601/10000 train_time:370109ms step_avg:80.44ms
+[2025-07-09 07:24:47] [Rank 0] step:4621/10000 train_time:371597ms step_avg:80.41ms
+[2025-07-09 07:24:48] [Rank 0] step:4641/10000 train_time:373085ms step_avg:80.39ms
+[2025-07-09 07:24:50] [Rank 0] step:4661/10000 train_time:374574ms step_avg:80.36ms
+[2025-07-09 07:24:52] [Rank 0] step:4681/10000 train_time:376064ms step_avg:80.34ms
+[2025-07-09 07:24:53] [Rank 0] step:4701/10000 train_time:378219ms step_avg:80.45ms
+[2025-07-09 07:24:55] [Rank 0] step:4721/10000 train_time:379710ms step_avg:80.43ms
+[2025-07-09 07:24:56] [Rank 0] step:4741/10000 train_time:381202ms step_avg:80.41ms
+[2025-07-09 07:24:58] [Rank 0] step:4761/10000 train_time:382694ms step_avg:80.38ms
+[2025-07-09 07:24:59] [Rank 0] step:4781/10000 train_time:384420ms step_avg:80.41ms
+[2025-07-09 07:25:01] [Rank 0] step:4801/10000 train_time:385913ms step_avg:80.38ms
+[2025-07-09 07:25:02] [Rank 0] step:4821/10000 train_time:387406ms step_avg:80.36ms
+[2025-07-09 07:25:04] [Rank 0] step:4841/10000 train_time:388901ms step_avg:80.33ms
+[2025-07-09 07:25:06] [Rank 0] step:4861/10000 train_time:390395ms step_avg:80.31ms
+[2025-07-09 07:25:08] [Rank 0] step:4881/10000 train_time:392527ms step_avg:80.42ms
+[2025-07-09 07:25:09] [Rank 0] step:4901/10000 train_time:394021ms step_avg:80.40ms
+[2025-07-09 07:25:11] [Rank 0] step:4921/10000 train_time:395515ms step_avg:80.37ms
+[2025-07-09 07:25:12] [Rank 0] step:4941/10000 train_time:397008ms step_avg:80.35ms
+[2025-07-09 07:25:14] [Rank 0] step:4961/10000 train_time:398738ms step_avg:80.37ms
+[2025-07-09 07:25:15] [Rank 0] step:4981/10000 train_time:400231ms step_avg:80.35ms
+[2025-07-09 07:25:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:25:18] [Rank 0] PRINT: step:5000/10000 train_loss:0.8712 val_loss:0.8686 train_time:401726ms step_avg:80.35ms
+[2025-07-09 07:25:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:25:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:25:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:30:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:30:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:30:35] [Rank 0] Total Loss: 5.5054
+[2025-07-09 07:30:35] [Rank 0] Total FTA: 0.9221
+[2025-07-09 07:30:35] [Rank 0] Group 0 Loss: 6.0258
+[2025-07-09 07:30:35] [Rank 0] Group 1 Loss: 5.5557
+[2025-07-09 07:30:35] [Rank 0] Group 2 Loss: 5.1863
+[2025-07-09 07:30:35] [Rank 0] Group 3 Loss: 5.5413
+[2025-07-09 07:30:35] [Rank 0] Group 4 Loss: 5.4195
+[2025-07-09 07:30:35] [Rank 0] Group 5 Loss: 5.3832
+[2025-07-09 07:30:35] [Rank 0] Group 6 Loss: 5.2569
+[2025-07-09 07:30:35] [Rank 0] Group 7 Loss: 5.4554
+[2025-07-09 07:30:35] [Rank 0] Group 8 Loss: 5.4974
+[2025-07-09 07:30:35] [Rank 0] Group 9 Loss: 5.4408
+[2025-07-09 07:30:35] [Rank 0] Group 10 Loss: 5.4255
+[2025-07-09 07:30:35] [Rank 0] Group 11 Loss: 5.4511
+[2025-07-09 07:30:35] [Rank 0] Group 0 FTA: 0.8544
+[2025-07-09 07:30:35] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 07:30:35] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 07:30:35] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-09 07:30:35] [Rank 0] Group 4 FTA: 0.9557
+[2025-07-09 07:30:35] [Rank 0] Group 5 FTA: 0.9245
+[2025-07-09 07:30:35] [Rank 0] Group 6 FTA: 0.8672
+[2025-07-09 07:30:35] [Rank 0] Group 7 FTA: 0.9115
+[2025-07-09 07:30:35] [Rank 0] Group 8 FTA: 0.8984
+[2025-07-09 07:30:35] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-09 07:30:35] [Rank 0] Group 10 FTA: 0.9512
+[2025-07-09 07:30:35] [Rank 0] Group 11 FTA: 0.9326
+[2025-07-09 07:30:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:30:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
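Note the target/actual mismatch above: the sampler aims for ~5000 samples but lands on 5633 every cycle. One plausible explanation, sketched under stated assumptions (a per-group ceiling so every class stays represented; the function name and data layout are hypothetical):

```python
import math
import random
from collections import defaultdict

# Hypothetical sketch of the stratified sampler behind the "~5000 samples"
# message. Rounding up per group keeps all 12 groups represented, which
# would explain a final size (5633) above the nominal target.
def stratified_sample(samples, target=5000, seed=0):
    rng = random.Random(seed)
    by_group = defaultdict(list)
    for s in samples:
        by_group[s["group"]].append(s)
    frac = min(1.0, target / len(samples))
    picked = []
    for items in by_group.values():
        k = min(len(items), math.ceil(len(items) * frac))  # round up per group
        picked.extend(rng.sample(items, k))
    return picked  # can exceed `target` because of the per-group ceilings
```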
+[2025-07-09 07:30:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:30:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:30:36] [Rank 0] step:5001/10000 train_time:401749ms step_avg:80.33ms
+[2025-07-09 07:30:38] [Rank 0] step:5021/10000 train_time:403237ms step_avg:80.31ms
+[2025-07-09 07:30:40] [Rank 0] step:5041/10000 train_time:405394ms step_avg:80.42ms
+[2025-07-09 07:30:41] [Rank 0] step:5061/10000 train_time:407034ms step_avg:80.43ms
+[2025-07-09 07:30:43] [Rank 0] step:5081/10000 train_time:408678ms step_avg:80.43ms
+[2025-07-09 07:30:45] [Rank 0] step:5101/10000 train_time:410165ms step_avg:80.41ms
+[2025-07-09 07:30:46] [Rank 0] step:5121/10000 train_time:411655ms step_avg:80.39ms
+[2025-07-09 07:30:48] [Rank 0] step:5141/10000 train_time:413379ms step_avg:80.41ms
+[2025-07-09 07:30:49] [Rank 0] step:5161/10000 train_time:414870ms step_avg:80.39ms
+[2025-07-09 07:30:51] [Rank 0] step:5181/10000 train_time:416361ms step_avg:80.36ms
+[2025-07-09 07:30:52] [Rank 0] step:5201/10000 train_time:417852ms step_avg:80.34ms
+[2025-07-09 07:30:54] [Rank 0] step:5221/10000 train_time:419342ms step_avg:80.32ms
+[2025-07-09 07:30:56] [Rank 0] step:5241/10000 train_time:421488ms step_avg:80.42ms
+[2025-07-09 07:30:57] [Rank 0] step:5261/10000 train_time:422981ms step_avg:80.40ms
+[2025-07-09 07:30:59] [Rank 0] step:5281/10000 train_time:424473ms step_avg:80.38ms
+[2025-07-09 07:31:00] [Rank 0] step:5301/10000 train_time:425966ms step_avg:80.36ms
+[2025-07-09 07:31:03] [Rank 0] step:5321/10000 train_time:428101ms step_avg:80.46ms
+[2025-07-09 07:31:04] [Rank 0] step:5341/10000 train_time:429594ms step_avg:80.43ms
+[2025-07-09 07:31:06] [Rank 0] step:5361/10000 train_time:431087ms step_avg:80.41ms
+[2025-07-09 07:31:07] [Rank 0] step:5381/10000 train_time:432582ms step_avg:80.39ms
+[2025-07-09 07:31:09] [Rank 0] step:5401/10000 train_time:434075ms step_avg:80.37ms
+[2025-07-09 07:31:11] [Rank 0] step:5421/10000 train_time:436211ms step_avg:80.47ms
+[2025-07-09 07:31:12] [Rank 0] step:5441/10000 train_time:437706ms step_avg:80.45ms
+[2025-07-09 07:31:14] [Rank 0] step:5461/10000 train_time:439201ms step_avg:80.43ms
+[2025-07-09 07:31:15] [Rank 0] step:5481/10000 train_time:440697ms step_avg:80.40ms
+[2025-07-09 07:31:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:31:18] [Rank 0] PRINT: step:5500/10000 train_loss:0.8681 val_loss:0.8691 train_time:442856ms step_avg:80.52ms
+[2025-07-09 07:31:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:31:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:31:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:36:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:36:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:36:36] [Rank 0] Total Loss: 5.5187
+[2025-07-09 07:36:36] [Rank 0] Total FTA: 0.9388
+[2025-07-09 07:36:36] [Rank 0] Group 0 Loss: 6.0173
+[2025-07-09 07:36:36] [Rank 0] Group 1 Loss: 5.6095
+[2025-07-09 07:36:36] [Rank 0] Group 2 Loss: 5.2420
+[2025-07-09 07:36:36] [Rank 0] Group 3 Loss: 5.4916
+[2025-07-09 07:36:36] [Rank 0] Group 4 Loss: 5.4150
+[2025-07-09 07:36:36] [Rank 0] Group 5 Loss: 5.5256
+[2025-07-09 07:36:36] [Rank 0] Group 6 Loss: 5.3473
+[2025-07-09 07:36:36] [Rank 0] Group 7 Loss: 5.4410
+[2025-07-09 07:36:36] [Rank 0] Group 8 Loss: 5.4565
+[2025-07-09 07:36:36] [Rank 0] Group 9 Loss: 5.4183
+[2025-07-09 07:36:36] [Rank 0] Group 10 Loss: 5.4062
+[2025-07-09 07:36:36] [Rank 0] Group 11 Loss: 5.4585
+[2025-07-09 07:36:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 07:36:36] [Rank 0] Group 1 FTA: 0.8464
+[2025-07-09 07:36:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 07:36:36] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-09 07:36:36] [Rank 0] Group 4 FTA: 0.9323
+[2025-07-09 07:36:36] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-09 07:36:36] [Rank 0] Group 6 FTA: 0.9167
+[2025-07-09 07:36:36] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-09 07:36:36] [Rank 0] Group 8 FTA: 0.9115
+[2025-07-09 07:36:36] [Rank 0] Group 9 FTA: 0.9219
+[2025-07-09 07:36:36] [Rank 0] Group 10 FTA: 0.9336
+[2025-07-09 07:36:36] [Rank 0] Group 11 FTA: 0.9180
+[2025-07-09 07:36:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:36:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
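A hedged reconstruction of how the per-group numbers above could be computed. It assumes "FTA" stands for first-token accuracy on the answer span and that each eval sample carries a group (class) id; both are inferences from the log, not confirmed by the training script:

```python
import torch

# Sketch only: per-group mean loss and first-token accuracy over the
# stratified eval set. Batch layout (inputs, targets, first_tok_idx, group)
# is an assumption made for illustration.
@torch.no_grad()
def detailed_eval(model, batches, num_groups=12):
    loss_sum = torch.zeros(num_groups); loss_cnt = torch.zeros(num_groups)
    hit = torch.zeros(num_groups); tot = torch.zeros(num_groups)
    for inputs, targets, first_tok_idx, group in batches:
        logits = model(inputs)  # (B, T, vocab)
        losses = torch.nn.functional.cross_entropy(
            logits.transpose(1, 2), targets, reduction="none").mean(dim=1)  # (B,)
        rows = torch.arange(len(inputs))
        preds = logits[rows, first_tok_idx].argmax(-1)          # predicted first answer token
        correct = preds == targets[rows, first_tok_idx]
        for g in range(num_groups):
            m = group == g
            loss_sum[g] += losses[m].sum(); loss_cnt[g] += m.sum()
            hit[g] += correct[m].sum(); tot[g] += m.sum()
    return loss_sum / loss_cnt.clamp(min=1), hit / tot.clamp(min=1)
```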
+[2025-07-09 07:36:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:36:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:36:37] [Rank 0] step:5501/10000 train_time:442877ms step_avg:80.51ms
+[2025-07-09 07:36:39] [Rank 0] step:5521/10000 train_time:444363ms step_avg:80.49ms
+[2025-07-09 07:36:40] [Rank 0] step:5541/10000 train_time:445848ms step_avg:80.46ms
+[2025-07-09 07:36:42] [Rank 0] step:5561/10000 train_time:447335ms step_avg:80.44ms
+[2025-07-09 07:36:44] [Rank 0] step:5581/10000 train_time:448823ms step_avg:80.42ms
+[2025-07-09 07:36:45] [Rank 0] step:5601/10000 train_time:450950ms step_avg:80.51ms
+[2025-07-09 07:36:47] [Rank 0] step:5621/10000 train_time:452437ms step_avg:80.49ms
+[2025-07-09 07:36:48] [Rank 0] step:5641/10000 train_time:453926ms step_avg:80.47ms
+[2025-07-09 07:36:50] [Rank 0] step:5661/10000 train_time:455415ms step_avg:80.45ms
+[2025-07-09 07:36:51] [Rank 0] step:5681/10000 train_time:457141ms step_avg:80.47ms
+[2025-07-09 07:36:53] [Rank 0] step:5701/10000 train_time:458631ms step_avg:80.45ms
+[2025-07-09 07:36:54] [Rank 0] step:5721/10000 train_time:460122ms step_avg:80.43ms
+[2025-07-09 07:36:56] [Rank 0] step:5741/10000 train_time:461613ms step_avg:80.41ms
+[2025-07-09 07:36:58] [Rank 0] step:5761/10000 train_time:463158ms step_avg:80.40ms
+[2025-07-09 07:36:59] [Rank 0] step:5781/10000 train_time:465001ms step_avg:80.44ms
+[2025-07-09 07:37:01] [Rank 0] step:5801/10000 train_time:466559ms step_avg:80.43ms
+[2025-07-09 07:37:03] [Rank 0] step:5821/10000 train_time:468187ms step_avg:80.43ms
+[2025-07-09 07:37:04] [Rank 0] step:5841/10000 train_time:469681ms step_avg:80.41ms
+[2025-07-09 07:37:06] [Rank 0] step:5861/10000 train_time:471839ms step_avg:80.50ms
+[2025-07-09 07:37:08] [Rank 0] step:5881/10000 train_time:473333ms step_avg:80.49ms
+[2025-07-09 07:37:09] [Rank 0] step:5901/10000 train_time:474829ms step_avg:80.47ms
+[2025-07-09 07:37:11] [Rank 0] step:5921/10000 train_time:476322ms step_avg:80.45ms
+[2025-07-09 07:37:13] [Rank 0] step:5941/10000 train_time:478473ms step_avg:80.54ms
+[2025-07-09 07:37:14] [Rank 0] step:5961/10000 train_time:479946ms step_avg:80.51ms
+[2025-07-09 07:37:16] [Rank 0] step:5981/10000 train_time:481439ms step_avg:80.49ms
+[2025-07-09 07:37:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:37:18] [Rank 0] PRINT: step:6000/10000 train_loss:0.8664 val_loss:0.8660 train_time:482932ms step_avg:80.49ms
+[2025-07-09 07:37:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:37:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:37:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:42:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:42:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:42:37] [Rank 0] Total Loss: 5.5314
+[2025-07-09 07:42:37] [Rank 0] Total FTA: 0.9127
+[2025-07-09 07:42:37] [Rank 0] Group 0 Loss: 5.9718
+[2025-07-09 07:42:37] [Rank 0] Group 1 Loss: 5.7008
+[2025-07-09 07:42:37] [Rank 0] Group 2 Loss: 5.2557
+[2025-07-09 07:42:37] [Rank 0] Group 3 Loss: 5.4225
+[2025-07-09 07:42:37] [Rank 0] Group 4 Loss: 5.4258
+[2025-07-09 07:42:37] [Rank 0] Group 5 Loss: 5.4741
+[2025-07-09 07:42:37] [Rank 0] Group 6 Loss: 5.3868
+[2025-07-09 07:42:37] [Rank 0] Group 7 Loss: 5.5142
+[2025-07-09 07:42:37] [Rank 0] Group 8 Loss: 5.4974
+[2025-07-09 07:42:37] [Rank 0] Group 9 Loss: 5.4769
+[2025-07-09 07:42:37] [Rank 0] Group 10 Loss: 5.4559
+[2025-07-09 07:42:37] [Rank 0] Group 11 Loss: 5.4674
+[2025-07-09 07:42:37] [Rank 0] Group 0 FTA: 0.8349
+[2025-07-09 07:42:37] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 07:42:37] [Rank 0] Group 2 FTA: 0.9245
+[2025-07-09 07:42:37] [Rank 0] Group 3 FTA: 0.9661
+[2025-07-09 07:42:37] [Rank 0] Group 4 FTA: 0.8906
+[2025-07-09 07:42:37] [Rank 0] Group 5 FTA: 0.9531
+[2025-07-09 07:42:37] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-09 07:42:37] [Rank 0] Group 7 FTA: 0.9271
+[2025-07-09 07:42:37] [Rank 0] Group 8 FTA: 0.9089
+[2025-07-09 07:42:37] [Rank 0] Group 9 FTA: 0.9180
+[2025-07-09 07:42:37] [Rank 0] Group 10 FTA: 0.9004
+[2025-07-09 07:42:37] [Rank 0] Group 11 FTA: 0.9004
+[2025-07-09 07:42:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:42:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
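The "[✓] ... curve updated and saved" lines show the script re-rendering all four PNGs after every detailed evaluation. A plausible matplotlib routine for the per-class plots; only the output paths and message format come from the log, while the `history` layout and function name are assumptions:

```python
import matplotlib
matplotlib.use("Agg")  # headless rendering on a training node
import matplotlib.pyplot as plt

# Hypothetical re-plotting helper behind the "[✓] ... curve updated" lines.
# `history` maps group id -> list of (step, value) pairs.
def update_per_class_curve(history, out_path, ylabel):
    fig, ax = plt.subplots()
    for group, points in sorted(history.items()):
        steps, values = zip(*points)
        ax.plot(steps, values, label=f"Group {group}")
    ax.set_xlabel("step"); ax.set_ylabel(ylabel); ax.legend(fontsize=6)
    fig.savefig(out_path, dpi=150); plt.close(fig)
    print(f"[✓] Per-Class {ylabel} curve updated and saved to: {out_path}")
```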
+[2025-07-09 07:42:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:42:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:42:39] [Rank 0] step:6001/10000 train_time:482954ms step_avg:80.48ms
+[2025-07-09 07:42:40] [Rank 0] step:6021/10000 train_time:484444ms step_avg:80.46ms
+[2025-07-09 07:42:42] [Rank 0] step:6041/10000 train_time:486169ms step_avg:80.48ms
+[2025-07-09 07:42:43] [Rank 0] step:6061/10000 train_time:487656ms step_avg:80.46ms
+[2025-07-09 07:42:45] [Rank 0] step:6081/10000 train_time:489142ms step_avg:80.44ms
+[2025-07-09 07:42:46] [Rank 0] step:6101/10000 train_time:490630ms step_avg:80.42ms
+[2025-07-09 07:42:48] [Rank 0] step:6121/10000 train_time:492119ms step_avg:80.40ms
+[2025-07-09 07:42:50] [Rank 0] step:6141/10000 train_time:494270ms step_avg:80.49ms
+[2025-07-09 07:42:51] [Rank 0] step:6161/10000 train_time:495758ms step_avg:80.47ms
+[2025-07-09 07:42:53] [Rank 0] step:6181/10000 train_time:497247ms step_avg:80.45ms
+[2025-07-09 07:42:54] [Rank 0] step:6201/10000 train_time:498739ms step_avg:80.43ms
+[2025-07-09 07:42:57] [Rank 0] step:6221/10000 train_time:500883ms step_avg:80.51ms
+[2025-07-09 07:42:58] [Rank 0] step:6241/10000 train_time:502374ms step_avg:80.50ms
+[2025-07-09 07:43:00] [Rank 0] step:6261/10000 train_time:503867ms step_avg:80.48ms
+[2025-07-09 07:43:01] [Rank 0] step:6281/10000 train_time:505360ms step_avg:80.46ms
+[2025-07-09 07:43:03] [Rank 0] step:6301/10000 train_time:506851ms step_avg:80.44ms
+[2025-07-09 07:43:05] [Rank 0] step:6321/10000 train_time:508993ms step_avg:80.52ms
+[2025-07-09 07:43:06] [Rank 0] step:6341/10000 train_time:510486ms step_avg:80.51ms
+[2025-07-09 07:43:08] [Rank 0] step:6361/10000 train_time:511979ms step_avg:80.49ms
+[2025-07-09 07:43:09] [Rank 0] step:6381/10000 train_time:513471ms step_avg:80.47ms
+[2025-07-09 07:43:11] [Rank 0] step:6401/10000 train_time:515603ms step_avg:80.55ms
+[2025-07-09 07:43:13] [Rank 0] step:6421/10000 train_time:517098ms step_avg:80.53ms
+[2025-07-09 07:43:14] [Rank 0] step:6441/10000 train_time:518591ms step_avg:80.51ms
+[2025-07-09 07:43:16] [Rank 0] step:6461/10000 train_time:520084ms step_avg:80.50ms
+[2025-07-09 07:43:18] [Rank 0] step:6481/10000 train_time:521577ms step_avg:80.48ms
+[2025-07-09 07:43:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:43:20] [Rank 0] PRINT: step:6500/10000 train_loss:0.8652 val_loss:0.8649 train_time:523907ms step_avg:80.60ms
+[2025-07-09 07:43:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:43:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:43:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:48:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:48:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:48:40] [Rank 0] Total Loss: 5.5720
+[2025-07-09 07:48:40] [Rank 0] Total FTA: 0.9075
+[2025-07-09 07:48:40] [Rank 0] Group 0 Loss: 6.1150
+[2025-07-09 07:48:40] [Rank 0] Group 1 Loss: 5.6217
+[2025-07-09 07:48:40] [Rank 0] Group 2 Loss: 5.3052
+[2025-07-09 07:48:40] [Rank 0] Group 3 Loss: 5.4877
+[2025-07-09 07:48:40] [Rank 0] Group 4 Loss: 5.4646
+[2025-07-09 07:48:40] [Rank 0] Group 5 Loss: 5.5134
+[2025-07-09 07:48:40] [Rank 0] Group 6 Loss: 5.3575
+[2025-07-09 07:48:40] [Rank 0] Group 7 Loss: 5.5448
+[2025-07-09 07:48:40] [Rank 0] Group 8 Loss: 5.4881
+[2025-07-09 07:48:40] [Rank 0] Group 9 Loss: 5.4469
+[2025-07-09 07:48:40] [Rank 0] Group 10 Loss: 5.5295
+[2025-07-09 07:48:40] [Rank 0] Group 11 Loss: 5.5142
+[2025-07-09 07:48:40] [Rank 0] Group 0 FTA: 0.6580
+[2025-07-09 07:48:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 07:48:40] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 07:48:40] [Rank 0] Group 3 FTA: 0.9505
+[2025-07-09 07:48:40] [Rank 0] Group 4 FTA: 0.9349
+[2025-07-09 07:48:40] [Rank 0] Group 5 FTA: 0.9219
+[2025-07-09 07:48:40] [Rank 0] Group 6 FTA: 0.9271
+[2025-07-09 07:48:40] [Rank 0] Group 7 FTA: 0.9349
+[2025-07-09 07:48:40] [Rank 0] Group 8 FTA: 0.9062
+[2025-07-09 07:48:40] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-09 07:48:40] [Rank 0] Group 10 FTA: 0.9473
+[2025-07-09 07:48:40] [Rank 0] Group 11 FTA: 0.9463
+[2025-07-09 07:48:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:48:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
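The step lines interleaved between eval blocks report a cumulative `train_time` plus a running mean, and the two are consistent: `step_avg` is simply the cumulative time divided by the step index (how the detailed-eval time is excluded from the timer is not visible in the log):

```python
def step_avg_ms(train_time_ms: int, step: int) -> float:
    # Running mean of wall-clock time per optimizer step.
    return train_time_ms / step

print(f"{step_avg_ms(523929, 6501):.2f}ms")  # -> 80.59ms, matching step:6501 below
```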
+[2025-07-09 07:48:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:48:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:48:41] [Rank 0] step:6501/10000 train_time:523929ms step_avg:80.59ms
+[2025-07-09 07:48:43] [Rank 0] step:6521/10000 train_time:525429ms step_avg:80.57ms
+[2025-07-09 07:48:44] [Rank 0] step:6541/10000 train_time:526913ms step_avg:80.56ms
+[2025-07-09 07:48:46] [Rank 0] step:6561/10000 train_time:528398ms step_avg:80.54ms
+[2025-07-09 07:48:48] [Rank 0] step:6581/10000 train_time:530531ms step_avg:80.62ms
+[2025-07-09 07:48:49] [Rank 0] step:6601/10000 train_time:532017ms step_avg:80.60ms
+[2025-07-09 07:48:51] [Rank 0] step:6621/10000 train_time:533505ms step_avg:80.58ms
+[2025-07-09 07:48:52] [Rank 0] step:6641/10000 train_time:534995ms step_avg:80.56ms
+[2025-07-09 07:48:54] [Rank 0] step:6661/10000 train_time:536738ms step_avg:80.58ms
+[2025-07-09 07:48:56] [Rank 0] step:6681/10000 train_time:538638ms step_avg:80.62ms
+[2025-07-09 07:48:57] [Rank 0] step:6701/10000 train_time:540129ms step_avg:80.60ms
+[2025-07-09 07:48:59] [Rank 0] step:6721/10000 train_time:541617ms step_avg:80.59ms
+[2025-07-09 07:49:00] [Rank 0] step:6741/10000 train_time:543109ms step_avg:80.57ms
+[2025-07-09 07:49:02] [Rank 0] step:6761/10000 train_time:544839ms step_avg:80.59ms
+[2025-07-09 07:49:04] [Rank 0] step:6781/10000 train_time:546331ms step_avg:80.57ms
+[2025-07-09 07:49:05] [Rank 0] step:6801/10000 train_time:547822ms step_avg:80.55ms
+[2025-07-09 07:49:07] [Rank 0] step:6821/10000 train_time:549314ms step_avg:80.53ms
+[2025-07-09 07:49:09] [Rank 0] step:6841/10000 train_time:550807ms step_avg:80.52ms
+[2025-07-09 07:49:10] [Rank 0] step:6861/10000 train_time:552945ms step_avg:80.59ms
+[2025-07-09 07:49:12] [Rank 0] step:6881/10000 train_time:554436ms step_avg:80.57ms
+[2025-07-09 07:49:13] [Rank 0] step:6901/10000 train_time:555929ms step_avg:80.56ms
+[2025-07-09 07:49:15] [Rank 0] step:6921/10000 train_time:557422ms step_avg:80.54ms
+[2025-07-09 07:49:17] [Rank 0] step:6941/10000 train_time:559556ms step_avg:80.62ms
+[2025-07-09 07:49:18] [Rank 0] step:6961/10000 train_time:561048ms step_avg:80.60ms
+[2025-07-09 07:49:20] [Rank 0] step:6981/10000 train_time:562541ms step_avg:80.58ms
+[2025-07-09 07:49:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:49:22] [Rank 0] PRINT: step:7000/10000 train_loss:0.8636 val_loss:0.8637 train_time:564034ms step_avg:80.58ms
+[2025-07-09 07:49:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:49:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:49:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 07:54:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 07:54:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 07:54:41] [Rank 0] Total Loss: 5.4918
+[2025-07-09 07:54:41] [Rank 0] Total FTA: 0.9304
+[2025-07-09 07:54:41] [Rank 0] Group 0 Loss: 5.9747
+[2025-07-09 07:54:41] [Rank 0] Group 1 Loss: 5.7442
+[2025-07-09 07:54:41] [Rank 0] Group 2 Loss: 5.3507
+[2025-07-09 07:54:41] [Rank 0] Group 3 Loss: 5.3028
+[2025-07-09 07:54:41] [Rank 0] Group 4 Loss: 5.3967
+[2025-07-09 07:54:41] [Rank 0] Group 5 Loss: 5.3869
+[2025-07-09 07:54:41] [Rank 0] Group 6 Loss: 5.2634
+[2025-07-09 07:54:41] [Rank 0] Group 7 Loss: 5.4357
+[2025-07-09 07:54:41] [Rank 0] Group 8 Loss: 5.4598
+[2025-07-09 07:54:41] [Rank 0] Group 9 Loss: 5.3434
+[2025-07-09 07:54:41] [Rank 0] Group 10 Loss: 5.4416
+[2025-07-09 07:54:41] [Rank 0] Group 11 Loss: 5.4142
+[2025-07-09 07:54:41] [Rank 0] Group 0 FTA: 0.8570
+[2025-07-09 07:54:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 07:54:41] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-09 07:54:41] [Rank 0] Group 3 FTA: 0.9323
+[2025-07-09 07:54:41] [Rank 0] Group 4 FTA: 0.9557
+[2025-07-09 07:54:41] [Rank 0] Group 5 FTA: 0.9323
+[2025-07-09 07:54:41] [Rank 0] Group 6 FTA: 0.9297
+[2025-07-09 07:54:41] [Rank 0] Group 7 FTA: 0.9167
+[2025-07-09 07:54:41] [Rank 0] Group 8 FTA: 0.9375
+[2025-07-09 07:54:41] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-09 07:54:41] [Rank 0] Group 10 FTA: 0.9375
+[2025-07-09 07:54:41] [Rank 0] Group 11 FTA: 0.9482
+[2025-07-09 07:54:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 07:54:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 07:54:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 07:54:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 07:54:43] [Rank 0] step:7001/10000 train_time:564055ms step_avg:80.57ms
+[2025-07-09 07:54:45] [Rank 0] step:7021/10000 train_time:565554ms step_avg:80.55ms
+[2025-07-09 07:54:46] [Rank 0] step:7041/10000 train_time:567687ms step_avg:80.63ms
+[2025-07-09 07:54:48] [Rank 0] step:7061/10000 train_time:569172ms step_avg:80.61ms
+[2025-07-09 07:54:49] [Rank 0] step:7081/10000 train_time:570658ms step_avg:80.59ms
+[2025-07-09 07:54:51] [Rank 0] step:7101/10000 train_time:572146ms step_avg:80.57ms
+[2025-07-09 07:54:53] [Rank 0] step:7121/10000 train_time:574301ms step_avg:80.65ms
+[2025-07-09 07:54:54] [Rank 0] step:7141/10000 train_time:575921ms step_avg:80.65ms
+[2025-07-09 07:54:56] [Rank 0] step:7161/10000 train_time:577495ms step_avg:80.64ms
+[2025-07-09 07:54:58] [Rank 0] step:7181/10000 train_time:579094ms step_avg:80.64ms
+[2025-07-09 07:55:00] [Rank 0] step:7201/10000 train_time:580586ms step_avg:80.63ms
+[2025-07-09 07:55:01] [Rank 0] step:7221/10000 train_time:582731ms step_avg:80.70ms
+[2025-07-09 07:55:03] [Rank 0] step:7241/10000 train_time:584222ms step_avg:80.68ms
+[2025-07-09 07:55:04] [Rank 0] step:7261/10000 train_time:585714ms step_avg:80.67ms
+[2025-07-09 07:55:06] [Rank 0] step:7281/10000 train_time:587207ms step_avg:80.65ms
+[2025-07-09 07:55:07] [Rank 0] step:7301/10000 train_time:588934ms step_avg:80.66ms
+[2025-07-09 07:55:09] [Rank 0] step:7321/10000 train_time:590427ms step_avg:80.65ms
+[2025-07-09 07:55:10] [Rank 0] step:7341/10000 train_time:591919ms step_avg:80.63ms
+[2025-07-09 07:55:12] [Rank 0] step:7361/10000 train_time:593413ms step_avg:80.62ms
+[2025-07-09 07:55:14] [Rank 0] step:7381/10000 train_time:594906ms step_avg:80.60ms
+[2025-07-09 07:55:16] [Rank 0] step:7401/10000 train_time:597068ms step_avg:80.67ms
+[2025-07-09 07:55:17] [Rank 0] step:7421/10000 train_time:598560ms step_avg:80.66ms
+[2025-07-09 07:55:19] [Rank 0] step:7441/10000 train_time:600053ms step_avg:80.64ms
+[2025-07-09 07:55:20] [Rank 0] step:7461/10000 train_time:601545ms step_avg:80.63ms
+[2025-07-09 07:55:22] [Rank 0] step:7481/10000 train_time:603680ms step_avg:80.70ms
+[2025-07-09 07:55:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 07:55:25] [Rank 0] PRINT: step:7500/10000 train_loss:0.8624 val_loss:0.8628 train_time:605172ms step_avg:80.69ms
+[2025-07-09 07:55:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 07:55:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 07:55:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 08:00:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 08:00:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 08:00:46] [Rank 0] Total Loss: 5.5174
+[2025-07-09 08:00:46] [Rank 0] Total FTA: 0.8999
+[2025-07-09 08:00:46] [Rank 0] Group 0 Loss: 6.0050
+[2025-07-09 08:00:46] [Rank 0] Group 1 Loss: 5.6424
+[2025-07-09 08:00:46] [Rank 0] Group 2 Loss: 5.2307
+[2025-07-09 08:00:46] [Rank 0] Group 3 Loss: 5.4576
+[2025-07-09 08:00:46] [Rank 0] Group 4 Loss: 5.4456
+[2025-07-09 08:00:46] [Rank 0] Group 5 Loss: 5.3850
+[2025-07-09 08:00:46] [Rank 0] Group 6 Loss: 5.3027
+[2025-07-09 08:00:46] [Rank 0] Group 7 Loss: 5.4747
+[2025-07-09 08:00:46] [Rank 0] Group 8 Loss: 5.5107
+[2025-07-09 08:00:46] [Rank 0] Group 9 Loss: 5.4408
+[2025-07-09 08:00:46] [Rank 0] Group 10 Loss: 5.4097
+[2025-07-09 08:00:46] [Rank 0] Group 11 Loss: 5.4831
+[2025-07-09 08:00:46] [Rank 0] Group 0 FTA: 0.6697
+[2025-07-09 08:00:46] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 08:00:46] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-09 08:00:46] [Rank 0] Group 3 FTA: 0.9219
+[2025-07-09 08:00:46] [Rank 0] Group 4 FTA: 0.9036
+[2025-07-09 08:00:46] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-09 08:00:46] [Rank 0] Group 6 FTA: 0.9635
+[2025-07-09 08:00:46] [Rank 0] Group 7 FTA: 0.9141
+[2025-07-09 08:00:46] [Rank 0] Group 8 FTA: 0.9036
+[2025-07-09 08:00:46] [Rank 0] Group 9 FTA: 0.9453
+[2025-07-09 08:00:46] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-09 08:00:46] [Rank 0] Group 11 FTA: 0.9375
+[2025-07-09 08:00:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png
+[2025-07-09 08:00:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png
+[2025-07-09 08:00:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png
+[2025-07-09 08:00:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png
+[2025-07-09 08:00:47] [Rank 0] step:7501/10000 train_time:605193ms step_avg:80.68ms
+[2025-07-09 08:00:49] [Rank 0] step:7521/10000 train_time:606680ms step_avg:80.66ms
+[2025-07-09 08:00:50] [Rank 0] step:7541/10000 train_time:608165ms step_avg:80.65ms
+[2025-07-09 08:00:52] [Rank 0] step:7561/10000 train_time:609651ms step_avg:80.63ms
+[2025-07-09 08:00:54] [Rank 0] step:7581/10000 train_time:611792ms step_avg:80.70ms
+[2025-07-09 08:00:55] [Rank 0] step:7601/10000 train_time:613281ms step_avg:80.68ms
+[2025-07-09 08:00:57] [Rank 0] step:7621/10000 train_time:614768ms step_avg:80.67ms
+[2025-07-09 08:00:58] [Rank 0] step:7641/10000 train_time:616256ms step_avg:80.65ms
+[2025-07-09 08:01:01] [Rank 0] step:7661/10000 train_time:618410ms step_avg:80.72ms
+[2025-07-09 08:01:02] [Rank 0] step:7681/10000 train_time:619898ms step_avg:80.71ms
+[2025-07-09 08:01:04] [Rank 0] step:7701/10000 train_time:621391ms step_avg:80.69ms
+[2025-07-09 08:01:05] [Rank 0] step:7721/10000 train_time:622883ms step_avg:80.67ms
+[2025-07-09 08:01:07] [Rank 0] step:7741/10000 train_time:624373ms step_avg:80.66ms
+[2025-07-09 08:01:09] [Rank 0] step:7761/10000 train_time:626517ms step_avg:80.73ms
+[2025-07-09 08:01:10] [Rank 0] step:7781/10000 train_time:628008ms step_avg:80.71ms
+[2025-07-09 08:01:12] [Rank 0] step:7801/10000 train_time:629500ms step_avg:80.69ms
+[2025-07-09 08:01:13] [Rank 0] step:7821/10000 train_time:630992ms step_avg:80.68ms
+[2025-07-09 08:01:15] [Rank 0] step:7841/10000 train_time:632902ms step_avg:80.72ms
+[2025-07-09 08:01:17] [Rank 0] step:7861/10000 train_time:634394ms step_avg:80.70ms
+[2025-07-09 08:01:18] [Rank 0] step:7881/10000 train_time:636002ms step_avg:80.70ms
+[2025-07-09 08:01:20] [Rank 0] step:7901/10000 train_time:637494ms step_avg:80.69ms
+[2025-07-09 08:01:22] [Rank 0] step:7921/10000 train_time:638987ms step_avg:80.67ms
+[2025-07-09 08:01:23] [Rank 0] step:7941/10000 train_time:641144ms step_avg:80.74ms
+[2025-07-09 08:01:25] [Rank 0] step:7961/10000 train_time:642638ms step_avg:80.72ms
+[2025-07-09 08:01:26] [Rank 0] step:7981/10000 train_time:644132ms step_avg:80.71ms
+[2025-07-09 08:01:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 08:01:29] [Rank 0] PRINT: step:8000/10000 train_loss:0.8613 val_loss:0.8623 train_time:645630ms step_avg:80.70ms
+[2025-07-09 08:01:29] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 08:01:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 08:01:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:01:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:06:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:06:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:06:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:06:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:06:50] [Rank 0] Total Loss: 5.4683 +[2025-07-09 08:06:50] [Rank 0] Total Loss: 5.4683 +[2025-07-09 08:06:50] [Rank 0] Total FTA: 0.9675 +[2025-07-09 08:06:50] [Rank 0] Total FTA: 0.9675 +[2025-07-09 08:06:50] [Rank 0] Group 0 Loss: 5.9280 +[2025-07-09 08:06:50] [Rank 0] Group 0 Loss: 5.9280 +[2025-07-09 08:06:50] [Rank 0] Group 1 Loss: 5.6658 +[2025-07-09 08:06:50] [Rank 0] Group 1 Loss: 5.6658 +[2025-07-09 08:06:50] [Rank 0] Group 2 Loss: 5.2644 +[2025-07-09 08:06:50] [Rank 0] Group 2 Loss: 5.2644 +[2025-07-09 08:06:50] [Rank 0] Group 3 Loss: 5.3250 +[2025-07-09 08:06:50] [Rank 0] Group 3 Loss: 5.3250 +[2025-07-09 08:06:50] [Rank 0] Group 4 Loss: 5.3454 +[2025-07-09 08:06:50] [Rank 0] Group 4 Loss: 5.3454 +[2025-07-09 08:06:50] [Rank 0] Group 5 Loss: 5.3942 +[2025-07-09 08:06:50] [Rank 0] Group 5 Loss: 5.3942 +[2025-07-09 08:06:50] [Rank 0] Group 6 Loss: 5.2540 +[2025-07-09 08:06:50] [Rank 0] Group 6 Loss: 5.2540 +[2025-07-09 08:06:50] [Rank 0] Group 7 Loss: 5.4190 +[2025-07-09 08:06:50] [Rank 0] Group 7 Loss: 5.4190 +[2025-07-09 08:06:50] [Rank 0] Group 8 Loss: 5.4317 +[2025-07-09 08:06:50] [Rank 0] Group 8 Loss: 5.4317 +[2025-07-09 08:06:50] [Rank 0] Group 9 Loss: 5.3867 +[2025-07-09 08:06:50] [Rank 0] Group 9 Loss: 5.3867 +[2025-07-09 08:06:50] [Rank 0] Group 10 Loss: 5.4172 +[2025-07-09 08:06:50] [Rank 0] Group 10 Loss: 5.4172 +[2025-07-09 08:06:50] [Rank 0] Group 11 Loss: 5.4114 +[2025-07-09 08:06:50] [Rank 0] Group 11 Loss: 5.4114 +[2025-07-09 08:06:50] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:06:50] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:06:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:06:50] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:06:50] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-09 08:06:50] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-09 08:06:50] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-09 08:06:50] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-09 08:06:50] [Rank 0] Group 4 FTA: 0.9479 +[2025-07-09 08:06:50] [Rank 0] Group 4 FTA: 0.9479 +[2025-07-09 08:06:50] [Rank 0] Group 5 FTA: 0.9688 +[2025-07-09 08:06:50] [Rank 0] Group 5 FTA: 0.9688 +[2025-07-09 08:06:50] [Rank 0] Group 6 FTA: 0.9609 +[2025-07-09 08:06:50] [Rank 0] Group 6 FTA: 0.9609 +[2025-07-09 08:06:50] [Rank 0] Group 7 FTA: 0.9401 +[2025-07-09 08:06:50] [Rank 0] Group 7 FTA: 0.9401 +[2025-07-09 08:06:50] [Rank 0] Group 8 FTA: 0.9375 +[2025-07-09 08:06:50] [Rank 0] Group 8 FTA: 0.9375 +[2025-07-09 08:06:50] [Rank 0] Group 9 FTA: 0.9531 +[2025-07-09 08:06:50] [Rank 0] Group 9 FTA: 0.9531 +[2025-07-09 08:06:50] [Rank 0] Group 10 FTA: 0.9434 +[2025-07-09 08:06:50] [Rank 0] Group 10 FTA: 0.9434 +[2025-07-09 08:06:50] [Rank 0] Group 11 FTA: 0.9531 +[2025-07-09 08:06:50] [Rank 0] Group 11 FTA: 0.9531 +[2025-07-09 08:06:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:06:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:06:51] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:06:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:06:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:06:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:06:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:06:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:06:52] [Rank 0] step:8001/10000 train_time:645652ms step_avg:80.70ms +[2025-07-09 08:06:52] [Rank 0] step:8001/10000 train_time:645652ms step_avg:80.70ms +[2025-07-09 08:06:54] [Rank 0] step:8021/10000 train_time:647806ms step_avg:80.76ms +[2025-07-09 08:06:54] [Rank 0] step:8021/10000 train_time:647806ms step_avg:80.76ms +[2025-07-09 08:06:56] [Rank 0] step:8041/10000 train_time:649292ms step_avg:80.75ms +[2025-07-09 08:06:56] [Rank 0] step:8041/10000 train_time:649292ms step_avg:80.75ms +[2025-07-09 08:06:57] [Rank 0] step:8061/10000 train_time:650779ms step_avg:80.73ms +[2025-07-09 08:06:57] [Rank 0] step:8061/10000 train_time:650779ms step_avg:80.73ms +[2025-07-09 08:06:58] [Rank 0] step:8081/10000 train_time:652264ms step_avg:80.72ms +[2025-07-09 08:06:58] [Rank 0] step:8081/10000 train_time:652264ms step_avg:80.72ms +[2025-07-09 08:07:00] [Rank 0] step:8101/10000 train_time:653799ms step_avg:80.71ms +[2025-07-09 08:07:00] [Rank 0] step:8101/10000 train_time:653799ms step_avg:80.71ms +[2025-07-09 08:07:02] [Rank 0] step:8121/10000 train_time:655474ms step_avg:80.71ms +[2025-07-09 08:07:02] [Rank 0] step:8121/10000 train_time:655474ms step_avg:80.71ms +[2025-07-09 08:07:03] [Rank 0] step:8141/10000 train_time:656963ms step_avg:80.70ms +[2025-07-09 08:07:03] [Rank 0] step:8141/10000 train_time:656963ms step_avg:80.70ms +[2025-07-09 08:07:05] [Rank 0] step:8161/10000 train_time:658452ms step_avg:80.68ms +[2025-07-09 08:07:05] [Rank 0] step:8161/10000 train_time:658452ms step_avg:80.68ms +[2025-07-09 08:07:06] [Rank 0] step:8181/10000 train_time:659942ms step_avg:80.67ms +[2025-07-09 08:07:06] [Rank 0] step:8181/10000 train_time:659942ms step_avg:80.67ms +[2025-07-09 08:07:08] [Rank 0] step:8201/10000 train_time:662081ms step_avg:80.73ms +[2025-07-09 08:07:08] [Rank 0] step:8201/10000 train_time:662081ms step_avg:80.73ms +[2025-07-09 08:07:10] [Rank 0] step:8221/10000 train_time:663573ms step_avg:80.72ms +[2025-07-09 08:07:10] [Rank 0] step:8221/10000 train_time:663573ms step_avg:80.72ms +[2025-07-09 08:07:11] [Rank 0] step:8241/10000 train_time:665064ms step_avg:80.70ms +[2025-07-09 08:07:11] [Rank 0] step:8241/10000 train_time:665064ms step_avg:80.70ms +[2025-07-09 08:07:13] [Rank 0] step:8261/10000 train_time:666556ms step_avg:80.69ms +[2025-07-09 08:07:13] [Rank 0] step:8261/10000 train_time:666556ms step_avg:80.69ms +[2025-07-09 08:07:15] [Rank 0] step:8281/10000 train_time:668100ms step_avg:80.68ms +[2025-07-09 08:07:15] [Rank 0] step:8281/10000 train_time:668100ms step_avg:80.68ms +[2025-07-09 08:07:16] [Rank 0] step:8301/10000 train_time:669774ms step_avg:80.69ms +[2025-07-09 08:07:16] [Rank 0] 
step:8301/10000 train_time:669774ms step_avg:80.69ms +[2025-07-09 08:07:17] [Rank 0] step:8321/10000 train_time:671266ms step_avg:80.67ms +[2025-07-09 08:07:17] [Rank 0] step:8321/10000 train_time:671266ms step_avg:80.67ms +[2025-07-09 08:07:19] [Rank 0] step:8341/10000 train_time:672758ms step_avg:80.66ms +[2025-07-09 08:07:19] [Rank 0] step:8341/10000 train_time:672758ms step_avg:80.66ms +[2025-07-09 08:07:20] [Rank 0] step:8361/10000 train_time:674248ms step_avg:80.64ms +[2025-07-09 08:07:20] [Rank 0] step:8361/10000 train_time:674248ms step_avg:80.64ms +[2025-07-09 08:07:23] [Rank 0] step:8381/10000 train_time:676387ms step_avg:80.70ms +[2025-07-09 08:07:23] [Rank 0] step:8381/10000 train_time:676387ms step_avg:80.70ms +[2025-07-09 08:07:24] [Rank 0] step:8401/10000 train_time:677879ms step_avg:80.69ms +[2025-07-09 08:07:24] [Rank 0] step:8401/10000 train_time:677879ms step_avg:80.69ms +[2025-07-09 08:07:26] [Rank 0] step:8421/10000 train_time:679372ms step_avg:80.68ms +[2025-07-09 08:07:26] [Rank 0] step:8421/10000 train_time:679372ms step_avg:80.68ms +[2025-07-09 08:07:27] [Rank 0] step:8441/10000 train_time:680865ms step_avg:80.66ms +[2025-07-09 08:07:27] [Rank 0] step:8441/10000 train_time:680865ms step_avg:80.66ms +[2025-07-09 08:07:29] [Rank 0] step:8461/10000 train_time:682613ms step_avg:80.68ms +[2025-07-09 08:07:29] [Rank 0] step:8461/10000 train_time:682613ms step_avg:80.68ms +[2025-07-09 08:07:31] [Rank 0] step:8481/10000 train_time:684495ms step_avg:80.71ms +[2025-07-09 08:07:31] [Rank 0] step:8481/10000 train_time:684495ms step_avg:80.71ms +[2025-07-09 08:07:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 08:07:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 08:07:33] [Rank 0] PRINT: step:8500/10000 train_loss:0.8603 val_loss:0.8619 train_time:685989ms step_avg:80.70ms +[2025-07-09 08:07:33] [Rank 0] PRINT: step:8500/10000 train_loss:0.8603 val_loss:0.8619 train_time:685989ms step_avg:80.70ms +[2025-07-09 08:07:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 08:07:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 08:07:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-09 08:07:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-09 08:07:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:07:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:12:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:12:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:12:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:12:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:12:55] [Rank 0] Total Loss: 5.5006 +[2025-07-09 08:12:55] [Rank 0] Total Loss: 5.5006 +[2025-07-09 08:12:55] [Rank 0] Total FTA: 0.9675 +[2025-07-09 08:12:55] [Rank 0] Total FTA: 0.9675 +[2025-07-09 08:12:55] [Rank 0] Group 0 Loss: 5.8021 +[2025-07-09 08:12:55] [Rank 0] Group 0 Loss: 5.8021 +[2025-07-09 08:12:55] [Rank 0] Group 1 Loss: 5.7043 +[2025-07-09 08:12:55] [Rank 0] Group 1 Loss: 5.7043 +[2025-07-09 08:12:55] [Rank 0] Group 2 Loss: 5.2932 +[2025-07-09 08:12:55] [Rank 0] Group 2 Loss: 5.2932 +[2025-07-09 08:12:55] [Rank 0] Group 3 Loss: 5.5001 +[2025-07-09 08:12:55] [Rank 0] Group 3 Loss: 5.5001 +[2025-07-09 08:12:55] [Rank 0] Group 4 Loss: 5.5040 +[2025-07-09 08:12:55] [Rank 0] Group 4 Loss: 5.5040 +[2025-07-09 08:12:55] [Rank 0] Group 5 Loss: 5.3971 +[2025-07-09 08:12:55] [Rank 0] Group 5 Loss: 5.3971 +[2025-07-09 08:12:55] [Rank 0] Group 6 Loss: 5.2953 +[2025-07-09 08:12:55] [Rank 0] Group 6 Loss: 5.2953 +[2025-07-09 08:12:55] [Rank 0] Group 7 Loss: 5.4786 +[2025-07-09 08:12:55] [Rank 0] Group 7 Loss: 5.4786 +[2025-07-09 08:12:55] [Rank 0] Group 8 Loss: 5.4540 +[2025-07-09 08:12:55] [Rank 0] Group 8 Loss: 5.4540 +[2025-07-09 08:12:55] [Rank 0] Group 9 Loss: 5.4139 +[2025-07-09 08:12:55] [Rank 0] Group 9 Loss: 5.4139 +[2025-07-09 08:12:55] [Rank 0] Group 10 Loss: 5.4570 +[2025-07-09 08:12:55] [Rank 0] Group 10 Loss: 5.4570 +[2025-07-09 08:12:55] [Rank 0] Group 11 Loss: 5.4593 +[2025-07-09 08:12:55] [Rank 0] Group 11 Loss: 5.4593 +[2025-07-09 08:12:55] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:12:55] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:12:55] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:12:55] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:12:55] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-09 08:12:55] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-09 08:12:55] [Rank 0] Group 3 FTA: 0.9688 +[2025-07-09 08:12:55] [Rank 0] Group 3 FTA: 0.9688 +[2025-07-09 08:12:55] [Rank 0] Group 4 FTA: 0.9766 +[2025-07-09 08:12:55] [Rank 0] Group 4 FTA: 0.9766 +[2025-07-09 08:12:55] [Rank 0] Group 5 FTA: 0.9818 +[2025-07-09 08:12:55] [Rank 0] Group 5 FTA: 0.9818 +[2025-07-09 08:12:55] [Rank 0] Group 6 FTA: 0.9557 +[2025-07-09 08:12:55] [Rank 0] Group 6 FTA: 0.9557 +[2025-07-09 08:12:55] [Rank 0] Group 7 FTA: 0.9427 +[2025-07-09 08:12:55] [Rank 0] Group 7 FTA: 0.9427 +[2025-07-09 08:12:55] [Rank 0] Group 8 FTA: 0.9297 +[2025-07-09 08:12:55] [Rank 0] Group 8 FTA: 0.9297 +[2025-07-09 08:12:55] [Rank 0] Group 9 FTA: 0.9570 +[2025-07-09 08:12:55] [Rank 0] Group 9 FTA: 0.9570 +[2025-07-09 08:12:55] [Rank 0] Group 10 FTA: 0.9590 +[2025-07-09 08:12:55] [Rank 0] Group 10 FTA: 0.9590 +[2025-07-09 08:12:55] [Rank 0] Group 11 FTA: 0.9443 +[2025-07-09 08:12:55] [Rank 0] Group 11 FTA: 0.9443 +[2025-07-09 08:12:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:12:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:12:58] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:12:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:12:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:12:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:12:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:12:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:12:58] [Rank 0] step:8501/10000 train_time:686008ms step_avg:80.70ms +[2025-07-09 08:12:58] [Rank 0] step:8501/10000 train_time:686008ms step_avg:80.70ms +[2025-07-09 08:13:00] [Rank 0] step:8521/10000 train_time:687510ms step_avg:80.68ms +[2025-07-09 08:13:00] [Rank 0] step:8521/10000 train_time:687510ms step_avg:80.68ms +[2025-07-09 08:13:01] [Rank 0] step:8541/10000 train_time:688994ms step_avg:80.67ms +[2025-07-09 08:13:01] [Rank 0] step:8541/10000 train_time:688994ms step_avg:80.67ms +[2025-07-09 08:13:03] [Rank 0] step:8561/10000 train_time:691129ms step_avg:80.73ms +[2025-07-09 08:13:03] [Rank 0] step:8561/10000 train_time:691129ms step_avg:80.73ms +[2025-07-09 08:13:05] [Rank 0] step:8581/10000 train_time:692615ms step_avg:80.71ms +[2025-07-09 08:13:05] [Rank 0] step:8581/10000 train_time:692615ms step_avg:80.71ms +[2025-07-09 08:13:06] [Rank 0] step:8601/10000 train_time:694103ms step_avg:80.70ms +[2025-07-09 08:13:06] [Rank 0] step:8601/10000 train_time:694103ms step_avg:80.70ms +[2025-07-09 08:13:08] [Rank 0] step:8621/10000 train_time:695592ms step_avg:80.69ms +[2025-07-09 08:13:08] [Rank 0] step:8621/10000 train_time:695592ms step_avg:80.69ms +[2025-07-09 08:13:10] [Rank 0] step:8641/10000 train_time:697080ms step_avg:80.67ms +[2025-07-09 08:13:10] [Rank 0] step:8641/10000 train_time:697080ms step_avg:80.67ms +[2025-07-09 08:13:12] [Rank 0] step:8661/10000 train_time:699233ms step_avg:80.73ms +[2025-07-09 08:13:12] [Rank 0] step:8661/10000 train_time:699233ms step_avg:80.73ms +[2025-07-09 08:13:13] [Rank 0] step:8681/10000 train_time:700722ms step_avg:80.72ms +[2025-07-09 08:13:13] [Rank 0] step:8681/10000 train_time:700722ms step_avg:80.72ms +[2025-07-09 08:13:15] [Rank 0] step:8701/10000 train_time:702212ms step_avg:80.70ms +[2025-07-09 08:13:15] [Rank 0] step:8701/10000 train_time:702212ms step_avg:80.70ms +[2025-07-09 08:13:16] [Rank 0] step:8721/10000 train_time:703703ms step_avg:80.69ms +[2025-07-09 08:13:16] [Rank 0] step:8721/10000 train_time:703703ms step_avg:80.69ms +[2025-07-09 08:13:18] [Rank 0] step:8741/10000 train_time:705849ms step_avg:80.75ms +[2025-07-09 08:13:18] [Rank 0] step:8741/10000 train_time:705849ms step_avg:80.75ms +[2025-07-09 08:13:20] [Rank 0] step:8761/10000 train_time:707338ms step_avg:80.74ms +[2025-07-09 08:13:20] [Rank 0] step:8761/10000 train_time:707338ms step_avg:80.74ms +[2025-07-09 08:13:21] [Rank 0] step:8781/10000 train_time:708829ms step_avg:80.72ms +[2025-07-09 08:13:21] [Rank 0] step:8781/10000 train_time:708829ms step_avg:80.72ms +[2025-07-09 08:13:23] [Rank 0] step:8801/10000 train_time:710322ms step_avg:80.71ms +[2025-07-09 08:13:23] [Rank 0] 
step:8801/10000 train_time:710322ms step_avg:80.71ms +[2025-07-09 08:13:25] [Rank 0] step:8821/10000 train_time:711815ms step_avg:80.70ms +[2025-07-09 08:13:25] [Rank 0] step:8821/10000 train_time:711815ms step_avg:80.70ms +[2025-07-09 08:13:26] [Rank 0] step:8841/10000 train_time:713958ms step_avg:80.76ms +[2025-07-09 08:13:26] [Rank 0] step:8841/10000 train_time:713958ms step_avg:80.76ms +[2025-07-09 08:13:28] [Rank 0] step:8861/10000 train_time:715450ms step_avg:80.74ms +[2025-07-09 08:13:28] [Rank 0] step:8861/10000 train_time:715450ms step_avg:80.74ms +[2025-07-09 08:13:29] [Rank 0] step:8881/10000 train_time:716944ms step_avg:80.73ms +[2025-07-09 08:13:29] [Rank 0] step:8881/10000 train_time:716944ms step_avg:80.73ms +[2025-07-09 08:13:31] [Rank 0] step:8901/10000 train_time:718437ms step_avg:80.71ms +[2025-07-09 08:13:31] [Rank 0] step:8901/10000 train_time:718437ms step_avg:80.71ms +[2025-07-09 08:13:32] [Rank 0] step:8921/10000 train_time:720169ms step_avg:80.73ms +[2025-07-09 08:13:32] [Rank 0] step:8921/10000 train_time:720169ms step_avg:80.73ms +[2025-07-09 08:13:34] [Rank 0] step:8941/10000 train_time:721661ms step_avg:80.71ms +[2025-07-09 08:13:34] [Rank 0] step:8941/10000 train_time:721661ms step_avg:80.71ms +[2025-07-09 08:13:35] [Rank 0] step:8961/10000 train_time:723155ms step_avg:80.70ms +[2025-07-09 08:13:35] [Rank 0] step:8961/10000 train_time:723155ms step_avg:80.70ms +[2025-07-09 08:13:37] [Rank 0] step:8981/10000 train_time:724649ms step_avg:80.69ms +[2025-07-09 08:13:37] [Rank 0] step:8981/10000 train_time:724649ms step_avg:80.69ms +[2025-07-09 08:13:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 08:13:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 08:13:39] [Rank 0] PRINT: step:9000/10000 train_loss:0.8595 val_loss:0.8616 train_time:726145ms step_avg:80.68ms +[2025-07-09 08:13:39] [Rank 0] PRINT: step:9000/10000 train_loss:0.8595 val_loss:0.8616 train_time:726145ms step_avg:80.68ms +[2025-07-09 08:13:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 08:13:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 08:13:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-09 08:13:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-09 08:13:40] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:13:40] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:19:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:19:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:19:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:19:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:19:00] [Rank 0] Total Loss: 5.4902 +[2025-07-09 08:19:00] [Rank 0] Total Loss: 5.4902 +[2025-07-09 08:19:00] [Rank 0] Total FTA: 0.9720 +[2025-07-09 08:19:00] [Rank 0] Total FTA: 0.9720 +[2025-07-09 08:19:00] [Rank 0] Group 0 Loss: 5.8588 +[2025-07-09 08:19:00] [Rank 0] Group 0 Loss: 5.8588 +[2025-07-09 08:19:00] [Rank 0] Group 1 Loss: 5.6979 +[2025-07-09 08:19:00] [Rank 0] Group 1 Loss: 5.6979 +[2025-07-09 08:19:00] [Rank 0] Group 2 Loss: 5.3121 +[2025-07-09 08:19:00] [Rank 0] Group 2 Loss: 5.3121 +[2025-07-09 08:19:00] [Rank 0] Group 3 Loss: 5.4947 +[2025-07-09 08:19:00] [Rank 0] Group 3 Loss: 5.4947 +[2025-07-09 08:19:00] [Rank 0] Group 4 Loss: 5.3870 +[2025-07-09 08:19:00] [Rank 0] Group 4 Loss: 5.3870 +[2025-07-09 08:19:00] [Rank 0] Group 5 Loss: 5.3264 +[2025-07-09 08:19:00] [Rank 0] Group 5 Loss: 5.3264 +[2025-07-09 08:19:00] [Rank 0] Group 6 Loss: 5.3231 +[2025-07-09 08:19:00] [Rank 0] Group 6 Loss: 5.3231 +[2025-07-09 08:19:00] [Rank 0] Group 7 Loss: 5.4614 +[2025-07-09 08:19:00] [Rank 0] Group 7 Loss: 5.4614 +[2025-07-09 08:19:00] [Rank 0] Group 8 Loss: 5.4409 +[2025-07-09 08:19:00] [Rank 0] Group 8 Loss: 5.4409 +[2025-07-09 08:19:00] [Rank 0] Group 9 Loss: 5.3314 +[2025-07-09 08:19:00] [Rank 0] Group 9 Loss: 5.3314 +[2025-07-09 08:19:00] [Rank 0] Group 10 Loss: 5.4347 +[2025-07-09 08:19:00] [Rank 0] Group 10 Loss: 5.4347 +[2025-07-09 08:19:00] [Rank 0] Group 11 Loss: 5.4602 +[2025-07-09 08:19:00] [Rank 0] Group 11 Loss: 5.4602 +[2025-07-09 08:19:00] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:19:00] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:19:00] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:19:00] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:19:00] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-09 08:19:00] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-09 08:19:00] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-09 08:19:00] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-09 08:19:00] [Rank 0] Group 4 FTA: 0.9818 +[2025-07-09 08:19:00] [Rank 0] Group 4 FTA: 0.9818 +[2025-07-09 08:19:00] [Rank 0] Group 5 FTA: 0.9818 +[2025-07-09 08:19:00] [Rank 0] Group 5 FTA: 0.9818 +[2025-07-09 08:19:00] [Rank 0] Group 6 FTA: 0.9844 +[2025-07-09 08:19:00] [Rank 0] Group 6 FTA: 0.9844 +[2025-07-09 08:19:00] [Rank 0] Group 7 FTA: 0.9583 +[2025-07-09 08:19:00] [Rank 0] Group 7 FTA: 0.9583 +[2025-07-09 08:19:00] [Rank 0] Group 8 FTA: 0.9505 +[2025-07-09 08:19:00] [Rank 0] Group 8 FTA: 0.9505 +[2025-07-09 08:19:00] [Rank 0] Group 9 FTA: 0.9453 +[2025-07-09 08:19:00] [Rank 0] Group 9 FTA: 0.9453 +[2025-07-09 08:19:00] [Rank 0] Group 10 FTA: 0.9512 +[2025-07-09 08:19:00] [Rank 0] Group 10 FTA: 0.9512 +[2025-07-09 08:19:00] [Rank 0] Group 11 FTA: 0.9375 +[2025-07-09 08:19:00] [Rank 0] Group 11 FTA: 0.9375 +[2025-07-09 08:19:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:19:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:19:01] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:19:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:19:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:19:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:19:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:19:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:19:02] [Rank 0] step:9001/10000 train_time:726277ms step_avg:80.69ms +[2025-07-09 08:19:02] [Rank 0] step:9001/10000 train_time:726277ms step_avg:80.69ms +[2025-07-09 08:19:04] [Rank 0] step:9021/10000 train_time:728376ms step_avg:80.74ms +[2025-07-09 08:19:04] [Rank 0] step:9021/10000 train_time:728376ms step_avg:80.74ms +[2025-07-09 08:19:05] [Rank 0] step:9041/10000 train_time:729861ms step_avg:80.73ms +[2025-07-09 08:19:05] [Rank 0] step:9041/10000 train_time:729861ms step_avg:80.73ms +[2025-07-09 08:19:06] [Rank 0] step:9061/10000 train_time:731347ms step_avg:80.71ms +[2025-07-09 08:19:06] [Rank 0] step:9061/10000 train_time:731347ms step_avg:80.71ms +[2025-07-09 08:19:08] [Rank 0] step:9081/10000 train_time:732834ms step_avg:80.70ms +[2025-07-09 08:19:08] [Rank 0] step:9081/10000 train_time:732834ms step_avg:80.70ms +[2025-07-09 08:19:10] [Rank 0] step:9101/10000 train_time:734703ms step_avg:80.73ms +[2025-07-09 08:19:10] [Rank 0] step:9101/10000 train_time:734703ms step_avg:80.73ms +[2025-07-09 08:19:11] [Rank 0] step:9121/10000 train_time:736278ms step_avg:80.72ms +[2025-07-09 08:19:11] [Rank 0] step:9121/10000 train_time:736278ms step_avg:80.72ms +[2025-07-09 08:19:13] [Rank 0] step:9141/10000 train_time:737793ms step_avg:80.71ms +[2025-07-09 08:19:13] [Rank 0] step:9141/10000 train_time:737793ms step_avg:80.71ms +[2025-07-09 08:19:14] [Rank 0] step:9161/10000 train_time:739283ms step_avg:80.70ms +[2025-07-09 08:19:14] [Rank 0] step:9161/10000 train_time:739283ms step_avg:80.70ms +[2025-07-09 08:19:16] [Rank 0] step:9181/10000 train_time:740771ms step_avg:80.69ms +[2025-07-09 08:19:16] [Rank 0] step:9181/10000 train_time:740771ms step_avg:80.69ms +[2025-07-09 08:19:18] [Rank 0] step:9201/10000 train_time:742500ms step_avg:80.70ms +[2025-07-09 08:19:18] [Rank 0] step:9201/10000 train_time:742500ms step_avg:80.70ms +[2025-07-09 08:19:19] [Rank 0] step:9221/10000 train_time:743990ms step_avg:80.68ms +[2025-07-09 08:19:19] [Rank 0] step:9221/10000 train_time:743990ms step_avg:80.68ms +[2025-07-09 08:19:21] [Rank 0] step:9241/10000 train_time:745481ms step_avg:80.67ms +[2025-07-09 08:19:21] [Rank 0] step:9241/10000 train_time:745481ms step_avg:80.67ms +[2025-07-09 08:19:22] [Rank 0] step:9261/10000 train_time:746974ms step_avg:80.66ms +[2025-07-09 08:19:22] [Rank 0] step:9261/10000 train_time:746974ms step_avg:80.66ms +[2025-07-09 08:19:24] [Rank 0] step:9281/10000 train_time:749105ms step_avg:80.71ms +[2025-07-09 08:19:24] [Rank 0] step:9281/10000 train_time:749105ms step_avg:80.71ms +[2025-07-09 08:19:26] [Rank 0] step:9301/10000 train_time:750597ms step_avg:80.70ms +[2025-07-09 08:19:26] [Rank 0] 
step:9301/10000 train_time:750597ms step_avg:80.70ms +[2025-07-09 08:19:27] [Rank 0] step:9321/10000 train_time:752089ms step_avg:80.69ms +[2025-07-09 08:19:27] [Rank 0] step:9321/10000 train_time:752089ms step_avg:80.69ms +[2025-07-09 08:19:29] [Rank 0] step:9341/10000 train_time:753582ms step_avg:80.67ms +[2025-07-09 08:19:29] [Rank 0] step:9341/10000 train_time:753582ms step_avg:80.67ms +[2025-07-09 08:19:31] [Rank 0] step:9361/10000 train_time:755075ms step_avg:80.66ms +[2025-07-09 08:19:31] [Rank 0] step:9361/10000 train_time:755075ms step_avg:80.66ms +[2025-07-09 08:19:32] [Rank 0] step:9381/10000 train_time:757215ms step_avg:80.72ms +[2025-07-09 08:19:32] [Rank 0] step:9381/10000 train_time:757215ms step_avg:80.72ms +[2025-07-09 08:19:34] [Rank 0] step:9401/10000 train_time:758707ms step_avg:80.70ms +[2025-07-09 08:19:34] [Rank 0] step:9401/10000 train_time:758707ms step_avg:80.70ms +[2025-07-09 08:19:35] [Rank 0] step:9421/10000 train_time:760200ms step_avg:80.69ms +[2025-07-09 08:19:35] [Rank 0] step:9421/10000 train_time:760200ms step_avg:80.69ms +[2025-07-09 08:19:37] [Rank 0] step:9441/10000 train_time:761694ms step_avg:80.68ms +[2025-07-09 08:19:37] [Rank 0] step:9441/10000 train_time:761694ms step_avg:80.68ms +[2025-07-09 08:19:38] [Rank 0] step:9461/10000 train_time:763322ms step_avg:80.68ms +[2025-07-09 08:19:38] [Rank 0] step:9461/10000 train_time:763322ms step_avg:80.68ms +[2025-07-09 08:19:40] [Rank 0] step:9481/10000 train_time:764814ms step_avg:80.67ms +[2025-07-09 08:19:40] [Rank 0] step:9481/10000 train_time:764814ms step_avg:80.67ms +[2025-07-09 08:19:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 08:19:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 08:19:42] [Rank 0] PRINT: step:9500/10000 train_loss:0.8587 val_loss:0.8614 train_time:766310ms step_avg:80.66ms +[2025-07-09 08:19:42] [Rank 0] PRINT: step:9500/10000 train_loss:0.8587 val_loss:0.8614 train_time:766310ms step_avg:80.66ms +[2025-07-09 08:19:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 08:19:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 08:19:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-09 08:19:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-09 08:19:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:19:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:25:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:25:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:25:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:25:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:25:05] [Rank 0] Total Loss: 5.5485 +[2025-07-09 08:25:05] [Rank 0] Total Loss: 5.5485 +[2025-07-09 08:25:05] [Rank 0] Total FTA: 0.9750 +[2025-07-09 08:25:05] [Rank 0] Total FTA: 0.9750 +[2025-07-09 08:25:05] [Rank 0] Group 0 Loss: 5.8714 +[2025-07-09 08:25:05] [Rank 0] Group 0 Loss: 5.8714 +[2025-07-09 08:25:05] [Rank 0] Group 1 Loss: 5.8523 +[2025-07-09 08:25:05] [Rank 0] Group 1 Loss: 5.8523 +[2025-07-09 08:25:05] [Rank 0] Group 2 Loss: 5.3702 +[2025-07-09 08:25:05] [Rank 0] Group 2 Loss: 5.3702 +[2025-07-09 08:25:05] [Rank 0] Group 3 Loss: 5.4929 +[2025-07-09 08:25:05] [Rank 0] Group 3 Loss: 5.4929 +[2025-07-09 08:25:05] [Rank 0] Group 4 Loss: 5.4569 +[2025-07-09 08:25:05] [Rank 0] Group 4 Loss: 5.4569 +[2025-07-09 08:25:05] [Rank 0] Group 5 Loss: 5.4648 +[2025-07-09 08:25:05] [Rank 0] Group 5 Loss: 5.4648 +[2025-07-09 08:25:05] [Rank 0] Group 6 Loss: 5.3619 +[2025-07-09 08:25:05] [Rank 0] Group 6 Loss: 5.3619 +[2025-07-09 08:25:05] [Rank 0] Group 7 Loss: 5.5354 +[2025-07-09 08:25:05] [Rank 0] Group 7 Loss: 5.5354 +[2025-07-09 08:25:05] [Rank 0] Group 8 Loss: 5.4899 +[2025-07-09 08:25:05] [Rank 0] Group 8 Loss: 5.4899 +[2025-07-09 08:25:05] [Rank 0] Group 9 Loss: 5.4346 +[2025-07-09 08:25:05] [Rank 0] Group 9 Loss: 5.4346 +[2025-07-09 08:25:05] [Rank 0] Group 10 Loss: 5.5151 +[2025-07-09 08:25:05] [Rank 0] Group 10 Loss: 5.5151 +[2025-07-09 08:25:05] [Rank 0] Group 11 Loss: 5.4876 +[2025-07-09 08:25:05] [Rank 0] Group 11 Loss: 5.4876 +[2025-07-09 08:25:05] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:25:05] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:25:05] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:25:05] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:25:05] [Rank 0] Group 2 FTA: 0.9297 +[2025-07-09 08:25:05] [Rank 0] Group 2 FTA: 0.9297 +[2025-07-09 08:25:05] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-09 08:25:05] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-09 08:25:05] [Rank 0] Group 4 FTA: 0.9766 +[2025-07-09 08:25:05] [Rank 0] Group 4 FTA: 0.9766 +[2025-07-09 08:25:05] [Rank 0] Group 5 FTA: 0.9948 +[2025-07-09 08:25:05] [Rank 0] Group 5 FTA: 0.9948 +[2025-07-09 08:25:05] [Rank 0] Group 6 FTA: 0.9818 +[2025-07-09 08:25:05] [Rank 0] Group 6 FTA: 0.9818 +[2025-07-09 08:25:05] [Rank 0] Group 7 FTA: 0.9609 +[2025-07-09 08:25:05] [Rank 0] Group 7 FTA: 0.9609 +[2025-07-09 08:25:05] [Rank 0] Group 8 FTA: 0.9453 +[2025-07-09 08:25:05] [Rank 0] Group 8 FTA: 0.9453 +[2025-07-09 08:25:05] [Rank 0] Group 9 FTA: 0.9609 +[2025-07-09 08:25:05] [Rank 0] Group 9 FTA: 0.9609 +[2025-07-09 08:25:05] [Rank 0] Group 10 FTA: 0.9629 +[2025-07-09 08:25:05] [Rank 0] Group 10 FTA: 0.9629 +[2025-07-09 08:25:05] [Rank 0] Group 11 FTA: 0.9697 +[2025-07-09 08:25:05] [Rank 0] Group 11 FTA: 0.9697 +[2025-07-09 08:25:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:25:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:25:06] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:25:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:25:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:25:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:25:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:25:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:25:07] [Rank 0] step:9501/10000 train_time:766332ms step_avg:80.66ms +[2025-07-09 08:25:07] [Rank 0] step:9501/10000 train_time:766332ms step_avg:80.66ms +[2025-07-09 08:25:08] [Rank 0] step:9521/10000 train_time:767816ms step_avg:80.64ms +[2025-07-09 08:25:08] [Rank 0] step:9521/10000 train_time:767816ms step_avg:80.64ms +[2025-07-09 08:25:10] [Rank 0] step:9541/10000 train_time:769559ms step_avg:80.66ms +[2025-07-09 08:25:10] [Rank 0] step:9541/10000 train_time:769559ms step_avg:80.66ms +[2025-07-09 08:25:12] [Rank 0] step:9561/10000 train_time:771437ms step_avg:80.69ms +[2025-07-09 08:25:12] [Rank 0] step:9561/10000 train_time:771437ms step_avg:80.69ms +[2025-07-09 08:25:13] [Rank 0] step:9581/10000 train_time:772923ms step_avg:80.67ms +[2025-07-09 08:25:13] [Rank 0] step:9581/10000 train_time:772923ms step_avg:80.67ms +[2025-07-09 08:25:15] [Rank 0] step:9601/10000 train_time:774411ms step_avg:80.66ms +[2025-07-09 08:25:15] [Rank 0] step:9601/10000 train_time:774411ms step_avg:80.66ms +[2025-07-09 08:25:16] [Rank 0] step:9621/10000 train_time:775899ms step_avg:80.65ms +[2025-07-09 08:25:16] [Rank 0] step:9621/10000 train_time:775899ms step_avg:80.65ms +[2025-07-09 08:25:18] [Rank 0] step:9641/10000 train_time:777627ms step_avg:80.66ms +[2025-07-09 08:25:18] [Rank 0] step:9641/10000 train_time:777627ms step_avg:80.66ms +[2025-07-09 08:25:19] [Rank 0] step:9661/10000 train_time:779115ms step_avg:80.65ms +[2025-07-09 08:25:19] [Rank 0] step:9661/10000 train_time:779115ms step_avg:80.65ms +[2025-07-09 08:25:21] [Rank 0] step:9681/10000 train_time:780605ms step_avg:80.63ms +[2025-07-09 08:25:21] [Rank 0] step:9681/10000 train_time:780605ms step_avg:80.63ms +[2025-07-09 08:25:22] [Rank 0] step:9701/10000 train_time:782095ms step_avg:80.62ms +[2025-07-09 08:25:22] [Rank 0] step:9701/10000 train_time:782095ms step_avg:80.62ms +[2025-07-09 08:25:25] [Rank 0] step:9721/10000 train_time:783586ms step_avg:80.61ms +[2025-07-09 08:25:25] [Rank 0] step:9721/10000 train_time:783586ms step_avg:80.61ms +[2025-07-09 08:25:26] [Rank 0] step:9741/10000 train_time:785742ms step_avg:80.66ms +[2025-07-09 08:25:26] [Rank 0] step:9741/10000 train_time:785742ms step_avg:80.66ms +[2025-07-09 08:25:28] [Rank 0] step:9761/10000 train_time:787229ms step_avg:80.65ms +[2025-07-09 08:25:28] [Rank 0] step:9761/10000 train_time:787229ms step_avg:80.65ms +[2025-07-09 08:25:29] [Rank 0] step:9781/10000 train_time:788886ms step_avg:80.65ms +[2025-07-09 08:25:29] [Rank 0] step:9781/10000 train_time:788886ms step_avg:80.65ms +[2025-07-09 08:25:31] [Rank 0] step:9801/10000 train_time:790377ms step_avg:80.64ms +[2025-07-09 08:25:31] [Rank 0] 
step:9801/10000 train_time:790377ms step_avg:80.64ms +[2025-07-09 08:25:33] [Rank 0] step:9821/10000 train_time:792612ms step_avg:80.71ms +[2025-07-09 08:25:33] [Rank 0] step:9821/10000 train_time:792612ms step_avg:80.71ms +[2025-07-09 08:25:34] [Rank 0] step:9841/10000 train_time:794103ms step_avg:80.69ms +[2025-07-09 08:25:34] [Rank 0] step:9841/10000 train_time:794103ms step_avg:80.69ms +[2025-07-09 08:25:36] [Rank 0] step:9861/10000 train_time:795596ms step_avg:80.68ms +[2025-07-09 08:25:36] [Rank 0] step:9861/10000 train_time:795596ms step_avg:80.68ms +[2025-07-09 08:25:37] [Rank 0] step:9881/10000 train_time:797090ms step_avg:80.67ms +[2025-07-09 08:25:37] [Rank 0] step:9881/10000 train_time:797090ms step_avg:80.67ms +[2025-07-09 08:25:40] [Rank 0] step:9901/10000 train_time:799247ms step_avg:80.72ms +[2025-07-09 08:25:40] [Rank 0] step:9901/10000 train_time:799247ms step_avg:80.72ms +[2025-07-09 08:25:41] [Rank 0] step:9921/10000 train_time:800720ms step_avg:80.71ms +[2025-07-09 08:25:41] [Rank 0] step:9921/10000 train_time:800720ms step_avg:80.71ms +[2025-07-09 08:25:43] [Rank 0] step:9941/10000 train_time:802213ms step_avg:80.70ms +[2025-07-09 08:25:43] [Rank 0] step:9941/10000 train_time:802213ms step_avg:80.70ms +[2025-07-09 08:25:44] [Rank 0] step:9961/10000 train_time:803705ms step_avg:80.69ms +[2025-07-09 08:25:44] [Rank 0] step:9961/10000 train_time:803705ms step_avg:80.69ms +[2025-07-09 08:25:46] [Rank 0] step:9981/10000 train_time:805199ms step_avg:80.67ms +[2025-07-09 08:25:46] [Rank 0] step:9981/10000 train_time:805199ms step_avg:80.67ms +[2025-07-09 08:25:48] [Rank 0] step:10000/10000 train_time:807256ms step_avg:80.73ms +[2025-07-09 08:25:48] [Rank 0] step:10000/10000 train_time:807256ms step_avg:80.73ms +[2025-07-09 08:25:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 08:25:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 08:25:49] [Rank 0] PRINT: step:10000/10000 train_loss:0.8578 val_loss:0.8612 train_time:807335ms step_avg:80.73ms +[2025-07-09 08:25:49] [Rank 0] PRINT: step:10000/10000 train_loss:0.8578 val_loss:0.8612 train_time:807335ms step_avg:80.73ms +[2025-07-09 08:25:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 08:25:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 08:25:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-09 08:25:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-09 08:25:49] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:25:49] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 08:31:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:31:11] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 08:31:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:31:11] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 08:31:11] [Rank 0] Total Loss: 5.5681 +[2025-07-09 08:31:11] [Rank 0] Total Loss: 5.5681 +[2025-07-09 08:31:11] [Rank 0] Total FTA: 0.9803 +[2025-07-09 08:31:11] [Rank 0] Total FTA: 0.9803 +[2025-07-09 08:31:11] [Rank 0] Group 0 Loss: 5.8842 +[2025-07-09 08:31:11] [Rank 0] Group 0 Loss: 5.8842 +[2025-07-09 08:31:11] [Rank 0] Group 1 Loss: 5.9198 +[2025-07-09 08:31:11] [Rank 0] Group 1 Loss: 5.9198 +[2025-07-09 08:31:11] [Rank 0] Group 2 Loss: 5.4404 +[2025-07-09 08:31:11] [Rank 0] Group 2 Loss: 5.4404 +[2025-07-09 08:31:11] [Rank 0] Group 3 Loss: 5.5469 +[2025-07-09 08:31:11] [Rank 0] Group 3 Loss: 5.5469 +[2025-07-09 08:31:11] [Rank 0] Group 4 Loss: 5.4701 +[2025-07-09 08:31:11] [Rank 0] Group 4 Loss: 5.4701 +[2025-07-09 08:31:11] [Rank 0] Group 5 Loss: 5.4206 +[2025-07-09 08:31:11] [Rank 0] Group 5 Loss: 5.4206 +[2025-07-09 08:31:11] [Rank 0] Group 6 Loss: 5.4012 +[2025-07-09 08:31:11] [Rank 0] Group 6 Loss: 5.4012 +[2025-07-09 08:31:11] [Rank 0] Group 7 Loss: 5.5370 +[2025-07-09 08:31:11] [Rank 0] Group 7 Loss: 5.5370 +[2025-07-09 08:31:11] [Rank 0] Group 8 Loss: 5.5108 +[2025-07-09 08:31:11] [Rank 0] Group 8 Loss: 5.5108 +[2025-07-09 08:31:11] [Rank 0] Group 9 Loss: 5.4583 +[2025-07-09 08:31:11] [Rank 0] Group 9 Loss: 5.4583 +[2025-07-09 08:31:11] [Rank 0] Group 10 Loss: 5.5326 +[2025-07-09 08:31:11] [Rank 0] Group 10 Loss: 5.5326 +[2025-07-09 08:31:11] [Rank 0] Group 11 Loss: 5.4879 +[2025-07-09 08:31:11] [Rank 0] Group 11 Loss: 5.4879 +[2025-07-09 08:31:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 2 FTA: 0.8958 +[2025-07-09 08:31:11] [Rank 0] Group 2 FTA: 0.8958 +[2025-07-09 08:31:11] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 5 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 5 FTA: 1.0000 +[2025-07-09 08:31:11] [Rank 0] Group 6 FTA: 0.9974 +[2025-07-09 08:31:11] [Rank 0] Group 6 FTA: 0.9974 +[2025-07-09 08:31:11] [Rank 0] Group 7 FTA: 0.9740 +[2025-07-09 08:31:11] [Rank 0] Group 7 FTA: 0.9740 +[2025-07-09 08:31:11] [Rank 0] Group 8 FTA: 0.9661 +[2025-07-09 08:31:11] [Rank 0] Group 8 FTA: 0.9661 +[2025-07-09 08:31:11] [Rank 0] Group 9 FTA: 0.9805 +[2025-07-09 08:31:11] [Rank 0] Group 9 FTA: 0.9805 +[2025-07-09 08:31:11] [Rank 0] Group 10 FTA: 0.9648 +[2025-07-09 08:31:11] [Rank 0] Group 10 FTA: 0.9648 +[2025-07-09 08:31:11] [Rank 0] Group 11 FTA: 0.9766 +[2025-07-09 08:31:11] [Rank 0] Group 11 FTA: 0.9766 +[2025-07-09 08:31:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:31:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_loss_curves.png +[2025-07-09 08:31:12] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:31:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/per_class_acc_curves.png +[2025-07-09 08:31:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:31:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_loss_curve.png +[2025-07-09 08:31:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:31:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.002_seed_49/total_acc_curve.png +[2025-07-09 08:31:13] [Rank 0] step:10001/10000 train_time:807356ms step_avg:80.73ms +[2025-07-09 08:31:13] [Rank 0] step:10001/10000 train_time:807356ms step_avg:80.73ms +[2025-07-09 08:31:13] [Rank 0] PRINT: --- Training Finished: Wed Jul 9 08:31:13 2025 --- +[2025-07-09 08:31:13] [Rank 0] PRINT: --- Training Finished: Wed Jul 9 08:31:13 2025 --- +[2025-07-09 08:31:13] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9916 MiB +[2025-07-09 08:31:13] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 9916 MiB diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/config.json b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a506710b68ec07420d7cb822af68de5f71d8a52f --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "f9130572-45ea-4450-af12-a55e2e4af04e", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..158d6e08080cea8615568f2e93a0c11c8c65c967 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd48fa19fe15ca886b5d506403ea2e70d4100f2efdaedb664c1aae81184312c4 +size 319313 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..45ebe935d2a346c148d81918fcf6135e27a6c4bc --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8a47594918e27c7e54448cd2600affbbc80218d71e31a948e8bbf994e65c63e +size 382589 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png 
b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..f40dcbda2333f3cd9ceeef40d12cea9cc3b30ebb --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af7634b60e2b4c09a60a1c8e51a6e71e4024d3392a9780200b365c9bbebce504 +size 108375 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..e4684f273c269e2ea0655272dab78a2d9700c156 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c4c4a6c106fd1a4a2152fed8096149e334ed54544f9be2d95b7a47a6a99d4bb +size 110848 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/training_log_f9130572-45ea-4450-af12-a55e2e4af04e.txt b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/training_log_f9130572-45ea-4450-af12-a55e2e4af04e.txt new file mode 100644 index 0000000000000000000000000000000000000000..6077ac49a351aa6cdb31c0249048bdcb703ca942 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/training_log_f9130572-45ea-4450-af12-a55e2e4af04e.txt @@ -0,0 +1,5132 @@ +[2025-07-06 04:21:56] [Rank 0] PRINT: --- Script Start: Sun Jul 6 04:21:56 2025 --- +[2025-07-06 04:21:56] [Rank 0] PRINT: --- Script Start: Sun Jul 6 04:21:56 2025 --- +[2025-07-06 04:21:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-06 04:21:56] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-06 04:21:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-06 04:21:56] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-06 04:21:56] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-06 04:21:56] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-06 04:21:56] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42 +[2025-07-06 04:21:56] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42 +[2025-07-06 04:21:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as 
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # itertools.cycle wraps around the shard list, so multi-epoch training just continues from the first shard + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn); " + "5: All Adam (No Muon, all applicable matrices to Adam); " + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); " + "7: Muon(VO Attn, MLP)/Adam(QK Attn); " + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
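+ # Example launch for one of these modes (illustrative only: the script filename + # and the GPU count are assumptions, not taken from this repo): + # torchrun --nproc_per_node=4 train_qa.py --optimizer_mode 0 \ + # --model_parameterization qkvo --adam_lr 0.002 --seed 49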
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) +
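+# Worked check of the recurring "not perfectly divisible" validation warning (the +# world_size of 4 is inferred from the logged val_batch_size, not stated in this file): +# val_batch_size = val_seq_len * world_size = 65536 * 4 = 262144 tokens per eval step, +# and val_tokens / val_batch_size = 1966080 / 262144 = 7.5, so each evaluation runs +# 7 full batches and skips the remaining 131072 tokens. +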
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append each record to the log file exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?'
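+ # e.g. (made-up record, purely illustrative): + # cleaned_text = "Where was Alice Smith born? New York City" + # prompt -> "Where was Alice Smith born?" and answer -> "New York City"; + # expected_token is then the first GPT-2 token of " New York City" (leading space kept)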
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function; history keys are training steps recorded at each evaluation."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(e) for e in step_data.keys()])
+            values = [step_data[str(e)] for e in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
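+    Apparently superseded by run_detailed_evaluation above (which also reports FTA);
+    it is not called from the training loop below.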
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
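+# train_loss_sum above and train_step_count below accumulate the per-rank training loss
+# between validation points; both are reset after every validation report.
+# Quick worked example of the get_lr schedule above (num_iterations=10000, cooldown_frac=0.8):
+#   step  1000 -> x=0.1 < 0.2           -> LR multiplier 1.0  (stable phase)
+#   step  5000 -> x=0.5, w=(1-0.5)/0.8  -> 0.625*1.0 + 0.375*0.1 = 0.6625
+#   step 10000 -> x=1.0, w=0.0          -> LR multiplier 0.1  (end of cooldown)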
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 04:21:56] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
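+# Worked example of the LR schedule above (derived from get_lr with the logged
+# hyperparameters num_iterations=10000, cooldown_frac=0.8): the multiplier stays
+# at 1.0 through step 2000 (x <= 0.2), then decays linearly toward 0.1, e.g.
+# get_lr(6000) -> 0.5*1.0 + 0.5*0.1 = 0.55 and get_lr(10000) -> 0.1.
+# train_loss_sum / train_step_count accumulate the per-step training loss between
+# validation points; both are reset after each validation block and all-reduced
+# across ranks before the logged train_loss is computed.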
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 04:21:57] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 04:21:57] [Rank 0] PRINT: Constructing model...
+[2025-07-06 04:21:59] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-06 04:21:59] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-06 04:21:59] [Rank 0] PRINT: Testing model forward function:
+[2025-07-06 04:22:00] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-06 04:22:00] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-06 04:22:00] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-06 04:22:00] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-06 04:22:00] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-06 04:22:00] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-06 04:22:00] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-06 04:22:00] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-06 04:22:00] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-06 04:22:00] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-06 04:22:00] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-06 04:22:00] [Rank 0] PRINT: Model compilation complete.
+[2025-07-06 04:22:00] [Rank 0] PRINT: Starting warmup...
+[2025-07-06 04:29:04] [Rank 0] PRINT: Warmup complete.
+[2025-07-06 04:29:04] [Rank 0] PRINT: Starting training...
+[2025-07-06 04:29:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 04:33:21] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-06 04:33:23] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.46ms
+[2025-07-06 04:33:24] [Rank 0] step:41/10000 train_time:3211ms step_avg:78.32ms
+[2025-07-06 04:33:26] [Rank 0] step:61/10000 train_time:4673ms step_avg:76.61ms
+[2025-07-06 04:33:27] [Rank 0] step:81/10000 train_time:6137ms step_avg:75.77ms
+[2025-07-06 04:33:29] [Rank 0] step:101/10000 train_time:8261ms step_avg:81.80ms
+[2025-07-06 04:33:31] [Rank 0] step:121/10000 train_time:9723ms step_avg:80.36ms
+[2025-07-06 04:33:32] [Rank 0] step:141/10000 train_time:11188ms step_avg:79.35ms
+[2025-07-06 04:33:34] [Rank 0] step:161/10000 train_time:12652ms step_avg:78.58ms
+[2025-07-06 04:33:36] [Rank 0] step:181/10000 train_time:14117ms step_avg:78.00ms
+[2025-07-06 04:33:37] [Rank 0] step:201/10000 train_time:16228ms step_avg:80.73ms
+[2025-07-06 04:33:39] [Rank 0] step:221/10000 train_time:17692ms step_avg:80.05ms
+[2025-07-06 04:33:40] [Rank 0] step:241/10000 train_time:19159ms step_avg:79.50ms
+[2025-07-06 04:33:41] [Rank 0] step:261/10000 train_time:20625ms step_avg:79.02ms
+[2025-07-06 04:33:44] [Rank 0] step:281/10000 train_time:22754ms step_avg:80.97ms
+[2025-07-06 04:33:45] [Rank 0] step:301/10000 train_time:24219ms step_avg:80.46ms
+[2025-07-06 04:33:47] [Rank 0] step:321/10000 train_time:25685ms step_avg:80.01ms
+[2025-07-06 04:33:48] [Rank 0] step:341/10000 train_time:27153ms step_avg:79.63ms
+[2025-07-06 04:33:50] [Rank 0] step:361/10000 train_time:28876ms step_avg:79.99ms
+[2025-07-06 04:33:52] [Rank 0] step:381/10000 train_time:30752ms step_avg:80.72ms
+[2025-07-06 04:33:53] [Rank 0] step:401/10000 train_time:32217ms step_avg:80.34ms
+[2025-07-06 04:33:55] [Rank 0] step:421/10000 train_time:33681ms step_avg:80.00ms
+[2025-07-06 04:33:56] [Rank 0] step:441/10000 train_time:35263ms step_avg:79.96ms
+[2025-07-06 04:33:58] [Rank 0] step:461/10000 train_time:37505ms step_avg:81.35ms
+[2025-07-06 04:34:00] [Rank 0] step:481/10000 train_time:38968ms step_avg:81.01ms
+[2025-07-06 04:34:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 04:34:02] [Rank 0] PRINT: step:500/10000 train_loss:2.2950 val_loss:1.3675 train_time:40430ms step_avg:80.86ms
+[2025-07-06 04:34:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 04:34:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 04:34:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 04:39:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 04:39:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 04:39:26] [Rank 0] Total Loss: 4.2642
+[2025-07-06 04:39:26] [Rank 0] Total FTA: 0.1092
+[2025-07-06 04:39:26] [Rank 0] Group 0 Loss: 4.4715
+[2025-07-06 04:39:26] [Rank 0] Group 1 Loss: 4.0785
+[2025-07-06 04:39:26] [Rank 0] Group 2 Loss: 4.0836
+[2025-07-06 04:39:26] [Rank 0] Group 3 Loss: 4.3275
+[2025-07-06 04:39:26] [Rank 0] Group 4 Loss: 4.2820
+[2025-07-06 04:39:26] [Rank 0] Group 5 Loss: 4.1969
+[2025-07-06 04:39:26] [Rank 0] Group 6 Loss: 4.1620
+[2025-07-06 04:39:26] [Rank 0] Group 7 Loss: 4.2900
+[2025-07-06 04:39:26] [Rank 0] Group 8 Loss: 4.2422
+[2025-07-06 04:39:26] [Rank 0] Group 9 Loss: 4.2127
+[2025-07-06 04:39:26] [Rank 0] Group 10 Loss: 4.2514
+[2025-07-06 04:39:26] [Rank 0] Group 11 Loss: 4.2967
+[2025-07-06 04:39:26] [Rank 0] Group 0 FTA: 0.1417
+[2025-07-06 04:39:26] [Rank 0] Group 1 FTA: 0.1615
+[2025-07-06 04:39:26] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-06 04:39:26] [Rank 0] Group 3 FTA: 0.0990
+[2025-07-06 04:39:26] [Rank 0] Group 4 FTA: 0.0417
+[2025-07-06 04:39:26] [Rank 0] Group 5 FTA: 0.1302
+[2025-07-06 04:39:26] [Rank 0] Group 6 FTA: 0.1016
+[2025-07-06 04:39:26] [Rank 0] Group 7 FTA: 0.1094
+[2025-07-06 04:39:26] [Rank 0] Group 8 FTA: 0.1172
+[2025-07-06 04:39:26] [Rank 0] Group 9 FTA: 0.1133
+[2025-07-06 04:39:26] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-06 04:39:26] [Rank 0] Group 11 FTA: 0.0967
+[2025-07-06 04:39:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 04:39:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 04:39:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 04:39:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 04:39:28] [Rank 0] step:501/10000 train_time:40452ms step_avg:80.74ms
+[2025-07-06 04:39:29] [Rank 0] step:521/10000 train_time:41925ms step_avg:80.47ms
+[2025-07-06 04:39:31] [Rank 0] step:541/10000 train_time:43440ms step_avg:80.30ms
+[2025-07-06 04:39:33] [Rank 0] step:561/10000 train_time:45514ms step_avg:81.13ms
+[2025-07-06 04:39:34] [Rank 0] step:581/10000 train_time:46971ms step_avg:80.84ms
+[2025-07-06 04:39:36] [Rank 0] step:601/10000 train_time:48429ms step_avg:80.58ms
+[2025-07-06 04:39:37] [Rank 0] step:621/10000 train_time:49889ms step_avg:80.34ms
+[2025-07-06 04:39:39] [Rank 0] step:641/10000 train_time:51588ms step_avg:80.48ms
+[2025-07-06 04:39:40] [Rank 0] step:661/10000 train_time:53047ms step_avg:80.25ms
+[2025-07-06 04:39:42] [Rank 0] step:681/10000 train_time:54506ms step_avg:80.04ms
+[2025-07-06 04:39:43] [Rank 0] step:701/10000 train_time:55968ms step_avg:79.84ms
+[2025-07-06 04:39:45] [Rank 0] step:721/10000 train_time:57688ms step_avg:80.01ms
+[2025-07-06 04:39:47] [Rank 0] step:741/10000 train_time:59535ms step_avg:80.34ms
+[2025-07-06 04:39:48] [Rank 0] step:761/10000 train_time:61005ms step_avg:80.16ms
+[2025-07-06 04:39:50] [Rank 0] step:781/10000 train_time:62476ms step_avg:80.00ms
+[2025-07-06 04:39:51] [Rank 0] step:801/10000 train_time:63949ms step_avg:79.84ms
+[2025-07-06 04:39:53] [Rank 0] step:821/10000 train_time:65656ms step_avg:79.97ms
+[2025-07-06 04:39:54] [Rank 0] step:841/10000 train_time:67130ms step_avg:79.82ms
+[2025-07-06 04:39:56] [Rank 0] step:861/10000 train_time:68601ms step_avg:79.68ms
+[2025-07-06 04:39:57] [Rank 0] step:881/10000 train_time:70075ms step_avg:79.54ms
+[2025-07-06 04:39:59] [Rank 0] step:901/10000 train_time:71551ms step_avg:79.41ms
+[2025-07-06 04:40:01] [Rank 0] step:921/10000 train_time:73693ms step_avg:80.01ms
+[2025-07-06 04:40:02] [Rank 0] step:941/10000 train_time:75165ms step_avg:79.88ms
+[2025-07-06 04:40:04] [Rank 0] step:961/10000 train_time:76638ms step_avg:79.75ms
+[2025-07-06 04:40:05] [Rank 0] step:981/10000 train_time:78113ms step_avg:79.63ms
+[2025-07-06 04:40:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 04:40:08] [Rank 0] PRINT: step:1000/10000 train_loss:1.2757 val_loss:1.2002 train_time:80245ms step_avg:80.25ms +[2025-07-06 04:40:08] [Rank 0] PRINT: step:1000/10000 train_loss:1.2757 val_loss:1.2002 train_time:80245ms step_avg:80.25ms +[2025-07-06 04:40:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 04:40:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 04:40:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 04:40:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 04:40:09] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 04:40:09] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 04:45:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 04:45:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 04:45:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 04:45:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 04:45:35] [Rank 0] Total Loss: 4.7667 +[2025-07-06 04:45:35] [Rank 0] Total Loss: 4.7667 +[2025-07-06 04:45:35] [Rank 0] Total FTA: 0.3092 +[2025-07-06 04:45:35] [Rank 0] Total FTA: 0.3092 +[2025-07-06 04:45:35] [Rank 0] Group 0 Loss: 5.1833 +[2025-07-06 04:45:35] [Rank 0] Group 0 Loss: 5.1833 +[2025-07-06 04:45:35] [Rank 0] Group 1 Loss: 4.7190 +[2025-07-06 04:45:35] [Rank 0] Group 1 Loss: 4.7190 +[2025-07-06 04:45:35] [Rank 0] Group 2 Loss: 4.6180 +[2025-07-06 04:45:35] [Rank 0] Group 2 Loss: 4.6180 +[2025-07-06 04:45:35] [Rank 0] Group 3 Loss: 4.7282 +[2025-07-06 04:45:35] [Rank 0] Group 3 Loss: 4.7282 +[2025-07-06 04:45:35] [Rank 0] Group 4 Loss: 4.6822 +[2025-07-06 04:45:35] [Rank 0] Group 4 Loss: 4.6822 +[2025-07-06 04:45:35] [Rank 0] Group 5 Loss: 4.6796 +[2025-07-06 04:45:35] [Rank 0] Group 5 Loss: 4.6796 +[2025-07-06 04:45:35] [Rank 0] Group 6 Loss: 4.6778 +[2025-07-06 04:45:35] [Rank 0] Group 6 Loss: 4.6778 +[2025-07-06 04:45:35] [Rank 0] Group 7 Loss: 4.7415 +[2025-07-06 04:45:35] [Rank 0] Group 7 Loss: 4.7415 +[2025-07-06 04:45:35] [Rank 0] Group 8 Loss: 4.6997 +[2025-07-06 04:45:35] [Rank 0] Group 8 Loss: 4.6997 +[2025-07-06 04:45:35] [Rank 0] Group 9 Loss: 4.6634 +[2025-07-06 04:45:35] [Rank 0] Group 9 Loss: 4.6634 +[2025-07-06 04:45:35] [Rank 0] Group 10 Loss: 4.7090 +[2025-07-06 04:45:35] [Rank 0] Group 10 Loss: 4.7090 +[2025-07-06 04:45:35] [Rank 0] Group 11 Loss: 4.7291 +[2025-07-06 04:45:35] [Rank 0] Group 11 Loss: 4.7291 +[2025-07-06 04:45:35] [Rank 0] Group 0 FTA: 0.3381 +[2025-07-06 04:45:35] [Rank 0] Group 0 FTA: 0.3381 +[2025-07-06 04:45:35] [Rank 0] Group 1 FTA: 0.3333 +[2025-07-06 04:45:35] [Rank 0] Group 1 FTA: 0.3333 +[2025-07-06 04:45:35] [Rank 0] Group 2 FTA: 0.4453 +[2025-07-06 04:45:35] [Rank 0] Group 2 FTA: 0.4453 +[2025-07-06 04:45:35] [Rank 0] Group 3 FTA: 0.2917 +[2025-07-06 04:45:35] [Rank 0] Group 3 FTA: 0.2917 +[2025-07-06 04:45:35] [Rank 0] Group 4 FTA: 0.2240 +[2025-07-06 04:45:35] [Rank 0] Group 4 FTA: 0.2240 +[2025-07-06 04:45:35] [Rank 0] Group 5 FTA: 0.3281 +[2025-07-06 04:45:35] [Rank 0] Group 5 FTA: 0.3281 +[2025-07-06 04:45:35] [Rank 0] Group 6 FTA: 0.3125 +[2025-07-06 04:45:35] [Rank 0] Group 6 FTA: 0.3125 +[2025-07-06 04:45:35] [Rank 0] Group 7 FTA: 0.2865 +[2025-07-06 04:45:35] [Rank 0] Group 7 FTA: 0.2865 +[2025-07-06 04:45:35] [Rank 0] Group 8 FTA: 0.2760 +[2025-07-06 04:45:35] [Rank 0] Group 8 FTA: 0.2760 +[2025-07-06 04:45:35] [Rank 0] Group 9 FTA: 0.3047 
+[2025-07-06 04:45:35] [Rank 0] Group 9 FTA: 0.3047 +[2025-07-06 04:45:35] [Rank 0] Group 10 FTA: 0.2656 +[2025-07-06 04:45:35] [Rank 0] Group 10 FTA: 0.2656 +[2025-07-06 04:45:35] [Rank 0] Group 11 FTA: 0.3018 +[2025-07-06 04:45:35] [Rank 0] Group 11 FTA: 0.3018 +[2025-07-06 04:45:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 04:45:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 04:45:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 04:45:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 04:45:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 04:45:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 04:45:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 04:45:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 04:45:37] [Rank 0] step:1001/10000 train_time:80268ms step_avg:80.19ms +[2025-07-06 04:45:37] [Rank 0] step:1001/10000 train_time:80268ms step_avg:80.19ms +[2025-07-06 04:45:39] [Rank 0] step:1021/10000 train_time:81842ms step_avg:80.16ms +[2025-07-06 04:45:39] [Rank 0] step:1021/10000 train_time:81842ms step_avg:80.16ms +[2025-07-06 04:45:40] [Rank 0] step:1041/10000 train_time:83306ms step_avg:80.02ms +[2025-07-06 04:45:40] [Rank 0] step:1041/10000 train_time:83306ms step_avg:80.02ms +[2025-07-06 04:45:41] [Rank 0] step:1061/10000 train_time:84773ms step_avg:79.90ms +[2025-07-06 04:45:41] [Rank 0] step:1061/10000 train_time:84773ms step_avg:79.90ms +[2025-07-06 04:45:44] [Rank 0] step:1081/10000 train_time:86241ms step_avg:79.78ms +[2025-07-06 04:45:44] [Rank 0] step:1081/10000 train_time:86241ms step_avg:79.78ms +[2025-07-06 04:45:45] [Rank 0] step:1101/10000 train_time:88364ms step_avg:80.26ms +[2025-07-06 04:45:45] [Rank 0] step:1101/10000 train_time:88364ms step_avg:80.26ms +[2025-07-06 04:45:46] [Rank 0] step:1121/10000 train_time:89831ms step_avg:80.13ms +[2025-07-06 04:45:46] [Rank 0] step:1121/10000 train_time:89831ms step_avg:80.13ms +[2025-07-06 04:45:48] [Rank 0] step:1141/10000 train_time:91299ms step_avg:80.02ms +[2025-07-06 04:45:48] [Rank 0] step:1141/10000 train_time:91299ms step_avg:80.02ms +[2025-07-06 04:45:49] [Rank 0] step:1161/10000 train_time:92769ms step_avg:79.90ms +[2025-07-06 04:45:49] [Rank 0] step:1161/10000 train_time:92769ms step_avg:79.90ms +[2025-07-06 04:45:52] [Rank 0] step:1181/10000 train_time:94893ms step_avg:80.35ms +[2025-07-06 04:45:52] [Rank 0] step:1181/10000 train_time:94893ms step_avg:80.35ms +[2025-07-06 04:45:53] [Rank 0] step:1201/10000 train_time:96362ms step_avg:80.23ms +[2025-07-06 04:45:53] [Rank 0] step:1201/10000 train_time:96362ms step_avg:80.23ms +[2025-07-06 04:45:54] [Rank 0] step:1221/10000 train_time:97833ms step_avg:80.13ms +[2025-07-06 04:45:54] [Rank 0] step:1221/10000 train_time:97833ms step_avg:80.13ms +[2025-07-06 
+[2025-07-06 04:45:56] [Rank 0] step:1241/10000 train_time:99304ms step_avg:80.02ms
+[2025-07-06 04:45:58] [Rank 0] step:1261/10000 train_time:100776ms step_avg:79.92ms
+[2025-07-06 04:46:00] [Rank 0] step:1281/10000 train_time:102895ms step_avg:80.32ms
+[2025-07-06 04:46:01] [Rank 0] step:1301/10000 train_time:104364ms step_avg:80.22ms
+[2025-07-06 04:46:02] [Rank 0] step:1321/10000 train_time:105835ms step_avg:80.12ms
+[2025-07-06 04:46:04] [Rank 0] step:1341/10000 train_time:107308ms step_avg:80.02ms
+[2025-07-06 04:46:06] [Rank 0] step:1361/10000 train_time:109419ms step_avg:80.40ms
+[2025-07-06 04:46:08] [Rank 0] step:1381/10000 train_time:110990ms step_avg:80.37ms
+[2025-07-06 04:46:09] [Rank 0] step:1401/10000 train_time:112461ms step_avg:80.27ms
+[2025-07-06 04:46:11] [Rank 0] step:1421/10000 train_time:114034ms step_avg:80.25ms
+[2025-07-06 04:46:12] [Rank 0] step:1441/10000 train_time:115508ms step_avg:80.16ms
+[2025-07-06 04:46:14] [Rank 0] step:1461/10000 train_time:117217ms step_avg:80.23ms
+[2025-07-06 04:46:15] [Rank 0] step:1481/10000 train_time:118691ms step_avg:80.14ms
+[2025-07-06 04:46:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 04:46:18] [Rank 0] PRINT: step:1500/10000 train_loss:1.0835 val_loss:1.0171 train_time:120166ms step_avg:80.11ms
+[2025-07-06 04:46:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 04:46:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 04:46:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 04:51:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 04:51:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 04:51:42] [Rank 0] Total Loss: 5.1168
+[2025-07-06 04:51:42] [Rank 0] Total FTA: 0.6586
+[2025-07-06 04:51:42] [Rank 0] Group 0 Loss: 5.4488
+[2025-07-06 04:51:42] [Rank 0] Group 1 Loss: 4.9687
+[2025-07-06 04:51:42] [Rank 0] Group 2 Loss: 4.9595
+[2025-07-06 04:51:42] [Rank 0] Group 3 Loss: 5.2047
+[2025-07-06 04:51:42] [Rank 0] Group 4 Loss: 5.0600
+[2025-07-06 04:51:42] [Rank 0] Group 5 Loss: 5.0858
+[2025-07-06 04:51:42] [Rank 0] Group 6 Loss: 4.9858
+[2025-07-06 04:51:42] [Rank 0] Group 7 Loss: 5.1018
+[2025-07-06 04:51:42] [Rank 0] Group 8 Loss: 5.0667
+[2025-07-06 04:51:42] [Rank 0] Group 9 Loss: 5.0503
+[2025-07-06 04:51:42] [Rank 0] Group 10 Loss: 5.0783
+[2025-07-06 04:51:42] [Rank 0] Group 11 Loss: 5.0912
+[2025-07-06 04:51:42] [Rank 0] Group 0 FTA: 0.6710
+[2025-07-06 04:51:42] [Rank 0] Group 1 FTA: 0.4870
+[2025-07-06 04:51:42] [Rank 0] Group 2 FTA: 0.7578
+[2025-07-06 04:51:42] [Rank 0] Group 3 FTA: 0.6979
+[2025-07-06 04:51:42] [Rank 0] Group 4 FTA: 0.7344
+[2025-07-06 04:51:42] [Rank 0] Group 5 FTA: 0.5964
+[2025-07-06 04:51:42] [Rank 0] Group 6 FTA: 0.6406
+[2025-07-06 04:51:42] [Rank 0] Group 7 FTA: 0.6198
+[2025-07-06 04:51:42] [Rank 0] Group 8 FTA: 0.6536
+[2025-07-06 04:51:42] [Rank 0] Group 9 FTA: 0.6641
+[2025-07-06 04:51:42] [Rank 0] Group 10 FTA: 0.6758
+[2025-07-06 04:51:42] [Rank 0] Group 11 FTA: 0.6699
+[2025-07-06 04:51:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 04:51:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 04:51:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 04:51:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 04:51:44] [Rank 0] step:1501/10000 train_time:120188ms step_avg:80.07ms
+[2025-07-06 04:51:45] [Rank 0] step:1521/10000 train_time:121669ms step_avg:79.99ms
+[2025-07-06 04:51:47] [Rank 0] step:1541/10000 train_time:123788ms step_avg:80.33ms
+[2025-07-06 04:51:49] [Rank 0] step:1561/10000 train_time:125254ms step_avg:80.24ms
+[2025-07-06 04:51:50] [Rank 0] step:1581/10000 train_time:126720ms step_avg:80.15ms
+[2025-07-06 04:51:52] [Rank 0] step:1601/10000 train_time:128472ms step_avg:80.24ms
+[2025-07-06 04:51:54] [Rank 0] step:1621/10000 train_time:130196ms step_avg:80.32ms
+[2025-07-06 04:51:56] [Rank 0] step:1641/10000 train_time:132076ms step_avg:80.49ms
+[2025-07-06 04:51:57] [Rank 0] step:1661/10000 train_time:133544ms step_avg:80.40ms
+[2025-07-06 04:51:59] [Rank 0] step:1681/10000 train_time:135013ms step_avg:80.32ms
+[2025-07-06 04:52:00] [Rank 0] step:1701/10000 train_time:136484ms step_avg:80.24ms
+[2025-07-06 04:52:02] [Rank 0] step:1721/10000 train_time:138605ms step_avg:80.54ms
+[2025-07-06 04:52:04] [Rank 0] step:1741/10000 train_time:140073ms step_avg:80.46ms
+[2025-07-06 04:52:05] [Rank 0] step:1761/10000 train_time:141542ms step_avg:80.38ms
+[2025-07-06 04:52:07] [Rank 0] step:1781/10000 train_time:143014ms step_avg:80.30ms
+[2025-07-06 04:52:09] [Rank 0] step:1801/10000 train_time:144535ms step_avg:80.25ms
+[2025-07-06 04:52:10] [Rank 0] step:1821/10000 train_time:146706ms step_avg:80.56ms
+[2025-07-06 04:52:12] [Rank 0] step:1841/10000 train_time:148175ms step_avg:80.49ms
+[2025-07-06 04:52:13] [Rank 0] step:1861/10000 train_time:149647ms step_avg:80.41ms
+[2025-07-06 04:52:15] [Rank 0] step:1881/10000 train_time:151115ms step_avg:80.34ms
+[2025-07-06 04:52:17] [Rank 0] step:1901/10000 train_time:153230ms step_avg:80.60ms
+[2025-07-06 04:52:18] [Rank 0] step:1921/10000 train_time:154699ms step_avg:80.53ms
+[2025-07-06 04:52:20] [Rank 0] step:1941/10000 train_time:156170ms step_avg:80.46ms
+[2025-07-06 04:52:21] [Rank 0] step:1961/10000 train_time:157644ms step_avg:80.39ms
+[2025-07-06 04:52:23] [Rank 0] step:1981/10000 train_time:159786ms step_avg:80.66ms
+[2025-07-06 04:52:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 04:52:26] [Rank 0] PRINT: step:2000/10000 train_loss:0.9146 val_loss:0.9593 train_time:161238ms step_avg:80.62ms
+[2025-07-06 04:52:26] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 04:52:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 04:52:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 04:57:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 04:57:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 04:57:50] [Rank 0] Total Loss: 5.2406
+[2025-07-06 04:57:50] [Rank 0] Total FTA: 0.8042
+[2025-07-06 04:57:50] [Rank 0] Group 0 Loss: 5.6146
+[2025-07-06 04:57:50] [Rank 0] Group 1 Loss: 5.0761
+[2025-07-06 04:57:50] [Rank 0] Group 2 Loss: 5.0556
+[2025-07-06 04:57:50] [Rank 0] Group 3 Loss: 5.3822
+[2025-07-06 04:57:50] [Rank 0] Group 4 Loss: 5.1292
+[2025-07-06 04:57:50] [Rank 0] Group 5 Loss: 5.0986
+[2025-07-06 04:57:50] [Rank 0] Group 6 Loss: 5.1221
+[2025-07-06 04:57:50] [Rank 0] Group 7 Loss: 5.2707
+[2025-07-06 04:57:50] [Rank 0] Group 8 Loss: 5.2065
+[2025-07-06 04:57:50] [Rank 0] Group 9 Loss: 5.2054
+[2025-07-06 04:57:50] [Rank 0] Group 10 Loss: 5.2210
+[2025-07-06 04:57:50] [Rank 0] Group 11 Loss: 5.1975
+[2025-07-06 04:57:50] [Rank 0] Group 0 FTA: 0.8466
+[2025-07-06 04:57:50] [Rank 0] Group 1 FTA: 0.6823
+[2025-07-06 04:57:50] [Rank 0] Group 2 FTA: 0.8490
+[2025-07-06 04:57:51] [Rank 0] Group 3 FTA: 0.7682
+[2025-07-06 04:57:51] [Rank 0] Group 4 FTA: 0.8490
+[2025-07-06 04:57:51] [Rank 0] Group 5 FTA: 0.7786
+[2025-07-06 04:57:51] [Rank 0] Group 6 FTA: 0.8281
+[2025-07-06 04:57:51] [Rank 0] Group 7 FTA: 0.7734
+[2025-07-06 04:57:51] [Rank 0] Group 8 FTA: 0.8307
+[2025-07-06 04:57:51] [Rank 0] Group 9 FTA: 0.8047
+[2025-07-06 04:57:51] [Rank 0] Group 10 FTA: 0.7656
+[2025-07-06 04:57:51] [Rank 0] Group 11 FTA: 0.8193
+[2025-07-06 04:57:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 04:57:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 04:57:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 04:57:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 04:57:52] [Rank 0] step:2001/10000 train_time:161262ms step_avg:80.59ms
+[2025-07-06 04:57:54] [Rank 0] step:2021/10000 train_time:162737ms step_avg:80.52ms
+[2025-07-06 04:57:55] [Rank 0] step:2041/10000 train_time:164205ms step_avg:80.45ms
+[2025-07-06 04:57:57] [Rank 0] step:2061/10000 train_time:165673ms step_avg:80.38ms
+[2025-07-06 04:57:58] [Rank 0] step:2081/10000 train_time:167379ms step_avg:80.43ms
+[2025-07-06 04:58:00] [Rank 0] step:2101/10000 train_time:168949ms step_avg:80.41ms
+[2025-07-06 04:58:01] [Rank 0] step:2121/10000 train_time:170416ms step_avg:80.35ms
+[2025-07-06 04:58:03] [Rank 0] step:2141/10000 train_time:171884ms step_avg:80.28ms
+[2025-07-06 04:58:05] [Rank 0] step:2161/10000 train_time:173405ms step_avg:80.24ms
+[2025-07-06 04:58:07] [Rank 0] step:2181/10000 train_time:175465ms step_avg:80.45ms
+[2025-07-06 04:58:08] [Rank 0] step:2201/10000 train_time:176933ms step_avg:80.39ms
+[2025-07-06 04:58:10] [Rank 0] step:2221/10000 train_time:178502ms step_avg:80.37ms
+[2025-07-06 04:58:11] [Rank 0] step:2241/10000 train_time:180170ms step_avg:80.40ms
+[2025-07-06 04:58:13] [Rank 0] step:2261/10000 train_time:181982ms step_avg:80.49ms
+[2025-07-06 04:58:15] [Rank 0] step:2281/10000 train_time:183475ms step_avg:80.44ms
+[2025-07-06 04:58:16] [Rank 0] step:2301/10000 train_time:184970ms step_avg:80.39ms
+[2025-07-06 04:58:18] [Rank 0] step:2321/10000 train_time:186466ms step_avg:80.34ms
+[2025-07-06 04:58:20] [Rank 0] step:2341/10000 train_time:188012ms step_avg:80.31ms
+[2025-07-06 04:58:21] [Rank 0] step:2361/10000 train_time:190116ms step_avg:80.52ms
+[2025-07-06 04:58:23] [Rank 0] step:2381/10000 train_time:191616ms step_avg:80.48ms
+[2025-07-06 04:58:24] [Rank 0] step:2401/10000 train_time:193113ms step_avg:80.43ms
+[2025-07-06 04:58:26] [Rank 0] step:2421/10000 train_time:194611ms step_avg:80.38ms
+[2025-07-06 04:58:28] [Rank 0] step:2441/10000 train_time:196760ms step_avg:80.61ms
+[2025-07-06 04:58:29] [Rank 0] step:2461/10000 train_time:198255ms step_avg:80.56ms
+[2025-07-06 04:58:31] [Rank 0] step:2481/10000 train_time:199753ms step_avg:80.51ms
+[2025-07-06 04:58:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 04:58:33] [Rank 0] PRINT: step:2500/10000 train_loss:0.8927 val_loss:0.8821 train_time:201252ms step_avg:80.50ms
+[2025-07-06 04:58:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 04:58:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 04:58:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:03:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:03:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:03:58] [Rank 0] Total Loss: 5.1810
+[2025-07-06 05:03:58] [Rank 0] Total FTA: 0.8369
+[2025-07-06 05:03:58] [Rank 0] Group 0 Loss: 5.3380
+[2025-07-06 05:03:58] [Rank 0] Group 1 Loss: 5.0721
+[2025-07-06 05:03:58] [Rank 0] Group 2 Loss: 5.0193
+[2025-07-06 05:03:58] [Rank 0] Group 3 Loss: 5.2378
+[2025-07-06 05:03:58] [Rank 0] Group 4 Loss: 5.1955
+[2025-07-06 05:03:58] [Rank 0] Group 5 Loss: 5.0833
+[2025-07-06 05:03:58] [Rank 0] Group 6 Loss: 5.0691
+[2025-07-06 05:03:58] [Rank 0] Group 7 Loss: 5.1770
+[2025-07-06 05:03:58] [Rank 0] Group 8 Loss: 5.2382
+[2025-07-06 05:03:58] [Rank 0] Group 9 Loss: 5.2075
+[2025-07-06 05:03:58] [Rank 0] Group 10 Loss: 5.1960
+[2025-07-06 05:03:58] [Rank 0] Group 11 Loss: 5.1823
+[2025-07-06 05:03:58] [Rank 0] Group 0 FTA: 0.6450
+[2025-07-06 05:03:58] [Rank 0] Group 1 FTA: 0.8516
+[2025-07-06 05:03:58] [Rank 0] Group 2 FTA: 0.6849
+[2025-07-06 05:03:58] [Rank 0] Group 3 FTA: 0.8307
+[2025-07-06 05:03:58] [Rank 0] Group 4 FTA: 0.8776
+[2025-07-06 05:03:58] [Rank 0] Group 5 FTA: 0.8542
+[2025-07-06 05:03:58] [Rank 0] Group 6 FTA: 0.9323
+[2025-07-06 05:03:58] [Rank 0] Group 7 FTA: 0.8880
+[2025-07-06 05:03:58] [Rank 0] Group 8 FTA: 0.8932
+[2025-07-06 05:03:58] [Rank 0] Group 9 FTA: 0.8828
+[2025-07-06 05:03:58] [Rank 0] Group 10 FTA: 0.8887
+[2025-07-06 05:03:58] [Rank 0] Group 11 FTA: 0.8994
+[2025-07-06 05:03:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:03:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:03:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:04:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:04:00] [Rank 0] step:2501/10000 train_time:201272ms step_avg:80.48ms
+[2025-07-06 05:04:02] [Rank 0] step:2521/10000 train_time:203468ms step_avg:80.71ms
+[2025-07-06 05:04:03] [Rank 0] step:2541/10000 train_time:204939ms step_avg:80.65ms
+[2025-07-06 05:04:05] [Rank 0] step:2561/10000 train_time:206428ms step_avg:80.60ms
+[2025-07-06 05:04:06] [Rank 0] step:2581/10000 train_time:207920ms step_avg:80.56ms
+[2025-07-06 05:04:08] [Rank 0] step:2601/10000 train_time:209411ms step_avg:80.51ms
+[2025-07-06 05:04:10] [Rank 0] step:2621/10000 train_time:211550ms step_avg:80.71ms
+[2025-07-06 05:04:11] [Rank 0] step:2641/10000 train_time:213043ms step_avg:80.67ms
+[2025-07-06 05:04:13] [Rank 0] step:2661/10000 train_time:214536ms step_avg:80.62ms
+[2025-07-06 05:04:14] [Rank 0] step:2681/10000 train_time:216030ms step_avg:80.58ms
+[2025-07-06 05:04:17] [Rank 0] step:2701/10000 train_time:218188ms step_avg:80.78ms
+[2025-07-06 05:04:18] [Rank 0] step:2721/10000 train_time:219661ms step_avg:80.73ms
+[2025-07-06 05:04:20] [Rank 0] step:2741/10000 train_time:221155ms step_avg:80.68ms
+[2025-07-06 05:04:21] [Rank 0] step:2761/10000 train_time:222649ms step_avg:80.64ms
+[2025-07-06 05:04:23] [Rank 0] step:2781/10000 train_time:224145ms step_avg:80.60ms
+[2025-07-06 05:04:25] [Rank 0] step:2801/10000 train_time:226305ms step_avg:80.79ms
+[2025-07-06 05:04:26] [Rank 0] step:2821/10000 train_time:227800ms step_avg:80.75ms
+[2025-07-06 05:04:28] [Rank 0] step:2841/10000 train_time:229295ms step_avg:80.71ms
+[2025-07-06 05:04:29] [Rank 0] step:2861/10000 train_time:230792ms step_avg:80.67ms
+[2025-07-06 05:04:31] [Rank 0] step:2881/10000 train_time:232493ms step_avg:80.70ms
+[2025-07-06 05:04:33] [Rank 0] step:2901/10000 train_time:234089ms step_avg:80.69ms
+[2025-07-06 05:04:34] [Rank 0] step:2921/10000 train_time:235590ms step_avg:80.65ms
+[2025-07-06 05:04:36] [Rank 0] step:2941/10000 train_time:237088ms step_avg:80.61ms
+[2025-07-06 05:04:37] [Rank 0] step:2961/10000 train_time:238589ms step_avg:80.58ms
+[2025-07-06 05:04:39] [Rank 0] step:2981/10000 train_time:240739ms step_avg:80.76ms
+[2025-07-06 05:04:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 05:04:42] [Rank 0] PRINT: step:3000/10000 train_loss:0.8803 val_loss:0.8731 train_time:242239ms step_avg:80.75ms
+[2025-07-06 05:04:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 05:04:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 05:04:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:10:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:10:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:10:05] [Rank 0] Total Loss: 5.2500
+[2025-07-06 05:10:05] [Rank 0] Total FTA: 0.9412
+[2025-07-06 05:10:05] [Rank 0] Group 0 Loss: 5.5002
+[2025-07-06 05:10:05] [Rank 0] Group 1 Loss: 5.1509
+[2025-07-06 05:10:05] [Rank 0] Group 2 Loss: 5.1074
+[2025-07-06 05:10:05] [Rank 0] Group 3 Loss: 5.2693
+[2025-07-06 05:10:05] [Rank 0] Group 4 Loss: 5.2641
+[2025-07-06 05:10:05] [Rank 0] Group 5 Loss: 5.1998
+[2025-07-06 05:10:05] [Rank 0] Group 6 Loss: 5.1356
+[2025-07-06 05:10:05] [Rank 0] Group 7 Loss: 5.2717
+[2025-07-06 05:10:05] [Rank 0] Group 8 Loss: 5.2223
+[2025-07-06 05:10:05] [Rank 0] Group 9 Loss: 5.2191
+[2025-07-06 05:10:05] [Rank 0] Group 10 Loss: 5.1993
+[2025-07-06 05:10:05] [Rank 0] Group 11 Loss: 5.2371
+[2025-07-06 05:10:05] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 05:10:05] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 05:10:05] [Rank 0] Group 2 FTA: 0.9375
+[2025-07-06 05:10:05] [Rank 0] Group 3 FTA: 0.9141
+[2025-07-06 05:10:05] [Rank 0] Group 4 FTA: 0.9401
+[2025-07-06 05:10:05] [Rank 0] Group 5 FTA: 0.9557
+[2025-07-06 05:10:05] [Rank 0] Group 6 FTA: 0.9167
+[2025-07-06 05:10:05] [Rank 0] Group 7 FTA: 0.9349
+[2025-07-06 05:10:05] [Rank 0] Group 8 FTA: 0.8854
+[2025-07-06 05:10:05] [Rank 0] Group 9 FTA: 0.9453
+[2025-07-06 05:10:05] [Rank 0] Group 10 FTA: 0.9238
+[2025-07-06 05:10:05] [Rank 0] Group 11 FTA: 0.9219
+[2025-07-06 05:10:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:10:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:10:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:10:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:10:07] [Rank 0] step:3001/10000 train_time:242262ms step_avg:80.73ms
+[2025-07-06 05:10:08] [Rank 0] step:3021/10000 train_time:243875ms step_avg:80.73ms
+[2025-07-06 05:10:10] [Rank 0] step:3041/10000 train_time:245364ms step_avg:80.69ms
+[2025-07-06 05:10:12] [Rank 0] step:3061/10000 train_time:247113ms step_avg:80.73ms
+[2025-07-06 05:10:14] [Rank 0] step:3081/10000 train_time:249015ms step_avg:80.82ms
+[2025-07-06 05:10:15] [Rank 0] step:3101/10000 train_time:250505ms step_avg:80.78ms
+[2025-07-06 05:10:17] [Rank 0] step:3121/10000 train_time:251996ms step_avg:80.74ms
+[2025-07-06 05:10:18] [Rank 0] step:3141/10000 train_time:253490ms step_avg:80.70ms
+[2025-07-06 05:10:20] [Rank 0] step:3161/10000 train_time:255628ms step_avg:80.87ms
+[2025-07-06 05:10:22] [Rank 0] step:3181/10000 train_time:257120ms step_avg:80.83ms
+[2025-07-06 05:10:23] [Rank 0] step:3201/10000 train_time:258615ms step_avg:80.79ms
+[2025-07-06 05:10:25] [Rank 0] step:3221/10000 train_time:260108ms step_avg:80.75ms
+[2025-07-06 05:10:27] [Rank 0] step:3241/10000 train_time:261652ms step_avg:80.73ms
+[2025-07-06 05:10:28] [Rank 0] step:3261/10000 train_time:263739ms step_avg:80.88ms
+[2025-07-06 05:10:30] [Rank 0] step:3281/10000 train_time:265232ms step_avg:80.84ms
+[2025-07-06 05:10:31] [Rank 0] step:3301/10000 train_time:266727ms step_avg:80.80ms
+[2025-07-06 05:10:33] [Rank 0] step:3321/10000 train_time:268221ms step_avg:80.77ms
+[2025-07-06 05:10:35] [Rank 0] step:3341/10000 train_time:270382ms step_avg:80.93ms
+[2025-07-06 05:10:36] [Rank 0] step:3361/10000 train_time:271878ms step_avg:80.89ms
+[2025-07-06 05:10:38] [Rank 0] step:3381/10000 train_time:273373ms step_avg:80.86ms
+[2025-07-06 05:10:39] [Rank 0] step:3401/10000 train_time:274871ms step_avg:80.82ms
+[2025-07-06 05:10:42] [Rank 0] step:3421/10000 train_time:276368ms step_avg:80.79ms
+[2025-07-06 05:10:43] [Rank 0] step:3441/10000 train_time:278525ms step_avg:80.94ms
+[2025-07-06 05:10:45] [Rank 0] step:3461/10000 train_time:280022ms step_avg:80.91ms
+[2025-07-06 05:10:46] [Rank 0] step:3481/10000 train_time:281521ms step_avg:80.87ms
+[2025-07-06 05:10:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 05:10:48] [Rank 0] PRINT: step:3500/10000 train_loss:0.8737 val_loss:0.8687 train_time:283018ms step_avg:80.86ms
+[2025-07-06 05:10:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 05:10:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 05:10:49] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:16:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:16:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:16:13] [Rank 0] Total Loss: 5.2494
+[2025-07-06 05:16:13] [Rank 0] Total FTA: 0.9622
+[2025-07-06 05:16:13] [Rank 0] Group 0 Loss: 5.4385
+[2025-07-06 05:16:13] [Rank 0] Group 1 Loss: 5.2021
+[2025-07-06 05:16:13] [Rank 0] Group 2 Loss: 5.0796
+[2025-07-06 05:16:13] [Rank 0] Group 3 Loss: 5.3000
+[2025-07-06 05:16:13] [Rank 0] Group 4 Loss: 5.2585
+[2025-07-06 05:16:13] [Rank 0] Group 5 Loss: 5.1657
+[2025-07-06 05:16:13] [Rank 0] Group 6 Loss: 5.1373
+[2025-07-06 05:16:13] [Rank 0] Group 7 Loss: 5.3088
+[2025-07-06 05:16:13] [Rank 0] Group 8 Loss: 5.2621
+[2025-07-06 05:16:13] [Rank 0] Group 9 Loss: 5.1883
+[2025-07-06 05:16:13] [Rank 0] Group 10 Loss: 5.2248
+[2025-07-06 05:16:13] [Rank 0] Group 11 Loss: 5.2403
+[2025-07-06 05:16:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 05:16:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 05:16:13] [Rank 0] Group 2 FTA: 0.9349
+[2025-07-06 05:16:13] [Rank 0] Group 3 FTA: 0.9688
+[2025-07-06 05:16:13] [Rank 0] Group 4 FTA: 0.9505
+[2025-07-06 05:16:13] [Rank 0] Group 5 FTA: 0.9557
+[2025-07-06 05:16:13] [Rank 0] Group 6 FTA: 0.9583
+[2025-07-06 05:16:13] [Rank 0] Group 7 FTA: 0.9609
+[2025-07-06 05:16:13] [Rank 0] Group 8 FTA: 0.9453
+[2025-07-06 05:16:13] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-06 05:16:13] [Rank 0] Group 10 FTA: 0.9688
+[2025-07-06 05:16:13] [Rank 0] Group 11 FTA: 0.9424
+[2025-07-06 05:16:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:16:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:16:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:16:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:16:15] [Rank 0] step:3501/10000 train_time:283040ms step_avg:80.85ms
+[2025-07-06 05:16:17] [Rank 0] step:3521/10000 train_time:285198ms step_avg:81.00ms
+[2025-07-06 05:16:19] [Rank 0] step:3541/10000 train_time:286788ms step_avg:80.99ms
+[2025-07-06 05:16:20] [Rank 0] step:3561/10000 train_time:288279ms step_avg:80.95ms
+[2025-07-06 05:16:22] [Rank 0] step:3581/10000 train_time:289771ms step_avg:80.92ms
+[2025-07-06 05:16:24] [Rank 0] step:3601/10000 train_time:291945ms step_avg:81.07ms
+[2025-07-06 05:16:26] [Rank 0] step:3621/10000 train_time:293406ms step_avg:81.03ms
+[2025-07-06 05:16:27] [Rank 0] step:3641/10000 train_time:294898ms step_avg:80.99ms
+[2025-07-06 05:16:29] [Rank 0] step:3661/10000 train_time:296391ms step_avg:80.96ms
+[2025-07-06 05:16:30] [Rank 0] step:3681/10000 train_time:297883ms step_avg:80.92ms
+[2025-07-06 05:16:32] [Rank 0] step:3701/10000 train_time:300017ms step_avg:81.06ms
+[2025-07-06 05:16:34] [Rank 0] step:3721/10000 train_time:301511ms step_avg:81.03ms
+[2025-07-06 05:16:35] [Rank 0] step:3741/10000 train_time:303005ms step_avg:81.00ms
+[2025-07-06 05:16:37] [Rank 0] step:3761/10000 train_time:304499ms step_avg:80.96ms
+[2025-07-06 05:16:38] [Rank 0] step:3781/10000 train_time:306263ms step_avg:81.00ms
+[2025-07-06 05:16:40] [Rank 0] step:3801/10000 train_time:307725ms step_avg:80.96ms
+[2025-07-06 05:16:41] [Rank 0] step:3821/10000 train_time:309221ms step_avg:80.93ms
+[2025-07-06 05:16:43] [Rank 0] step:3841/10000 train_time:310718ms step_avg:80.90ms
+[2025-07-06 05:16:44] [Rank 0] step:3861/10000 train_time:312214ms step_avg:80.86ms
+[2025-07-06 05:16:47] [Rank 0] step:3881/10000 train_time:314354ms step_avg:81.00ms
+[2025-07-06 05:16:48] [Rank 0] step:3901/10000 train_time:315851ms step_avg:80.97ms
+[2025-07-06 05:16:50] [Rank 0] step:3921/10000 train_time:317347ms step_avg:80.94ms
+[2025-07-06 05:16:51] [Rank 0] step:3941/10000 train_time:318846ms step_avg:80.90ms
+[2025-07-06 05:16:53] [Rank 0] step:3961/10000 train_time:320344ms step_avg:80.87ms
+[2025-07-06 05:16:55] [Rank 0] step:3981/10000 train_time:322502ms step_avg:81.01ms
+[2025-07-06 05:16:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 05:16:57] [Rank 0] PRINT: step:4000/10000 train_loss:0.8699 val_loss:0.8667 train_time:324000ms step_avg:81.00ms
+[2025-07-06 05:16:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 05:16:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 05:16:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:22:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:22:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:22:25] [Rank 0] Total Loss: 5.2144
+[2025-07-06 05:22:25] [Rank 0] Total FTA: 0.9688
+[2025-07-06 05:22:25] [Rank 0] Group 0 Loss: 5.3459
+[2025-07-06 05:22:25] [Rank 0] Group 1 Loss: 5.1859
+[2025-07-06 05:22:25] [Rank 0] Group 2 Loss: 5.0234
+[2025-07-06 05:22:25] [Rank 0] Group 3 Loss: 5.1552
+[2025-07-06 05:22:25] [Rank 0] Group 4 Loss: 5.3318
+[2025-07-06 05:22:25] [Rank 0] Group 5 Loss: 5.1454
+[2025-07-06 05:22:25] [Rank 0] Group 6 Loss: 5.1009
+[2025-07-06 05:22:25] [Rank 0] Group 7 Loss: 5.2409
+[2025-07-06 05:22:25] [Rank 0] Group 8 Loss: 5.2392
+[2025-07-06 05:22:25] [Rank 0] Group 9 Loss: 5.1988
+[2025-07-06 05:22:25] [Rank 0] Group 10 Loss: 5.2405
+[2025-07-06 05:22:25] [Rank 0] Group 11 Loss: 5.2161
+[2025-07-06 05:22:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 05:22:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 05:22:25] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 05:22:25] [Rank 0] Group 3 FTA: 0.9453
+[2025-07-06 05:22:25] [Rank 0] Group 4 FTA: 0.9609
+[2025-07-06 05:22:25] [Rank 0] Group 5 FTA: 0.9505
+[2025-07-06 05:22:25] [Rank 0] Group 6 FTA: 0.9453
+[2025-07-06 05:22:25] [Rank 0] Group 7 FTA: 0.9661
+[2025-07-06 05:22:25] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-06 05:22:25] [Rank 0] Group 9 FTA: 0.9531
+[2025-07-06 05:22:25] [Rank 0] Group 10 FTA: 0.9648
+[2025-07-06 05:22:25] [Rank 0] Group 11 FTA: 0.9600
+[2025-07-06 05:22:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:22:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:22:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:22:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:22:26] [Rank 0] step:4001/10000 train_time:324021ms step_avg:80.98ms
+[2025-07-06 05:22:28] [Rank 0] step:4021/10000 train_time:325512ms step_avg:80.95ms
+[2025-07-06 05:22:29] [Rank 0] step:4041/10000 train_time:326999ms step_avg:80.92ms
+[2025-07-06 05:22:32] [Rank 0] step:4061/10000 train_time:329158ms step_avg:81.05ms
+[2025-07-06 05:22:33] [Rank 0] step:4081/10000 train_time:330648ms step_avg:81.02ms
+[2025-07-06 05:22:35] [Rank 0] step:4101/10000 train_time:332139ms step_avg:80.99ms
+[2025-07-06 05:22:36] [Rank 0] step:4121/10000 train_time:333632ms step_avg:80.96ms
+[2025-07-06 05:22:38] [Rank 0] step:4141/10000 train_time:335380ms step_avg:80.99ms
+[2025-07-06 05:22:40] [Rank 0] step:4161/10000 train_time:337269ms step_avg:81.05ms
+[2025-07-06 05:22:41] [Rank 0] step:4181/10000 train_time:338762ms step_avg:81.02ms
+[2025-07-06 05:22:43] [Rank 0] step:4201/10000 train_time:340257ms step_avg:80.99ms
+[2025-07-06 05:22:44] [Rank 0] step:4221/10000 train_time:341752ms step_avg:80.96ms
+[2025-07-06 05:22:46] [Rank 0] step:4241/10000 train_time:343913ms step_avg:81.09ms
+[2025-07-06 05:22:48] [Rank 0] step:4261/10000 train_time:345407ms step_avg:81.06ms
+[2025-07-06 05:22:49] [Rank 0] step:4281/10000 train_time:346902ms step_avg:81.03ms
+[2025-07-06 05:22:51] [Rank 0] step:4301/10000 train_time:348397ms step_avg:81.00ms
step:4301/10000 train_time:348397ms step_avg:81.00ms +[2025-07-06 05:22:53] [Rank 0] step:4321/10000 train_time:349944ms step_avg:80.99ms +[2025-07-06 05:22:53] [Rank 0] step:4321/10000 train_time:349944ms step_avg:80.99ms +[2025-07-06 05:22:55] [Rank 0] step:4341/10000 train_time:352056ms step_avg:81.10ms +[2025-07-06 05:22:55] [Rank 0] step:4341/10000 train_time:352056ms step_avg:81.10ms +[2025-07-06 05:22:56] [Rank 0] step:4361/10000 train_time:353554ms step_avg:81.07ms +[2025-07-06 05:22:56] [Rank 0] step:4361/10000 train_time:353554ms step_avg:81.07ms +[2025-07-06 05:22:57] [Rank 0] step:4381/10000 train_time:355052ms step_avg:81.04ms +[2025-07-06 05:22:57] [Rank 0] step:4381/10000 train_time:355052ms step_avg:81.04ms +[2025-07-06 05:22:59] [Rank 0] step:4401/10000 train_time:356549ms step_avg:81.02ms +[2025-07-06 05:22:59] [Rank 0] step:4401/10000 train_time:356549ms step_avg:81.02ms +[2025-07-06 05:23:01] [Rank 0] step:4421/10000 train_time:358699ms step_avg:81.14ms +[2025-07-06 05:23:01] [Rank 0] step:4421/10000 train_time:358699ms step_avg:81.14ms +[2025-07-06 05:23:03] [Rank 0] step:4441/10000 train_time:360197ms step_avg:81.11ms +[2025-07-06 05:23:03] [Rank 0] step:4441/10000 train_time:360197ms step_avg:81.11ms +[2025-07-06 05:23:04] [Rank 0] step:4461/10000 train_time:361695ms step_avg:81.08ms +[2025-07-06 05:23:04] [Rank 0] step:4461/10000 train_time:361695ms step_avg:81.08ms +[2025-07-06 05:23:06] [Rank 0] step:4481/10000 train_time:363194ms step_avg:81.05ms +[2025-07-06 05:23:06] [Rank 0] step:4481/10000 train_time:363194ms step_avg:81.05ms +[2025-07-06 05:23:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 05:23:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 05:23:08] [Rank 0] PRINT: step:4500/10000 train_loss:0.8670 val_loss:0.8641 train_time:364694ms step_avg:81.04ms +[2025-07-06 05:23:08] [Rank 0] PRINT: step:4500/10000 train_loss:0.8670 val_loss:0.8641 train_time:364694ms step_avg:81.04ms +[2025-07-06 05:23:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 05:23:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 05:23:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 05:23:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 05:23:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:28:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:28:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:28:34] [Rank 0] Total Loss: 5.3139
+[2025-07-06 05:28:34] [Rank 0] Total FTA: 0.9441
+[2025-07-06 05:28:34] [Rank 0] Group 0 Loss: 5.5437
+[2025-07-06 05:28:34] [Rank 0] Group 1 Loss: 5.3350
+[2025-07-06 05:28:34] [Rank 0] Group 2 Loss: 5.1987
+[2025-07-06 05:28:34] [Rank 0] Group 3 Loss: 5.1823
+[2025-07-06 05:28:34] [Rank 0] Group 4 Loss: 5.3319
+[2025-07-06 05:28:34] [Rank 0] Group 5 Loss: 5.2602
+[2025-07-06 05:28:34] [Rank 0] Group 6 Loss: 5.1938
+[2025-07-06 05:28:34] [Rank 0] Group 7 Loss: 5.3221
+[2025-07-06 05:28:34] [Rank 0] Group 8 Loss: 5.2820
+[2025-07-06 05:28:34] [Rank 0] Group 9 Loss: 5.2508
+[2025-07-06 05:28:34] [Rank 0] Group 10 Loss: 5.2941
+[2025-07-06 05:28:34] [Rank 0] Group 11 Loss: 5.3188
+[2025-07-06 05:28:34] [Rank 0] Group 0 FTA: 0.8283
+[2025-07-06 05:28:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 05:28:34] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 05:28:34] [Rank 0] Group 3 FTA: 0.9531
+[2025-07-06 05:28:34] [Rank 0] Group 4 FTA: 0.9740
+[2025-07-06 05:28:34] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-06 05:28:34] [Rank 0] Group 6 FTA: 0.9427
+[2025-07-06 05:28:34] [Rank 0] Group 7 FTA: 0.9557
+[2025-07-06 05:28:34] [Rank 0] Group 8 FTA: 0.9635
+[2025-07-06 05:28:34] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-06 05:28:34] [Rank 0] Group 10 FTA: 0.9434
+[2025-07-06 05:28:34] [Rank 0] Group 11 FTA: 0.9521
+[2025-07-06 05:28:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:28:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:28:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:28:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:28:37] [Rank 0] step:4501/10000 train_time:365441ms step_avg:81.19ms
+[2025-07-06 05:28:38] [Rank 0] step:4521/10000 train_time:366950ms step_avg:81.17ms
+[2025-07-06 05:28:40] [Rank 0] step:4541/10000 train_time:368439ms step_avg:81.14ms
+[2025-07-06 05:28:41] [Rank 0] step:4561/10000 train_time:369929ms step_avg:81.11ms
+[2025-07-06 05:28:43] [Rank 0] step:4581/10000 train_time:371419ms step_avg:81.08ms
+[2025-07-06 05:28:45] [Rank 0] step:4601/10000 train_time:373804ms step_avg:81.24ms
+[2025-07-06 05:28:47] [Rank 0] step:4621/10000 train_time:375299ms step_avg:81.22ms
+[2025-07-06 05:28:48] [Rank 0] step:4641/10000 train_time:376791ms step_avg:81.19ms
+[2025-07-06 05:28:50] [Rank 0] step:4661/10000 train_time:378284ms step_avg:81.16ms
+[2025-07-06 05:28:52] [Rank 0] step:4681/10000 train_time:380032ms step_avg:81.19ms
+[2025-07-06 05:28:53] [Rank 0] step:4701/10000 train_time:381917ms step_avg:81.24ms
+[2025-07-06 05:28:55] [Rank 0] step:4721/10000 train_time:383410ms step_avg:81.21ms
+[2025-07-06 05:28:56] [Rank 0] step:4741/10000 train_time:384904ms step_avg:81.19ms
+[2025-07-06 05:28:58] [Rank 0] step:4761/10000 train_time:386398ms step_avg:81.16ms
+[2025-07-06 05:29:00] [Rank 0] step:4781/10000 train_time:388128ms step_avg:81.18ms
+[2025-07-06 05:29:01] [Rank 0] step:4801/10000 train_time:389627ms step_avg:81.16ms
+[2025-07-06 05:29:03] [Rank 0] step:4821/10000 train_time:391123ms step_avg:81.13ms
+[2025-07-06 05:29:04] [Rank 0] step:4841/10000 train_time:392619ms step_avg:81.10ms
+[2025-07-06 05:29:06] [Rank 0] step:4861/10000 train_time:394371ms step_avg:81.13ms
+[2025-07-06 05:29:07] [Rank 0] step:4881/10000 train_time:395851ms step_avg:81.10ms
+[2025-07-06 05:29:09] [Rank 0] step:4901/10000 train_time:397347ms step_avg:81.07ms
+[2025-07-06 05:29:10] [Rank 0] step:4921/10000 train_time:398844ms step_avg:81.05ms
+[2025-07-06 05:29:12] [Rank 0] step:4941/10000 train_time:400341ms step_avg:81.02ms
+[2025-07-06 05:29:14] [Rank 0] step:4961/10000 train_time:402482ms step_avg:81.13ms
+[2025-07-06 05:29:15] [Rank 0] step:4981/10000 train_time:403981ms step_avg:81.10ms
+[2025-07-06 05:29:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 05:29:18] [Rank 0] PRINT: step:5000/10000 train_loss:0.8644 val_loss:0.8635 train_time:405478ms step_avg:81.10ms
+[2025-07-06 05:29:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 05:29:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 05:29:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:34:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:34:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:34:43] [Rank 0] Total Loss: 5.2696
+[2025-07-06 05:34:43] [Rank 0] Total FTA: 0.9688
+[2025-07-06 05:34:43] [Rank 0] Group 0 Loss: 5.4391
+[2025-07-06 05:34:43] [Rank 0] Group 1 Loss: 5.3587
+[2025-07-06 05:34:43] [Rank 0] Group 2 Loss: 5.1592
+[2025-07-06 05:34:43] [Rank 0] Group 3 Loss: 5.2161
+[2025-07-06 05:34:43] [Rank 0] Group 4 Loss: 5.2898
+[2025-07-06 05:34:43] [Rank 0] Group 5 Loss: 5.2418
+[2025-07-06 05:34:44] [Rank 0] Group 6 Loss: 5.1174
+[2025-07-06 05:34:44] [Rank 0] Group 7 Loss: 5.2366
+[2025-07-06 05:34:44] [Rank 0] Group 8 Loss: 5.2578
+[2025-07-06 05:34:44] [Rank 0] Group 9 Loss: 5.2387
+[2025-07-06 05:34:44] [Rank 0] Group 10 Loss: 5.2596
+[2025-07-06 05:34:44] [Rank 0] Group 11 Loss: 5.2599
+[2025-07-06 05:34:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 05:34:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 05:34:44] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 05:34:44] [Rank 0] Group 3 FTA: 0.9323
+[2025-07-06 05:34:44] [Rank 0] Group 4 FTA: 0.9557
+[2025-07-06 05:34:44] [Rank 0] Group 5 FTA: 0.9766
+[2025-07-06 05:34:44] [Rank 0] Group 6 FTA: 0.9609
+[2025-07-06 05:34:44] [Rank 0] Group 7 FTA: 0.9740
+[2025-07-06 05:34:44] [Rank 0] Group 8 FTA: 0.9427
+[2025-07-06 05:34:44] [Rank 0] Group 9 FTA: 0.9609
+[2025-07-06 05:34:44] [Rank 0] Group 10 FTA: 0.9629
+[2025-07-06 05:34:44] [Rank 0] Group 11 FTA: 0.9531
+[2025-07-06 05:34:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:34:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:34:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:34:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:34:45] [Rank 0] step:5001/10000 train_time:405499ms step_avg:81.08ms
+[2025-07-06 05:34:47] [Rank 0] step:5021/10000 train_time:406998ms step_avg:81.06ms
+[2025-07-06 05:34:49] [Rank 0] step:5041/10000 train_time:408554ms step_avg:81.05ms
+[2025-07-06 05:34:50] [Rank 0] step:5061/10000 train_time:410626ms step_avg:81.14ms
+[2025-07-06 05:34:52] [Rank 0] step:5081/10000 train_time:412117ms step_avg:81.11ms
+[2025-07-06 05:34:53] [Rank 0] step:5101/10000 train_time:413607ms step_avg:81.08ms
+[2025-07-06 05:34:55] [Rank 0] step:5121/10000 train_time:415099ms step_avg:81.06ms
+[2025-07-06 05:34:57] [Rank 0] step:5141/10000 train_time:417242ms step_avg:81.16ms
+[2025-07-06 05:34:58] [Rank 0] step:5161/10000 train_time:418734ms step_avg:81.13ms
+[2025-07-06 05:35:00] [Rank 0] step:5181/10000 train_time:420230ms step_avg:81.11ms
+[2025-07-06 05:35:01] [Rank 0] step:5201/10000 train_time:421724ms step_avg:81.09ms
+[2025-07-06 05:35:04] [Rank 0] step:5221/10000 train_time:423217ms step_avg:81.06ms
+[2025-07-06 05:35:05] [Rank 0] step:5241/10000 train_time:425570ms step_avg:81.20ms
+[2025-07-06 05:35:07] [Rank 0] step:5261/10000 train_time:427065ms step_avg:81.18ms
+[2025-07-06 05:35:08] [Rank 0] step:5281/10000 train_time:428560ms step_avg:81.15ms
+[2025-07-06 05:35:10] [Rank 0] step:5301/10000 train_time:430057ms step_avg:81.13ms
+[2025-07-06 05:35:12] [Rank 0] step:5321/10000 train_time:432219ms step_avg:81.23ms
+[2025-07-06 05:35:13] [Rank 0] step:5341/10000 train_time:433714ms step_avg:81.20ms
+[2025-07-06 05:35:15] [Rank 0] step:5361/10000 train_time:435211ms step_avg:81.18ms
+[2025-07-06 05:35:16] [Rank 0] step:5381/10000 train_time:436709ms step_avg:81.16ms
+[2025-07-06 05:35:19] [Rank 0] step:5401/10000 train_time:438210ms step_avg:81.13ms
+[2025-07-06 05:35:20] [Rank 0] step:5421/10000 train_time:440364ms step_avg:81.23ms
+[2025-07-06 05:35:22] [Rank 0] step:5441/10000 train_time:441862ms step_avg:81.21ms
+[2025-07-06 05:35:23] [Rank 0] step:5461/10000 train_time:443360ms step_avg:81.19ms
+[2025-07-06 05:35:25] [Rank 0] step:5481/10000 train_time:444858ms step_avg:81.16ms
+[2025-07-06 05:35:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 05:35:28] [Rank 0] PRINT: step:5500/10000 train_loss:0.8624 val_loss:0.8635 train_time:447096ms step_avg:81.29ms
+[2025-07-06 05:35:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 05:35:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 05:35:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:40:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:40:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:40:52] [Rank 0] Total Loss: 5.4104
+[2025-07-06 05:40:52] [Rank 0] Total FTA: 0.9517
+[2025-07-06 05:40:52] [Rank 0] Group 0 Loss: 5.5990
+[2025-07-06 05:40:52] [Rank 0] Group 1 Loss: 5.6410
+[2025-07-06 05:40:52] [Rank 0] Group 2 Loss: 5.2822
+[2025-07-06 05:40:52] [Rank 0] Group 3 Loss: 5.3015
+[2025-07-06 05:40:52] [Rank 0] Group 4 Loss: 5.4643
+[2025-07-06 05:40:52] [Rank 0] Group 5 Loss: 5.3945
+[2025-07-06 05:40:52] [Rank 0] Group 6 Loss: 5.2342
+[2025-07-06 05:40:52] [Rank 0] Group 7 Loss: 5.4242
+[2025-07-06 05:40:52] [Rank 0] Group 8 Loss: 5.3772
+[2025-07-06 05:40:52] [Rank 0] Group 9 Loss: 5.3658
+[2025-07-06 05:40:52] [Rank 0] Group 10 Loss: 5.3415
+[2025-07-06 05:40:52] [Rank 0] Group 11 Loss: 5.3761
+[2025-07-06 05:40:52] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 05:40:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 05:40:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 05:40:52] [Rank 0] Group 3 FTA: 0.8932
+[2025-07-06 05:40:52] [Rank 0] Group 4 FTA: 0.8932
+[2025-07-06 05:40:52] [Rank 0] Group 5 FTA: 0.9661
+[2025-07-06 05:40:52] [Rank 0] Group 6 FTA: 0.9219
+[2025-07-06 05:40:52] [Rank 0] Group 7 FTA: 0.9635
+[2025-07-06 05:40:52] [Rank 0] Group 8 FTA: 0.9557
+[2025-07-06 05:40:52] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-06 05:40:52] [Rank 0] Group 10 FTA: 0.9297
+[2025-07-06 05:40:52] [Rank 0] Group 11 FTA: 0.9346
+[2025-07-06 05:40:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:40:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:40:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:40:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:40:54] [Rank 0] step:5501/10000 train_time:447118ms step_avg:81.28ms
+[2025-07-06 05:40:56] [Rank 0] step:5521/10000 train_time:448620ms step_avg:81.26ms
+[2025-07-06 05:40:57] [Rank 0] step:5541/10000 train_time:450109ms step_avg:81.23ms
+[2025-07-06 05:40:59] [Rank 0] step:5561/10000 train_time:451599ms step_avg:81.21ms
+[2025-07-06 05:41:01] [Rank 0] step:5581/10000 train_time:453191ms step_avg:81.20ms
+[2025-07-06 05:41:02] [Rank 0] step:5601/10000 train_time:455345ms step_avg:81.30ms
+[2025-07-06 05:41:04] [Rank 0] step:5621/10000 train_time:456836ms step_avg:81.27ms
+[2025-07-06 05:41:05] [Rank 0] step:5641/10000 train_time:458331ms step_avg:81.25ms
+[2025-07-06 05:41:07] [Rank 0] step:5661/10000 train_time:459824ms step_avg:81.23ms
+[2025-07-06 05:41:09] [Rank 0] step:5681/10000 train_time:461956ms step_avg:81.32ms
+[2025-07-06 05:41:10] [Rank 0] step:5701/10000 train_time:463450ms step_avg:81.29ms
+[2025-07-06 05:41:12] [Rank 0] step:5721/10000 train_time:464944ms step_avg:81.27ms
+[2025-07-06 05:41:13] [Rank 0] step:5741/10000 train_time:466438ms step_avg:81.25ms
+[2025-07-06 05:41:16] [Rank 0] step:5761/10000 train_time:467995ms step_avg:81.24ms
+[2025-07-06 05:41:17] [Rank 0] step:5781/10000 train_time:470069ms step_avg:81.31ms
+[2025-07-06 05:41:19] [Rank 0] step:5801/10000 train_time:471565ms step_avg:81.29ms
+[2025-07-06 05:41:20] [Rank 0] step:5821/10000 train_time:473060ms step_avg:81.27ms
+[2025-07-06 05:41:22] [Rank 0] step:5841/10000 train_time:474559ms step_avg:81.25ms
+[2025-07-06 05:41:24] [Rank 0] step:5861/10000 train_time:476865ms step_avg:81.36ms
+[2025-07-06 05:41:25] [Rank 0] step:5881/10000 train_time:478442ms step_avg:81.35ms
+[2025-07-06 05:41:27] [Rank 0] step:5901/10000 train_time:480042ms step_avg:81.35ms
+[2025-07-06 05:41:28] [Rank 0] step:5921/10000 train_time:481541ms step_avg:81.33ms
+[2025-07-06 05:41:30] [Rank 0] step:5941/10000 train_time:483040ms step_avg:81.31ms
+[2025-07-06 05:41:32] [Rank 0] step:5961/10000 train_time:484777ms step_avg:81.32ms
+[2025-07-06 05:41:33] [Rank 0] step:5981/10000 train_time:486273ms step_avg:81.30ms
+[2025-07-06 05:41:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 05:41:36] [Rank 0] PRINT: step:6000/10000 train_loss:0.8608 val_loss:0.8635 train_time:487773ms step_avg:81.30ms
+[2025-07-06 05:41:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 05:41:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 05:41:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:47:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:47:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:47:01] [Rank 0] Total Loss: 5.4352
+[2025-07-06 05:47:01] [Rank 0] Total FTA: 0.9663
+[2025-07-06 05:47:01] [Rank 0] Group 0 Loss: 5.7260
+[2025-07-06 05:47:01] [Rank 0] Group 1 Loss: 5.6260
+[2025-07-06 05:47:01] [Rank 0] Group 2 Loss: 5.3671
+[2025-07-06 05:47:01] [Rank 0] Group 3 Loss: 5.3361
+[2025-07-06 05:47:01] [Rank 0] Group 4 Loss: 5.4257
+[2025-07-06 05:47:01] [Rank 0] Group 5 Loss: 5.3609
+[2025-07-06 05:47:01] [Rank 0] Group 6 Loss: 5.2267
+[2025-07-06 05:47:01] [Rank 0] Group 7 Loss: 5.4377
+[2025-07-06 05:47:01] [Rank 0] Group 8 Loss: 5.4266
+[2025-07-06 05:47:01] [Rank 0] Group 9 Loss: 5.3216
+[2025-07-06 05:47:01] [Rank 0] Group 10 Loss: 5.3417
+[2025-07-06 05:47:01] [Rank 0] Group 11 Loss: 5.3953
+[2025-07-06 05:47:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 05:47:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 05:47:01] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 05:47:01] [Rank 0] Group 3 FTA: 0.9583
+[2025-07-06 05:47:01] [Rank 0] Group 4 FTA: 0.9271
+[2025-07-06 05:47:01] [Rank 0] Group 5 FTA: 0.9688
+[2025-07-06 05:47:01] [Rank 0] Group 6 FTA: 0.9583
+[2025-07-06 05:47:01] [Rank 0] Group 7 FTA: 0.9609
+[2025-07-06 05:47:01] [Rank 0] Group 8 FTA: 0.9531
+[2025-07-06 05:47:01] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-06 05:47:01] [Rank 0] Group 10 FTA: 0.9492
+[2025-07-06 05:47:01] [Rank 0] Group 11 FTA: 0.9580
+[2025-07-06 05:47:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:47:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:47:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:47:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:47:03] [Rank 0] step:6001/10000 train_time:487795ms step_avg:81.29ms
+[2025-07-06 05:47:04] [Rank 0] step:6021/10000 train_time:489284ms step_avg:81.26ms
+[2025-07-06 05:47:06] [Rank 0] step:6041/10000 train_time:491442ms step_avg:81.35ms
+[2025-07-06 05:47:08] [Rank 0] step:6061/10000 train_time:492929ms step_avg:81.33ms
+[2025-07-06 05:47:09] [Rank 0] step:6081/10000 train_time:494419ms step_avg:81.31ms
+[2025-07-06 05:47:11] [Rank 0] step:6101/10000 train_time:496011ms step_avg:81.30ms
+[2025-07-06 05:47:13] [Rank 0] step:6121/10000 train_time:498185ms step_avg:81.39ms
+[2025-07-06 05:47:14] [Rank 0] step:6141/10000 train_time:499647ms step_avg:81.36ms
+[2025-07-06 05:47:16] [Rank 0] step:6161/10000 train_time:501139ms step_avg:81.34ms
+[2025-07-06 05:47:17] [Rank 0] step:6181/10000 train_time:502631ms step_avg:81.32ms
+[2025-07-06 05:47:19] [Rank 0] step:6201/10000 train_time:504125ms step_avg:81.30ms
+[2025-07-06 05:47:21] [Rank 0] step:6221/10000 train_time:506259ms step_avg:81.38ms
+[2025-07-06 05:47:23] [Rank 0] step:6241/10000 train_time:507753ms step_avg:81.36ms
+[2025-07-06 05:47:24] [Rank 0] step:6261/10000 train_time:509347ms step_avg:81.35ms
+[2025-07-06 05:47:26] [Rank 0] step:6281/10000 train_time:510841ms step_avg:81.33ms
+[2025-07-06 05:47:28] [Rank 0] step:6301/10000 train_time:513119ms step_avg:81.43ms
+[2025-07-06 05:47:29] [Rank 0] step:6321/10000 train_time:514595ms step_avg:81.41ms
+[2025-07-06 05:47:31] [Rank 0] step:6341/10000 train_time:516188ms step_avg:81.40ms
+[2025-07-06 05:47:32] [Rank 0] step:6361/10000 train_time:517683ms step_avg:81.38ms
+[2025-07-06 05:47:34] [Rank 0] step:6381/10000 train_time:519180ms step_avg:81.36ms
+[2025-07-06 05:47:36] [Rank 0] step:6401/10000 train_time:521334ms step_avg:81.45ms
+[2025-07-06 05:47:38] [Rank 0] step:6421/10000 train_time:522829ms step_avg:81.42ms
+[2025-07-06 05:47:39] [Rank 0] step:6441/10000 train_time:524327ms step_avg:81.40ms
+[2025-07-06 05:47:41] [Rank 0] step:6461/10000 train_time:525825ms step_avg:81.38ms
+[2025-07-06 05:47:43] [Rank 0] step:6481/10000 train_time:527322ms step_avg:81.36ms
+[2025-07-06 05:47:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 05:47:45] [Rank 0] PRINT: step:6500/10000 train_loss:0.8608 val_loss:0.8605 train_time:529538ms step_avg:81.47ms
+[2025-07-06 05:47:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 05:47:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 05:47:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 05:53:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 05:53:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 05:53:09] [Rank 0] Total Loss: 5.3654
+[2025-07-06 05:53:10] [Rank 0] Total FTA: 0.9716
+[2025-07-06 05:53:10] [Rank 0] Group 0 Loss: 5.6106
+[2025-07-06 05:53:10] [Rank 0] Group 1 Loss: 5.2186
+[2025-07-06 05:53:10] [Rank 0] Group 2 Loss: 5.1378
+[2025-07-06 05:53:10] [Rank 0] Group 3 Loss: 5.3073
+[2025-07-06 05:53:10] [Rank 0] Group 4 Loss: 5.4570
+[2025-07-06 05:53:10] [Rank 0] Group 5 Loss: 5.3158
+[2025-07-06 05:53:10] [Rank 0] Group 6 Loss: 5.2702
+[2025-07-06 05:53:10] [Rank 0] Group 7 Loss: 5.3917
+[2025-07-06 05:53:10] [Rank 0] Group 8 Loss: 5.3578
+[2025-07-06 05:53:10] [Rank 0] Group 9 Loss: 5.3473
+[2025-07-06 05:53:10] [Rank 0] Group 10 Loss: 5.3489
+[2025-07-06 05:53:10] [Rank 0] Group 11 Loss: 5.3689
+[2025-07-06 05:53:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 05:53:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 05:53:10] [Rank 0] Group 2 FTA: 0.9010
+[2025-07-06 05:53:10] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 05:53:10] [Rank 0] Group 4 FTA: 0.9792
+[2025-07-06 05:53:10] [Rank 0] Group 5 FTA: 0.9896
+[2025-07-06 05:53:10] [Rank 0] Group 6 FTA: 0.9557
+[2025-07-06 05:53:11] [Rank 0] Group 7 FTA: 0.9714
+[2025-07-06 05:53:11] [Rank 0] Group 8 FTA: 0.9635
+[2025-07-06 05:53:11] [Rank 0] Group 9 FTA: 0.9648
+[2025-07-06 05:53:11] [Rank 0] Group 10 FTA: 0.9590
+[2025-07-06 05:53:11] [Rank 0] Group 11 FTA: 0.9629
+[2025-07-06 05:53:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 05:53:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 05:53:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 05:53:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 05:53:12] [Rank 0] step:6501/10000 train_time:529562ms step_avg:81.46ms
+[2025-07-06 05:53:14] [Rank 0] step:6521/10000 train_time:531055ms step_avg:81.44ms
+[2025-07-06 05:53:15] [Rank 0] step:6541/10000 train_time:532541ms step_avg:81.42ms
+[2025-07-06 05:53:17] [Rank 0] step:6561/10000 train_time:534031ms step_avg:81.39ms
+[2025-07-06 05:53:19] [Rank 0] step:6581/10000 train_time:536166ms step_avg:81.47ms
+[2025-07-06 05:53:21] [Rank 0] step:6601/10000 train_time:537757ms step_avg:81.47ms
+[2025-07-06 05:53:22] [Rank 0] step:6621/10000 train_time:539248ms step_avg:81.45ms
+[2025-07-06 05:53:24] [Rank 0] step:6641/10000 train_time:540739ms step_avg:81.42ms
+[2025-07-06 05:53:26] [Rank 0] step:6661/10000 train_time:542484ms step_avg:81.44ms
+[2025-07-06 05:53:27] [Rank 0] step:6681/10000 train_time:544374ms step_avg:81.48ms
+[2025-07-06 05:53:29] [Rank 0] step:6701/10000 train_time:545868ms step_avg:81.46ms
+[2025-07-06 05:53:30] [Rank 0] step:6721/10000 train_time:547363ms step_avg:81.44ms
+[2025-07-06 05:53:32] [Rank 0] step:6741/10000 train_time:548856ms step_avg:81.42ms
+[2025-07-06 05:53:34] [Rank 0] step:6761/10000 train_time:551016ms step_avg:81.50ms
+[2025-07-06 05:53:35] [Rank 0] step:6781/10000 train_time:552609ms step_avg:81.49ms
+[2025-07-06 05:53:37] [Rank 0] step:6801/10000 train_time:554103ms step_avg:81.47ms
+[2025-07-06 05:53:39] [Rank 0] step:6821/10000 train_time:555697ms step_avg:81.47ms
+[2025-07-06 05:53:41] [Rank 0] step:6841/10000 train_time:557876ms step_avg:81.55ms
+[2025-07-06 05:53:42] [Rank 0] step:6861/10000 train_time:559351ms step_avg:81.53ms
+[2025-07-06 05:53:44] [Rank 0] step:6881/10000 train_time:560847ms step_avg:81.51ms
+[2025-07-06 05:53:45] [Rank 0] step:6901/10000 train_time:562343ms step_avg:81.49ms
+[2025-07-06 05:53:47] [Rank 0] step:6921/10000 train_time:563837ms step_avg:81.47ms
+[2025-07-06 05:53:49] [Rank 0] step:6941/10000 train_time:565992ms step_avg:81.54ms
+[2025-07-06 05:53:50] [Rank 0] step:6961/10000 train_time:567490ms step_avg:81.52ms
+[2025-07-06 05:53:52] [Rank 0] step:6981/10000 train_time:568987ms step_avg:81.51ms
+[2025-07-06 05:53:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 05:53:54] [Rank 0] PRINT: step:7000/10000 train_loss:0.8597 val_loss:0.8600 train_time:570483ms step_avg:81.50ms
+[2025-07-06 05:53:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 05:53:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 05:53:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 05:53:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 05:59:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 05:59:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 05:59:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 05:59:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 05:59:22] [Rank 0] Total Loss: 5.3707 +[2025-07-06 05:59:22] [Rank 0] Total Loss: 5.3707 +[2025-07-06 05:59:22] [Rank 0] Total FTA: 0.9590 +[2025-07-06 05:59:22] [Rank 0] Total FTA: 0.9590 +[2025-07-06 05:59:22] [Rank 0] Group 0 Loss: 5.5460 +[2025-07-06 05:59:22] [Rank 0] Group 0 Loss: 5.5460 +[2025-07-06 05:59:22] [Rank 0] Group 1 Loss: 5.1727 +[2025-07-06 05:59:22] [Rank 0] Group 1 Loss: 5.1727 +[2025-07-06 05:59:22] [Rank 0] Group 2 Loss: 5.1964 +[2025-07-06 05:59:22] [Rank 0] Group 2 Loss: 5.1964 +[2025-07-06 05:59:22] [Rank 0] Group 3 Loss: 5.3046 +[2025-07-06 05:59:22] [Rank 0] Group 3 Loss: 5.3046 +[2025-07-06 05:59:23] [Rank 0] Group 4 Loss: 5.4784 +[2025-07-06 05:59:23] [Rank 0] Group 4 Loss: 5.4784 +[2025-07-06 05:59:23] [Rank 0] Group 5 Loss: 5.2869 +[2025-07-06 05:59:23] [Rank 0] Group 5 Loss: 5.2869 +[2025-07-06 05:59:23] [Rank 0] Group 6 Loss: 5.2548 +[2025-07-06 05:59:23] [Rank 0] Group 6 Loss: 5.2548 +[2025-07-06 05:59:23] [Rank 0] Group 7 Loss: 5.4343 +[2025-07-06 05:59:23] [Rank 0] Group 7 Loss: 5.4343 +[2025-07-06 05:59:23] [Rank 0] Group 8 Loss: 5.3677 +[2025-07-06 05:59:23] [Rank 0] Group 8 Loss: 5.3677 +[2025-07-06 05:59:23] [Rank 0] Group 9 Loss: 5.3903 +[2025-07-06 05:59:23] [Rank 0] Group 9 Loss: 5.3903 +[2025-07-06 05:59:23] [Rank 0] Group 10 Loss: 5.3737 +[2025-07-06 05:59:23] [Rank 0] Group 10 Loss: 5.3737 +[2025-07-06 05:59:23] [Rank 0] Group 11 Loss: 5.4089 +[2025-07-06 05:59:23] [Rank 0] Group 11 Loss: 5.4089 +[2025-07-06 05:59:23] [Rank 0] Group 0 FTA: 0.8296 +[2025-07-06 05:59:23] [Rank 0] Group 0 FTA: 0.8296 +[2025-07-06 05:59:23] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 05:59:23] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-06 05:59:23] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 05:59:23] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-06 05:59:23] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 05:59:23] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-06 05:59:23] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-06 05:59:23] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-06 05:59:23] [Rank 0] Group 5 FTA: 0.9974 +[2025-07-06 05:59:23] [Rank 0] Group 5 FTA: 0.9974 +[2025-07-06 05:59:23] [Rank 0] Group 6 FTA: 0.9714 +[2025-07-06 05:59:23] [Rank 0] Group 6 FTA: 0.9714 +[2025-07-06 05:59:23] [Rank 0] Group 7 FTA: 0.9661 +[2025-07-06 05:59:23] [Rank 0] Group 7 FTA: 0.9661 +[2025-07-06 05:59:23] [Rank 0] Group 8 FTA: 0.9688 +[2025-07-06 05:59:23] [Rank 0] Group 8 FTA: 0.9688 +[2025-07-06 05:59:23] [Rank 0] Group 9 FTA: 0.9766 +[2025-07-06 05:59:23] [Rank 0] Group 9 FTA: 0.9766 +[2025-07-06 05:59:23] [Rank 0] Group 10 FTA: 0.9707 +[2025-07-06 05:59:23] [Rank 0] Group 10 FTA: 0.9707 +[2025-07-06 05:59:23] [Rank 0] Group 11 FTA: 0.9590 +[2025-07-06 05:59:23] [Rank 0] Group 11 FTA: 0.9590 +[2025-07-06 05:59:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 05:59:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 05:59:24] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 05:59:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 05:59:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 05:59:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 05:59:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 05:59:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 05:59:25] [Rank 0] step:7001/10000 train_time:570505ms step_avg:81.49ms +[2025-07-06 05:59:25] [Rank 0] step:7001/10000 train_time:570505ms step_avg:81.49ms +[2025-07-06 05:59:27] [Rank 0] step:7021/10000 train_time:572695ms step_avg:81.57ms +[2025-07-06 05:59:27] [Rank 0] step:7021/10000 train_time:572695ms step_avg:81.57ms +[2025-07-06 05:59:29] [Rank 0] step:7041/10000 train_time:574263ms step_avg:81.56ms +[2025-07-06 05:59:29] [Rank 0] step:7041/10000 train_time:574263ms step_avg:81.56ms +[2025-07-06 05:59:30] [Rank 0] step:7061/10000 train_time:575751ms step_avg:81.54ms +[2025-07-06 05:59:30] [Rank 0] step:7061/10000 train_time:575751ms step_avg:81.54ms +[2025-07-06 05:59:32] [Rank 0] step:7081/10000 train_time:577241ms step_avg:81.52ms +[2025-07-06 05:59:32] [Rank 0] step:7081/10000 train_time:577241ms step_avg:81.52ms +[2025-07-06 05:59:33] [Rank 0] step:7101/10000 train_time:578732ms step_avg:81.50ms +[2025-07-06 05:59:33] [Rank 0] step:7101/10000 train_time:578732ms step_avg:81.50ms +[2025-07-06 05:59:35] [Rank 0] step:7121/10000 train_time:580873ms step_avg:81.57ms +[2025-07-06 05:59:35] [Rank 0] step:7121/10000 train_time:580873ms step_avg:81.57ms +[2025-07-06 05:59:37] [Rank 0] step:7141/10000 train_time:582365ms step_avg:81.55ms +[2025-07-06 05:59:37] [Rank 0] step:7141/10000 train_time:582365ms step_avg:81.55ms +[2025-07-06 05:59:38] [Rank 0] step:7161/10000 train_time:583857ms step_avg:81.53ms +[2025-07-06 05:59:38] [Rank 0] step:7161/10000 train_time:583857ms step_avg:81.53ms +[2025-07-06 05:59:40] [Rank 0] step:7181/10000 train_time:585350ms step_avg:81.51ms +[2025-07-06 05:59:40] [Rank 0] step:7181/10000 train_time:585350ms step_avg:81.51ms +[2025-07-06 05:59:42] [Rank 0] step:7201/10000 train_time:586894ms step_avg:81.50ms +[2025-07-06 05:59:42] [Rank 0] step:7201/10000 train_time:586894ms step_avg:81.50ms +[2025-07-06 05:59:43] [Rank 0] step:7221/10000 train_time:588984ms step_avg:81.57ms +[2025-07-06 05:59:43] [Rank 0] step:7221/10000 train_time:588984ms step_avg:81.57ms +[2025-07-06 05:59:45] [Rank 0] step:7241/10000 train_time:590477ms step_avg:81.55ms +[2025-07-06 05:59:45] [Rank 0] step:7241/10000 train_time:590477ms step_avg:81.55ms +[2025-07-06 05:59:46] [Rank 0] step:7261/10000 train_time:591971ms step_avg:81.53ms +[2025-07-06 05:59:46] [Rank 0] step:7261/10000 train_time:591971ms step_avg:81.53ms +[2025-07-06 05:59:48] [Rank 0] step:7281/10000 train_time:593465ms step_avg:81.51ms +[2025-07-06 05:59:48] [Rank 0] step:7281/10000 train_time:593465ms step_avg:81.51ms +[2025-07-06 05:59:50] [Rank 0] step:7301/10000 train_time:595626ms step_avg:81.58ms +[2025-07-06 05:59:50] [Rank 0] 
+[2025-07-06 05:59:52] [Rank 0] step:7321/10000 train_time:597121ms step_avg:81.56ms
+[2025-07-06 05:59:53] [Rank 0] step:7341/10000 train_time:598615ms step_avg:81.54ms
+[2025-07-06 05:59:55] [Rank 0] step:7361/10000 train_time:600110ms step_avg:81.53ms
+[2025-07-06 05:59:56] [Rank 0] step:7381/10000 train_time:601865ms step_avg:81.54ms
+[2025-07-06 05:59:58] [Rank 0] step:7401/10000 train_time:603339ms step_avg:81.52ms
+[2025-07-06 05:59:59] [Rank 0] step:7421/10000 train_time:604934ms step_avg:81.52ms
+[2025-07-06 06:00:01] [Rank 0] step:7441/10000 train_time:606431ms step_avg:81.50ms
+[2025-07-06 06:00:02] [Rank 0] step:7461/10000 train_time:607936ms step_avg:81.48ms
+[2025-07-06 06:00:05] [Rank 0] step:7481/10000 train_time:610094ms step_avg:81.55ms
+[2025-07-06 06:00:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:00:07] [Rank 0] PRINT: step:7500/10000 train_loss:0.8587 val_loss:0.8597 train_time:611588ms step_avg:81.55ms
+[2025-07-06 06:00:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:00:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:00:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:05:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:05:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:05:34] [Rank 0] Total Loss: 5.3493
+[2025-07-06 06:05:34] [Rank 0] Total FTA: 0.9485
+[2025-07-06 06:05:34] [Rank 0] Group 0 Loss: 5.6870
+[2025-07-06 06:05:34] [Rank 0] Group 1 Loss: 5.2697
+[2025-07-06 06:05:34] [Rank 0] Group 2 Loss: 5.2270
+[2025-07-06 06:05:34] [Rank 0] Group 3 Loss: 5.2097
+[2025-07-06 06:05:34] [Rank 0] Group 4 Loss: 5.4015
+[2025-07-06 06:05:34] [Rank 0] Group 5 Loss: 5.2889
+[2025-07-06 06:05:34] [Rank 0] Group 6 Loss: 5.2097
+[2025-07-06 06:05:34] [Rank 0] Group 7 Loss: 5.3402
+[2025-07-06 06:05:34] [Rank 0] Group 8 Loss: 5.2770
+[2025-07-06 06:05:34] [Rank 0] Group 9 Loss: 5.3552
+[2025-07-06 06:05:34] [Rank 0] Group 10 Loss: 5.3372
+[2025-07-06 06:05:34] [Rank 0] Group 11 Loss: 5.3144
+[2025-07-06 06:05:34] [Rank 0] Group 0 FTA: 0.8257
+[2025-07-06 06:05:34] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 06:05:34] [Rank 0] Group 2 FTA: 0.9375
+[2025-07-06 06:05:34] [Rank 0] Group 3 FTA: 0.9141
+[2025-07-06 06:05:34] [Rank 0] Group 4 FTA: 0.9583
+[2025-07-06 06:05:34] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-06 06:05:34] [Rank 0] Group 6 FTA: 0.9635
+[2025-07-06 06:05:34] [Rank 0] Group 7 FTA: 0.9844
+[2025-07-06 06:05:34] [Rank 0] Group 8 FTA: 0.9714
+[2025-07-06 06:05:34] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 06:05:34] [Rank 0] Group 10 FTA: 0.9688
+[2025-07-06 06:05:34] [Rank 0] Group 11 FTA: 0.9707
+[2025-07-06 06:05:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 06:05:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 06:05:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 06:05:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 06:05:36] [Rank 0] step:7501/10000 train_time:611612ms step_avg:81.54ms
+[2025-07-06 06:05:38] [Rank 0] step:7521/10000 train_time:613094ms step_avg:81.52ms
+[2025-07-06 06:05:39] [Rank 0] step:7541/10000 train_time:614777ms step_avg:81.52ms
+[2025-07-06 06:05:41] [Rank 0] step:7561/10000 train_time:616266ms step_avg:81.51ms
+[2025-07-06 06:05:43] [Rank 0] step:7581/10000 train_time:618416ms step_avg:81.57ms
+[2025-07-06 06:05:44] [Rank 0] step:7601/10000 train_time:619908ms step_avg:81.56ms
+[2025-07-06 06:05:46] [Rank 0] step:7621/10000 train_time:621400ms step_avg:81.54ms
+[2025-07-06 06:05:47] [Rank 0] step:7641/10000 train_time:622893ms step_avg:81.52ms
+[2025-07-06 06:05:49] [Rank 0] step:7661/10000 train_time:625026ms step_avg:81.59ms
+[2025-07-06 06:05:51] [Rank 0] step:7681/10000 train_time:626518ms step_avg:81.57ms
+[2025-07-06 06:05:52] [Rank 0] step:7701/10000 train_time:628012ms step_avg:81.55ms
+[2025-07-06 06:05:54] [Rank 0] step:7721/10000 train_time:629506ms step_avg:81.53ms
+[2025-07-06 06:05:56] [Rank 0] step:7741/10000 train_time:631664ms step_avg:81.60ms
+[2025-07-06 06:05:58] [Rank 0] step:7761/10000 train_time:633137ms step_avg:81.58ms
+[2025-07-06 06:05:59] [Rank 0] step:7781/10000 train_time:634631ms step_avg:81.56ms
+[2025-07-06 06:06:01] [Rank 0] step:7801/10000 train_time:636125ms step_avg:81.54ms
+[2025-07-06 06:06:02] [Rank 0] step:7821/10000 train_time:637618ms step_avg:81.53ms
+[2025-07-06 06:06:04] [Rank 0] step:7841/10000 train_time:639877ms step_avg:81.61ms
+[2025-07-06 06:06:06] [Rank 0] step:7861/10000 train_time:641372ms step_avg:81.59ms
+[2025-07-06 06:06:07] [Rank 0] step:7881/10000 train_time:642867ms step_avg:81.57ms
+[2025-07-06 06:06:09] [Rank 0] step:7901/10000 train_time:644362ms step_avg:81.55ms
+[2025-07-06 06:06:11] [Rank 0] step:7921/10000 train_time:645908ms step_avg:81.54ms
+[2025-07-06 06:06:12] [Rank 0] step:7941/10000 train_time:648018ms step_avg:81.60ms
+[2025-07-06 06:06:14] [Rank 0] step:7961/10000 train_time:649614ms step_avg:81.60ms
+[2025-07-06 06:06:16] [Rank 0] step:7981/10000 train_time:651109ms step_avg:81.58ms
+[2025-07-06 06:06:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:06:18] [Rank 0] PRINT: step:8000/10000 train_loss:0.8580 val_loss:0.8592 train_time:652706ms step_avg:81.59ms
+[2025-07-06 06:06:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:06:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:06:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:11:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:11:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:11:47] [Rank 0] Total Loss: 5.3675
+[2025-07-06 06:11:47] [Rank 0] Total FTA: 0.9837
+[2025-07-06 06:11:47] [Rank 0] Group 0 Loss: 5.7354
+[2025-07-06 06:11:47] [Rank 0] Group 1 Loss: 5.2654
+[2025-07-06 06:11:47] [Rank 0] Group 2 Loss: 5.1822
+[2025-07-06 06:11:47] [Rank 0] Group 3 Loss: 5.2955
+[2025-07-06 06:11:47] [Rank 0] Group 4 Loss: 5.3257
+[2025-07-06 06:11:48] [Rank 0] Group 5 Loss: 5.3664
+[2025-07-06 06:11:48] [Rank 0] Group 6 Loss: 5.2098
+[2025-07-06 06:11:48] [Rank 0] Group 7 Loss: 5.3721
+[2025-07-06 06:11:48] [Rank 0] Group 8 Loss: 5.3575
+[2025-07-06 06:11:48] [Rank 0] Group 9 Loss: 5.2885
+[2025-07-06 06:11:48] [Rank 0] Group 10 Loss: 5.3312
+[2025-07-06 06:11:48] [Rank 0] Group 11 Loss: 5.3409
+[2025-07-06 06:11:48] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 06:11:48] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 06:11:48] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 06:11:48] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 06:11:48] [Rank 0] Group 4 FTA: 0.9740
+[2025-07-06 06:11:48] [Rank 0] Group 5 FTA: 0.9870
+[2025-07-06 06:11:48] [Rank 0] Group 6 FTA: 0.9661
+[2025-07-06 06:11:48] [Rank 0] Group 7 FTA: 0.9870
+[2025-07-06 06:11:48] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-06 06:11:48] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-06 06:11:48] [Rank 0] Group 10 FTA: 0.9785
+[2025-07-06 06:11:48] [Rank 0] Group 11 FTA: 0.9756
+[2025-07-06 06:11:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 06:11:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 06:11:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 06:11:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 06:11:50] [Rank 0] step:8001/10000 train_time:652728ms step_avg:81.58ms
+[2025-07-06 06:11:52] [Rank 0] step:8021/10000 train_time:654908ms step_avg:81.65ms
+[2025-07-06 06:11:54] [Rank 0] step:8041/10000 train_time:656394ms step_avg:81.63ms
+[2025-07-06 06:11:55] [Rank 0] step:8061/10000 train_time:657883ms step_avg:81.61ms
+[2025-07-06 06:11:57] [Rank 0] step:8081/10000 train_time:659372ms step_avg:81.60ms
+[2025-07-06 06:11:58] [Rank 0] step:8101/10000 train_time:661068ms step_avg:81.60ms
+[2025-07-06 06:12:00] [Rank 0] step:8121/10000 train_time:662768ms step_avg:81.61ms
+[2025-07-06 06:12:02] [Rank 0] step:8141/10000 train_time:664258ms step_avg:81.59ms
+[2025-07-06 06:12:03] [Rank 0] step:8161/10000 train_time:665751ms step_avg:81.58ms
+[2025-07-06 06:12:05] [Rank 0] step:8181/10000 train_time:667243ms step_avg:81.56ms
+[2025-07-06 06:12:07] [Rank 0] step:8201/10000 train_time:669375ms step_avg:81.62ms
+[2025-07-06 06:12:08] [Rank 0] step:8221/10000 train_time:670867ms step_avg:81.60ms
+[2025-07-06 06:12:10] [Rank 0] step:8241/10000 train_time:672359ms step_avg:81.59ms
+[2025-07-06 06:12:11] [Rank 0] step:8261/10000 train_time:673853ms step_avg:81.57ms
+[2025-07-06 06:12:13] [Rank 0] step:8281/10000 train_time:675400ms step_avg:81.56ms
+[2025-07-06 06:12:15] [Rank 0] step:8301/10000 train_time:677587ms step_avg:81.63ms
+[2025-07-06 06:12:16] [Rank 0] step:8321/10000 train_time:679189ms step_avg:81.62ms
+[2025-07-06 06:12:18] [Rank 0] step:8341/10000 train_time:680784ms step_avg:81.62ms
+[2025-07-06 06:12:20] [Rank 0] step:8361/10000 train_time:682277ms step_avg:81.60ms
+[2025-07-06 06:12:22] [Rank 0] step:8381/10000 train_time:684415ms step_avg:81.66ms
+[2025-07-06 06:12:23] [Rank 0] step:8401/10000 train_time:686008ms step_avg:81.66ms
+[2025-07-06 06:12:25] [Rank 0] step:8421/10000 train_time:687503ms step_avg:81.64ms
+[2025-07-06 06:12:26] [Rank 0] step:8441/10000 train_time:688997ms step_avg:81.63ms
+[2025-07-06 06:12:28] [Rank 0] step:8461/10000 train_time:690749ms step_avg:81.64ms
+[2025-07-06 06:12:30] [Rank 0] step:8481/10000 train_time:692654ms step_avg:81.67ms
+[2025-07-06 06:12:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:12:32] [Rank 0] PRINT: step:8500/10000 train_loss:0.8572 val_loss:0.8588 train_time:694250ms step_avg:81.68ms
+[2025-07-06 06:12:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:12:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:12:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:18:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:18:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:18:00] [Rank 0] Total Loss: 5.3773
+[2025-07-06 06:18:00] [Rank 0] Total FTA: 0.9856
+[2025-07-06 06:18:00] [Rank 0] Group 0 Loss: 5.5691
+[2025-07-06 06:18:00] [Rank 0] Group 1 Loss: 5.3075
+[2025-07-06 06:18:00] [Rank 0] Group 2 Loss: 5.2548
+[2025-07-06 06:18:01] [Rank 0] Group 3 Loss: 5.3023
+[2025-07-06 06:18:01] [Rank 0] Group 4 Loss: 5.3975
+[2025-07-06 06:18:01] [Rank 0] Group 5 Loss: 5.3582
+[2025-07-06 06:18:01] [Rank 0] Group 6 Loss: 5.2751
+[2025-07-06 06:18:01] [Rank 0] Group 7 Loss: 5.4009
+[2025-07-06 06:18:01] [Rank 0] Group 8 Loss: 5.3790
+[2025-07-06 06:18:01] [Rank 0] Group 9 Loss: 5.3625
+[2025-07-06 06:18:01] [Rank 0] Group 10 Loss: 5.3323
+[2025-07-06 06:18:01] [Rank 0] Group 11 Loss: 5.3880
+[2025-07-06 06:18:01] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 06:18:01] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 06:18:01] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 06:18:01] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 06:18:01] [Rank 0] Group 4 FTA: 0.9818
+[2025-07-06 06:18:02] [Rank 0] Group 5 FTA: 0.9661
+[2025-07-06 06:18:02] [Rank 0] Group 6 FTA: 0.9844
+[2025-07-06 06:18:02] [Rank 0] Group 7 FTA: 0.9896
+[2025-07-06 06:18:02] [Rank 0] Group 8 FTA: 0.9818
+[2025-07-06 06:18:02] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-06 06:18:02] [Rank 0] Group 10 FTA: 0.9785
+[2025-07-06 06:18:02] [Rank 0] Group 11 FTA: 0.9756
+[2025-07-06 06:18:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 06:18:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 06:18:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 06:18:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 06:18:04] [Rank 0] step:8501/10000 train_time:694272ms step_avg:81.67ms
+[2025-07-06 06:18:05] [Rank 0] step:8521/10000 train_time:695770ms step_avg:81.65ms
+[2025-07-06 06:18:07] [Rank 0] step:8541/10000 train_time:697360ms step_avg:81.65ms
+[2025-07-06 06:18:09] [Rank 0] step:8561/10000 train_time:699590ms step_avg:81.72ms
+[2025-07-06 06:18:11] [Rank 0] step:8581/10000 train_time:701075ms step_avg:81.70ms
+[2025-07-06 06:18:12] [Rank 0] step:8601/10000 train_time:702665ms step_avg:81.70ms
+[2025-07-06 06:18:14] [Rank 0] step:8621/10000 train_time:704258ms step_avg:81.69ms
+[2025-07-06 06:18:16] [Rank 0] step:8641/10000 train_time:705749ms step_avg:81.67ms
+[2025-07-06 06:18:18] [Rank 0] step:8661/10000 train_time:708128ms step_avg:81.76ms
+[2025-07-06 06:18:19] [Rank 0] step:8681/10000 train_time:709727ms step_avg:81.76ms
+[2025-07-06 06:18:21] [Rank 0] step:8701/10000 train_time:711225ms step_avg:81.74ms
+[2025-07-06 06:18:22] [Rank 0] step:8721/10000 train_time:712717ms step_avg:81.72ms
+[2025-07-06 06:18:24] [Rank 0] step:8741/10000 train_time:714952ms step_avg:81.79ms
+[2025-07-06 06:18:26] [Rank 0] step:8761/10000 train_time:716445ms step_avg:81.78ms
+[2025-07-06 06:18:28] [Rank 0] step:8781/10000 train_time:718040ms step_avg:81.77ms
+[2025-07-06 06:18:29] [Rank 0] step:8801/10000 train_time:719534ms step_avg:81.76ms
+[2025-07-06 06:18:31] [Rank 0] step:8821/10000 train_time:721684ms step_avg:81.81ms
+[2025-07-06 06:18:33] [Rank 0] step:8841/10000 train_time:723159ms step_avg:81.80ms
+[2025-07-06 06:18:34] [Rank 0] step:8861/10000 train_time:724654ms step_avg:81.78ms
+[2025-07-06 06:18:36] [Rank 0] step:8881/10000 train_time:726152ms step_avg:81.76ms
+[2025-07-06 06:18:37] [Rank 0] step:8901/10000 train_time:727649ms step_avg:81.75ms
+[2025-07-06 06:18:39] [Rank 0] step:8921/10000 train_time:729481ms step_avg:81.77ms
+[2025-07-06 06:18:41] [Rank 0] step:8941/10000 train_time:731080ms step_avg:81.77ms
+[2025-07-06 06:18:42] [Rank 0] step:8961/10000 train_time:732578ms step_avg:81.75ms
+[2025-07-06 06:18:44] [Rank 0] step:8981/10000 train_time:734176ms step_avg:81.75ms
+[2025-07-06 06:18:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:18:46] [Rank 0] PRINT: step:9000/10000 train_loss:0.8564 val_loss:0.8586 train_time:735674ms step_avg:81.74ms
+[2025-07-06 06:18:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:18:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:18:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:24:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:24:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:24:13] [Rank 0] Total Loss: 5.3761
+[2025-07-06 06:24:13] [Rank 0] Total FTA: 0.9924
+[2025-07-06 06:24:13] [Rank 0] Group 0 Loss: 5.5632
+[2025-07-06 06:24:13] [Rank 0] Group 1 Loss: 5.3664
+[2025-07-06 06:24:14] [Rank 0] Group 2 Loss: 5.2678
+[2025-07-06 06:24:14] [Rank 0] Group 3 Loss: 5.3275
+[2025-07-06 06:24:14] [Rank 0] Group 4 Loss: 5.3898
+[2025-07-06 06:24:14] [Rank 0] Group 5 Loss: 5.3359
+[2025-07-06 06:24:14] [Rank 0] Group 6 Loss: 5.2139
+[2025-07-06 06:24:14] [Rank 0] Group 7 Loss: 5.3463
+[2025-07-06 06:24:14] [Rank 0] Group 8 Loss: 5.4033
+[2025-07-06 06:24:14] [Rank 0] Group 9 Loss: 5.3078
+[2025-07-06 06:24:14] [Rank 0] Group 10 Loss: 5.3865
+[2025-07-06 06:24:14] [Rank 0] Group 11 Loss: 5.3818
+[2025-07-06 06:24:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 06:24:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 06:24:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 06:24:15] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 06:24:15] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 06:24:15] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-06 06:24:15] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-06 06:24:16] [Rank 0] Group 7 FTA: 0.9870
+[2025-07-06 06:24:16] [Rank 0] Group 8 FTA: 0.9818
+[2025-07-06 06:24:16] [Rank 0] Group 9 FTA: 0.9883
+[2025-07-06 06:24:16] [Rank 0] Group 10 FTA: 0.9922
+[2025-07-06 06:24:16] [Rank 0] Group 11 FTA: 0.9863
+[2025-07-06 06:24:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 06:24:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 06:24:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 06:24:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 06:24:19] [Rank 0] step:9001/10000 train_time:735703ms step_avg:81.74ms
+[2025-07-06 06:24:20] [Rank 0] step:9021/10000 train_time:737901ms step_avg:81.80ms
+[2025-07-06 06:24:22] [Rank 0] step:9041/10000 train_time:739487ms step_avg:81.79ms
+[2025-07-06 06:24:23] [Rank 0] step:9061/10000 train_time:741075ms step_avg:81.79ms
+[2025-07-06 06:24:25] [Rank 0] step:9081/10000 train_time:742566ms step_avg:81.77ms
+[2025-07-06 06:24:27] [Rank 0] step:9101/10000 train_time:744802ms step_avg:81.84ms
+[2025-07-06 06:24:28] [Rank 0] step:9121/10000 train_time:746292ms step_avg:81.82ms
+[2025-07-06 06:24:30] [Rank 0] step:9141/10000 train_time:747785ms step_avg:81.81ms
+[2025-07-06 06:24:31] [Rank 0] step:9161/10000 train_time:749277ms step_avg:81.79ms
+[2025-07-06 06:24:33] [Rank 0] step:9181/10000 train_time:751036ms step_avg:81.80ms
+[2025-07-06 06:24:35] [Rank 0] step:9201/10000 train_time:752599ms step_avg:81.80ms
+[2025-07-06 06:24:37] [Rank 0] step:9221/10000 train_time:754358ms step_avg:81.81ms
+[2025-07-06 06:24:38] [Rank 0] step:9241/10000 train_time:756036ms step_avg:81.81ms
+[2025-07-06 06:24:40] [Rank 0] step:9261/10000 train_time:757530ms step_avg:81.80ms
+[2025-07-06 06:24:42] [Rank 0] step:9281/10000 train_time:759687ms step_avg:81.85ms
+[2025-07-06 06:24:43] [Rank 0] step:9301/10000 train_time:761177ms step_avg:81.84ms
+[2025-07-06 06:24:45] [Rank 0] step:9321/10000 train_time:762672ms step_avg:81.82ms
+[2025-07-06 06:24:46] [Rank 0] step:9341/10000 train_time:764169ms step_avg:81.81ms
+[2025-07-06 06:24:48] [Rank 0] step:9361/10000 train_time:765920ms step_avg:81.82ms
+[2025-07-06 06:24:50] [Rank 0] step:9381/10000 train_time:767830ms step_avg:81.85ms
+[2025-07-06 06:24:51] [Rank 0] step:9401/10000 train_time:769322ms step_avg:81.83ms
+[2025-07-06 06:24:53] [Rank 0] step:9421/10000 train_time:770819ms step_avg:81.82ms
+[2025-07-06 06:24:54] [Rank 0] step:9441/10000 train_time:772314ms step_avg:81.80ms
+[2025-07-06 06:24:57] [Rank 0] step:9461/10000 train_time:774470ms step_avg:81.86ms
+[2025-07-06 06:24:58] [Rank 0] step:9481/10000 train_time:776065ms step_avg:81.85ms
+[2025-07-06 06:25:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:25:01] [Rank 0] PRINT: step:9500/10000 train_loss:0.8557 val_loss:0.8584 train_time:777560ms step_avg:81.85ms
+[2025-07-06 06:25:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:25:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:25:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:30:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:30:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:30:28] [Rank 0] Total Loss: 5.3911
+[2025-07-06 06:30:28] [Rank 0] Total FTA: 0.9929
+[2025-07-06 06:30:28] [Rank 0] Group 0 Loss: 5.6098
+[2025-07-06 06:30:28] [Rank 0] Group 1 Loss: 5.2588
+[2025-07-06 06:30:28] [Rank 0] Group 2 Loss: 5.2702
+[2025-07-06 06:30:28] [Rank 0] Group 3 Loss: 5.2817
+[2025-07-06 06:30:28] [Rank 0] Group 4 Loss: 5.4332
+[2025-07-06 06:30:28] [Rank 0] Group 5 Loss: 5.3271
+[2025-07-06 06:30:28] [Rank 0] Group 6 Loss: 5.2943
+[2025-07-06 06:30:28] [Rank 0] Group 7 Loss: 5.3948
+[2025-07-06 06:30:28] [Rank 0] Group 8 Loss: 5.4031
+[2025-07-06 06:30:28] [Rank 0] Group 9 Loss: 5.4339
+[2025-07-06 06:30:28] [Rank 0] Group 10 Loss: 5.3670
+[2025-07-06 06:30:28] [Rank 0] Group 11 Loss: 5.4027
+[2025-07-06 06:30:28] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 06:30:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 06:30:28] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 06:30:28] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 06:30:28] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 06:30:28] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-06 06:30:28] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-06 06:30:28] [Rank 0] Group 7 FTA: 0.9870
+[2025-07-06 06:30:28] [Rank 0] Group 8 FTA: 0.9818
+[2025-07-06 06:30:28] [Rank 0] Group 9 FTA: 0.9805
+[2025-07-06 06:30:28] [Rank 0] Group 10 FTA: 0.9961
+[2025-07-06 06:30:28] [Rank 0] Group 11 FTA: 0.9873
+[2025-07-06 06:30:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 06:30:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 06:30:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 06:30:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 06:30:30] [Rank 0] step:9501/10000 train_time:777583ms step_avg:81.84ms
+[2025-07-06 06:30:31] [Rank 0] step:9521/10000 train_time:779085ms step_avg:81.83ms
+[2025-07-06 06:30:34] [Rank 0] step:9541/10000 train_time:780834ms step_avg:81.84ms
+[2025-07-06 06:30:35] [Rank 0] step:9561/10000 train_time:782729ms step_avg:81.87ms
+[2025-07-06 06:30:37] [Rank 0] step:9581/10000 train_time:784219ms step_avg:81.85ms
+[2025-07-06 06:30:38] [Rank 0] step:9601/10000 train_time:785711ms step_avg:81.84ms
+[2025-07-06 06:30:40] [Rank 0] step:9621/10000 train_time:787200ms step_avg:81.82ms
+[2025-07-06 06:30:42] [Rank 0] step:9641/10000 train_time:789339ms step_avg:81.87ms
+[2025-07-06 06:30:43] [Rank 0] step:9661/10000 train_time:790931ms step_avg:81.87ms
+[2025-07-06 06:30:45] [Rank 0] step:9681/10000 train_time:792424ms step_avg:81.85ms
+[2025-07-06 06:30:46] [Rank 0] step:9701/10000 train_time:793916ms step_avg:81.84ms
+[2025-07-06 06:30:48] [Rank 0] step:9721/10000 train_time:795460ms step_avg:81.83ms
+[2025-07-06 06:30:49] [Rank 0] step:9741/10000 train_time:797140ms step_avg:81.83ms
+[2025-07-06 06:30:51] [Rank 0] step:9761/10000 train_time:798734ms step_avg:81.83ms
+[2025-07-06 06:30:53] [Rank 0] step:9781/10000 train_time:800229ms step_avg:81.81ms
+[2025-07-06 06:30:54] [Rank 0] step:9801/10000 train_time:801723ms step_avg:81.80ms
+[2025-07-06 06:30:56] [Rank 0] step:9821/10000 train_time:803993ms step_avg:81.86ms
+[2025-07-06 06:30:58] [Rank 0] step:9841/10000 train_time:805489ms step_avg:81.85ms
+[2025-07-06 06:30:59] [Rank 0] step:9861/10000 train_time:806985ms step_avg:81.84ms
+[2025-07-06 06:31:01] [Rank 0] step:9881/10000 train_time:808481ms step_avg:81.82ms
+[2025-07-06 06:31:02] [Rank 0] step:9901/10000 train_time:810029ms step_avg:81.81ms
+[2025-07-06 06:31:04] [Rank 0] step:9921/10000 train_time:811612ms step_avg:81.81ms
+[2025-07-06 06:31:05] [Rank 0] step:9941/10000 train_time:813110ms step_avg:81.79ms
+[2025-07-06 06:31:07] [Rank 0] step:9961/10000 train_time:814608ms step_avg:81.78ms
+[2025-07-06 06:31:08] [Rank 0] step:9981/10000 train_time:816107ms step_avg:81.77ms
+[2025-07-06 06:31:11] [Rank 0] step:10000/10000 train_time:818196ms step_avg:81.82ms
+[2025-07-06 06:31:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 06:31:12] [Rank 0] PRINT: step:10000/10000 train_loss:0.8549 val_loss:0.8584 train_time:818278ms step_avg:81.83ms
+[2025-07-06 06:31:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 06:31:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 06:31:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 06:36:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 06:36:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 06:36:36] [Rank 0] Total Loss: 5.3623
+[2025-07-06 06:36:36] [Rank 0] Total FTA: 0.9933
+[2025-07-06 06:36:36] [Rank 0] Group 0 Loss: 5.4821
+[2025-07-06 06:36:36] [Rank 0] Group 1 Loss: 5.3125
+[2025-07-06 06:36:36] [Rank 0] Group 2 Loss: 5.2576
+[2025-07-06 06:36:36] [Rank 0] Group 3 Loss: 5.2830
+[2025-07-06 06:36:36] [Rank 0] Group 4 Loss: 5.4119
+[2025-07-06 06:36:36] [Rank 0] Group 5 Loss: 5.3324
+[2025-07-06 06:36:36] [Rank 0] Group 6 Loss: 5.2134
+[2025-07-06 06:36:36] [Rank 0] Group 7 Loss: 5.4224
+[2025-07-06 06:36:36] [Rank 0] Group 8 Loss: 5.3855
+[2025-07-06 06:36:36] [Rank 0] Group 9 Loss: 5.3426
+[2025-07-06 06:36:36] [Rank 0] Group 10 Loss: 5.3470
+[2025-07-06 06:36:36] [Rank 0] Group 11 Loss: 5.3900
+[2025-07-06 06:36:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 06:36:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 06:36:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 06:36:36] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 06:36:36] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 06:36:36] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-06 06:36:36] [Rank 0] Group 6 FTA: 0.9818
+[2025-07-06 06:36:36] [Rank 0] Group 7 FTA: 0.9870
+[2025-07-06 06:36:36] [Rank 0] Group 8 FTA: 0.9766
+[2025-07-06 06:36:36] [Rank 0] Group 9 FTA: 0.9883
+[2025-07-06 06:36:36] [Rank 0] Group 10 FTA: 0.9883
+[2025-07-06 06:36:36] [Rank 0] Group 11 FTA: 0.9922
+[2025-07-06 06:36:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 06:36:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 06:36:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 06:36:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 06:36:38] [Rank 0] step:10001/10000 train_time:818300ms step_avg:81.82ms
+[2025-07-06 06:36:38] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 06:36:38 2025 ---
+[2025-07-06 06:36:38] [Rank 0] PRINT: Peak memory allocated: 9183 MiB reserved: 10636 MiB
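Editorial aside (not part of the logged runs): the run above finishes at val_loss 0.8584 with Total FTA 0.9933. The recurring divisibility warning is benign arithmetic: 1966080 val_tokens / 262144 tokens per validation batch = 7.5, so each evaluation pass uses 7 full batches (1835008 tokens) and skips the remaining 131072. Below is a minimal Python sketch for pulling the loss curve out of these training_log_*.txt files, assuming only the "PRINT: step:..." line format visible above; the path is a placeholder.

import re

STEP_RE = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_loss_curve(path):
    """Collect (step, train_loss, val_loss) triples from one training log."""
    points = []
    with open(path) as f:
        for line in f:
            m = STEP_RE.search(line)  # matches only the periodic "PRINT: step:..." lines
            if m:
                points.append((int(m.group(1)), float(m.group(2)), float(m.group(3))))
    return points

# On the (deduplicated) log above, parse_loss_curve("path/to/training_log_<uuid>.txt")
# would yield (7500, 0.8587, 0.8597), (8000, 0.8580, 0.8592), ..., (10000, 0.8549, 0.8584).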
new file mode 100644
index 0000000000000000000000000000000000000000..32c537e5023aa3aa0e883e6c7ecdd4541e3a3cb7
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:450392b5739b0e5a60f05043c102607686babb0dc163cd1b36a3a17ba0a399d8
+size 106881
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..8b16133f1c879e38adb36c87bd6f262b01f80303
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f27ade36a08ed0d4e9db4d6af435bfeefda0682e5fc57f27f0245f969f5318c
+size 111578
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/training_log_d3cb6b88-3f7b-4f9a-b285-a5ccf4cde85b.txt b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/training_log_d3cb6b88-3f7b-4f9a-b285-a5ccf4cde85b.txt
new file mode 100644
index 0000000000000000000000000000000000000000..26e1304db5ab0d6531da8559e29e1d785f5da1b8
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/training_log_d3cb6b88-3f7b-4f9a-b285-a5ccf4cde85b.txt
@@ -0,0 +1,5144 @@
+[2025-07-07 18:46:51] [Rank 0] PRINT: --- Script Start: Mon Jul 7 18:46:51 2025 ---
+[2025-07-07 18:46:51] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-07 18:46:51] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 18:46:51] [Rank 0] PRINT: Using fixed seed: 43
+[2025-07-07 18:46:51] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43
+[2025-07-07 18:46:51] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycling over the shards supports multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
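+# Illustrative doctest for the run-directory naming scheme above (the values are
+# taken from the CLI args recorded at the start of this log; the snippet itself
+# is an editorial sketch, not part of the executed run):
+#   >>> mode, param, lr, seed = 0, "qkvo", 0.005, 43
+#   >>> f"mode_{mode}_param_{param}_lr_{lr}_seed_{seed}"
+#   'mode_0_param_qkvo_lr_0.005_seed_43'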
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once, and only when a
+        # log file actually exists; a second, unconditional write here would
+        # duplicate every line in the log and crash when logfile is None.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
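+            # Expected return convention at this point: with target_seq=None the
+            # qkvo GPT should come back as a (loss, logits) tuple; any other
+            # return type falls through to the diagnostic print in the else
+            # branch below.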
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 18:46:51] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
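+# Note: with the CLI defaults above (optimizer_mode=0, model_parameterization
+# "whole", adam_lr=1e-3, seed=42), this naming scheme would yield a run
+# directory like logs_bios/qa_0704/mode_0_param_whole_lr_0.001_seed_42/,
+# which ends up holding config.json, training_log_<uuid>.txt, and the curve
+# PNGs saved by the plotting helpers below (illustrative example only).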
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # run_folder_name / run_dir_path were already constructed above; just materialize the directory
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 18:46:51] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 18:46:51] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 18:46:51] [Rank 0] PRINT: Constructing model... +[2025-07-07 18:46:51] [Rank 0] PRINT: Constructing model... +[2025-07-07 18:46:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 18:46:54] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 18:46:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 18:46:54] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 18:46:54] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 18:46:54] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 18:46:54] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 18:46:54] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 18:46:54] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 18:46:54] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 18:46:54] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 18:46:54] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 18:46:54] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 18:46:54] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 18:46:55] [Rank 0] PRINT: Model returns: +[2025-07-07 18:46:55] [Rank 0] PRINT: Model returns: +[2025-07-07 18:46:55] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 18:46:55] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 18:46:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-07 18:46:55] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-07 18:46:55] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-07 18:46:55] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-07 18:46:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-07 18:46:55] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-07 18:46:55] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-07 18:46:55] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-07 18:46:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 18:46:55] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 18:46:55] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 18:46:55] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 18:46:55] [Rank 0] PRINT: Starting warmup... +[2025-07-07 18:46:55] [Rank 0] PRINT: Starting warmup... +[2025-07-07 18:48:53] [Rank 0] PRINT: Warmup complete. +[2025-07-07 18:48:53] [Rank 0] PRINT: Warmup complete. +[2025-07-07 18:48:53] [Rank 0] PRINT: Starting training... +[2025-07-07 18:48:53] [Rank 0] PRINT: Starting training... +[2025-07-07 18:48:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:48:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 18:49:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 18:49:01] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 18:49:03] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.56ms +[2025-07-07 18:49:03] [Rank 0] step:21/10000 train_time:1755ms step_avg:83.56ms +[2025-07-07 18:49:04] [Rank 0] step:41/10000 train_time:3215ms step_avg:78.42ms +[2025-07-07 18:49:04] [Rank 0] step:41/10000 train_time:3215ms step_avg:78.42ms +[2025-07-07 18:49:06] [Rank 0] step:61/10000 train_time:4677ms step_avg:76.67ms +[2025-07-07 18:49:06] [Rank 0] step:61/10000 train_time:4677ms step_avg:76.67ms +[2025-07-07 18:49:07] [Rank 0] step:81/10000 train_time:6144ms step_avg:75.86ms +[2025-07-07 18:49:07] [Rank 0] step:81/10000 train_time:6144ms step_avg:75.86ms +[2025-07-07 18:49:09] [Rank 0] step:101/10000 train_time:8282ms step_avg:82.00ms +[2025-07-07 18:49:09] [Rank 0] step:101/10000 train_time:8282ms step_avg:82.00ms +[2025-07-07 18:49:11] [Rank 0] step:121/10000 train_time:9745ms step_avg:80.54ms +[2025-07-07 18:49:11] [Rank 0] step:121/10000 train_time:9745ms step_avg:80.54ms +[2025-07-07 18:49:12] [Rank 0] step:141/10000 train_time:11215ms step_avg:79.54ms +[2025-07-07 18:49:12] [Rank 0] step:141/10000 train_time:11215ms step_avg:79.54ms +[2025-07-07 18:49:14] [Rank 0] step:161/10000 train_time:12679ms step_avg:78.75ms +[2025-07-07 18:49:14] [Rank 0] step:161/10000 train_time:12679ms step_avg:78.75ms +[2025-07-07 18:49:16] [Rank 0] step:181/10000 train_time:14148ms step_avg:78.17ms +[2025-07-07 18:49:16] [Rank 0] step:181/10000 train_time:14148ms step_avg:78.17ms +[2025-07-07 18:49:17] [Rank 0] step:201/10000 train_time:16280ms step_avg:80.99ms +[2025-07-07 18:49:17] [Rank 0] step:201/10000 train_time:16280ms step_avg:80.99ms +[2025-07-07 18:49:19] [Rank 0] step:221/10000 train_time:17750ms step_avg:80.32ms +[2025-07-07 18:49:19] [Rank 0] step:221/10000 train_time:17750ms step_avg:80.32ms +[2025-07-07 18:49:20] [Rank 0] step:241/10000 train_time:19218ms step_avg:79.74ms +[2025-07-07 18:49:20] [Rank 0] step:241/10000 train_time:19218ms step_avg:79.74ms +[2025-07-07 18:49:22] [Rank 0] step:261/10000 train_time:20688ms step_avg:79.27ms +[2025-07-07 18:49:22] [Rank 0] step:261/10000 train_time:20688ms step_avg:79.27ms +[2025-07-07 18:49:23] [Rank 0] step:281/10000 train_time:22401ms step_avg:79.72ms +[2025-07-07 18:49:23] [Rank 0] step:281/10000 train_time:22401ms step_avg:79.72ms +[2025-07-07 18:49:25] [Rank 0] step:301/10000 train_time:23873ms step_avg:79.31ms +[2025-07-07 18:49:25] [Rank 0] step:301/10000 train_time:23873ms step_avg:79.31ms +[2025-07-07 18:49:26] [Rank 0] step:321/10000 train_time:25341ms step_avg:78.94ms +[2025-07-07 18:49:26] [Rank 0] step:321/10000 train_time:25341ms step_avg:78.94ms +[2025-07-07 18:49:28] [Rank 0] step:341/10000 train_time:26810ms step_avg:78.62ms +[2025-07-07 18:49:28] [Rank 0] step:341/10000 train_time:26810ms step_avg:78.62ms +[2025-07-07 18:49:30] [Rank 0] step:361/10000 train_time:28540ms step_avg:79.06ms +[2025-07-07 18:49:30] [Rank 0] step:361/10000 train_time:28540ms step_avg:79.06ms +[2025-07-07 18:49:31] [Rank 0] step:381/10000 train_time:30396ms step_avg:79.78ms +[2025-07-07 18:49:31] [Rank 0] step:381/10000 train_time:30396ms step_avg:79.78ms +[2025-07-07 18:49:33] [Rank 0] step:401/10000 train_time:31863ms step_avg:79.46ms +[2025-07-07 18:49:33] [Rank 0] step:401/10000 train_time:31863ms step_avg:79.46ms +[2025-07-07 18:49:34] [Rank 0] step:421/10000 train_time:33333ms step_avg:79.18ms 
+[2025-07-07 18:49:34] [Rank 0] step:421/10000 train_time:33333ms step_avg:79.18ms
+[2025-07-07 18:49:36] [Rank 0] step:441/10000 train_time:34803ms step_avg:78.92ms
+[2025-07-07 18:49:38] [Rank 0] step:461/10000 train_time:36920ms step_avg:80.09ms
+[2025-07-07 18:49:39] [Rank 0] step:481/10000 train_time:38383ms step_avg:79.80ms
+[2025-07-07 18:49:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:49:42] [Rank 0] PRINT: step:500/10000 train_loss:2.2943 val_loss:1.3646 train_time:39851ms step_avg:79.70ms
+[2025-07-07 18:49:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:49:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:49:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:55:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:55:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:55:10] [Rank 0] Total Loss: 4.4220
+[2025-07-07 18:55:10] [Rank 0] Total FTA: 0.1062
+[2025-07-07 18:55:10] [Rank 0] Group 0 Loss: 4.7290
+[2025-07-07 18:55:10] [Rank 0] Group 1 Loss: 4.3007
+[2025-07-07 18:55:10] [Rank 0] Group 2 Loss: 4.2126
+[2025-07-07 18:55:10] [Rank 0] Group 3 Loss: 4.4790
+[2025-07-07 18:55:10] [Rank 0] Group 4 Loss: 4.3296
+[2025-07-07 18:55:10] [Rank 0] Group 5 Loss: 4.3722
+[2025-07-07 18:55:10] [Rank 0] Group 6 Loss: 4.2941
+[2025-07-07 18:55:10] [Rank 0] Group 7 Loss: 4.4231
+[2025-07-07 18:55:10] [Rank 0] Group 8 Loss: 4.3861
+[2025-07-07 18:55:11] [Rank 0] Group 9 Loss: 4.3884
+[2025-07-07 18:55:11] [Rank 0] Group 10 Loss: 4.4108
+[2025-07-07 18:55:11] [Rank 0] Group 11 Loss: 4.4227
+[2025-07-07 18:55:11] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 18:55:11] [Rank 0] Group 1 FTA: 0.1693
+[2025-07-07 18:55:11] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-07 18:55:11] [Rank 0] Group 3 FTA: 0.0781
+[2025-07-07 18:55:11] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-07 18:55:11] [Rank 0] Group 5 FTA: 0.1328
+[2025-07-07 18:55:11] [Rank 0] Group 6 FTA: 0.0651
+[2025-07-07 18:55:11] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 18:55:11] [Rank 0] Group 8 FTA: 0.1068
+[2025-07-07 18:55:11] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 18:55:11] [Rank 0] Group 10 FTA: 0.0742
+[2025-07-07 18:55:11] [Rank 0] Group 11 FTA: 0.1006
+[2025-07-07 18:55:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 18:55:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 18:55:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 18:55:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 18:55:12] [Rank 0] step:501/10000 train_time:39873ms step_avg:79.59ms
+[2025-07-07 18:55:13] [Rank 0] step:521/10000 train_time:41344ms step_avg:79.36ms
+[2025-07-07 18:55:16] [Rank 0] step:541/10000 train_time:42804ms step_avg:79.12ms
+[2025-07-07 18:55:17] [Rank 0] step:561/10000 train_time:44914ms step_avg:80.06ms
+[2025-07-07 18:55:18] [Rank 0] step:581/10000 train_time:46371ms step_avg:79.81ms
+[2025-07-07 18:55:20] [Rank 0] step:601/10000 train_time:47831ms step_avg:79.59ms
+[2025-07-07 18:55:21] [Rank 0] step:621/10000 train_time:49294ms step_avg:79.38ms
+[2025-07-07 18:55:24] [Rank 0] step:641/10000 train_time:51418ms step_avg:80.22ms
+[2025-07-07 18:55:25] [Rank 0] step:661/10000 train_time:52878ms step_avg:80.00ms
+[2025-07-07 18:55:26] [Rank 0] step:681/10000 train_time:54342ms step_avg:79.80ms
+[2025-07-07 18:55:28] [Rank 0] step:701/10000 train_time:55808ms step_avg:79.61ms
+[2025-07-07 18:55:30] [Rank 0] step:721/10000 train_time:57528ms step_avg:79.79ms
+[2025-07-07 18:55:31] [Rank 0] step:741/10000 train_time:59385ms step_avg:80.14ms
+[2025-07-07 18:55:33] [Rank 0] step:761/10000 train_time:60863ms step_avg:79.98ms
+[2025-07-07 18:55:34] [Rank 0] step:781/10000 train_time:62341ms step_avg:79.82ms
+[2025-07-07 18:55:36] [Rank 0] step:801/10000 train_time:63815ms step_avg:79.67ms
+[2025-07-07 18:55:38] [Rank 0] step:821/10000 train_time:65947ms step_avg:80.33ms
+[2025-07-07 18:55:40] [Rank 0] step:841/10000 train_time:67421ms step_avg:80.17ms
+[2025-07-07 18:55:41] [Rank 0] step:861/10000 train_time:68898ms step_avg:80.02ms
+[2025-07-07 18:55:42] [Rank 0] step:881/10000 train_time:70373ms step_avg:79.88ms
+[2025-07-07 18:55:45] [Rank 0] step:901/10000 train_time:71852ms step_avg:79.75ms
+[2025-07-07 18:55:46] [Rank 0] step:921/10000 train_time:73988ms step_avg:80.33ms
+[2025-07-07 18:55:48] [Rank 0] step:941/10000 train_time:75467ms step_avg:80.20ms
+[2025-07-07 18:55:49] [Rank 0] step:961/10000 train_time:76941ms step_avg:80.06ms
+[2025-07-07 18:55:51] [Rank 0] step:981/10000 train_time:78416ms step_avg:79.94ms
+[2025-07-07 18:55:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
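The recurring divisibility warning is plain floor-division arithmetic: with val_tokens = 1,966,080 and val_batch_size = 262,144, only 7 full batches fit, so 131,072 tokens (half a batch) are skipped on every validation pass. A minimal sketch of the assumed check (variable names hypothetical, not taken from the script):

```python
# Sketch of the assumed validation batching check (names hypothetical).
val_tokens = 1_966_080
val_batch_size = 262_144

num_val_batches = val_tokens // val_batch_size          # floor division -> 7 full batches
missed = val_tokens - num_val_batches * val_batch_size  # 1_966_080 - 1_835_008 = 131_072

if val_tokens % val_batch_size != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible by "
          f"val_batch_size ({val_batch_size}). Some tokens might be missed.")
```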
+[2025-07-07 18:55:54] [Rank 0] PRINT: step:1000/10000 train_loss:1.2760 val_loss:1.1961 train_time:80541ms step_avg:80.54ms
+[2025-07-07 18:55:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:55:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:55:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:01:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:01:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:01:23] [Rank 0] Total Loss: 4.8912
+[2025-07-07 19:01:23] [Rank 0] Total FTA: 0.2869
+[2025-07-07 19:01:23] [Rank 0] Group 0 Loss: 5.2748
+[2025-07-07 19:01:23] [Rank 0] Group 1 Loss: 4.7022
+[2025-07-07 19:01:23] [Rank 0] Group 2 Loss: 4.7802
+[2025-07-07 19:01:23] [Rank 0] Group 3 Loss: 4.8842
+[2025-07-07 19:01:23] [Rank 0] Group 4 Loss: 4.8047
+[2025-07-07 19:01:23] [Rank 0] Group 5 Loss: 4.8132
+[2025-07-07 19:01:23] [Rank 0] Group 6 Loss: 4.7930
+[2025-07-07 19:01:23] [Rank 0] Group 7 Loss: 4.8765
+[2025-07-07 19:01:23] [Rank 0] Group 8 Loss: 4.8861
+[2025-07-07 19:01:23] [Rank 0] Group 9 Loss: 4.8577
+[2025-07-07 19:01:23] [Rank 0] Group 10 Loss: 4.8472
+[2025-07-07 19:01:23] [Rank 0] Group 11 Loss: 4.8543
+[2025-07-07 19:01:23] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 19:01:23] [Rank 0] Group 1 FTA: 0.3646
+[2025-07-07 19:01:23] [Rank 0] Group 2 FTA: 0.3646
+[2025-07-07 19:01:24] [Rank 0] Group 3 FTA: 0.2370
+[2025-07-07 19:01:24] [Rank 0] Group 4 FTA: 0.2891
+[2025-07-07 19:01:24] [Rank 0] Group 5 FTA: 0.3984
+[2025-07-07 19:01:24] [Rank 0] Group 6 FTA: 0.3255
+[2025-07-07 19:01:24] [Rank 0] Group 7 FTA: 0.3125
+[2025-07-07 19:01:24] [Rank 0] Group 8 FTA: 0.2734
+[2025-07-07 19:01:24] [Rank 0] Group 9 FTA: 0.3125
+[2025-07-07 19:01:24] [Rank 0] Group 10 FTA: 0.2539
+[2025-07-07 19:01:24] [Rank 0] Group 11 FTA: 0.2891
+[2025-07-07 19:01:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:01:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:01:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:01:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:01:25] [Rank 0] step:1001/10000 train_time:80563ms step_avg:80.48ms
+[2025-07-07 19:01:27] [Rank 0] step:1021/10000 train_time:82049ms step_avg:80.36ms
+[2025-07-07 19:01:28] [Rank 0] step:1041/10000 train_time:83516ms step_avg:80.23ms
+[2025-07-07 19:01:29] [Rank 0] step:1061/10000 train_time:84982ms step_avg:80.10ms
+[2025-07-07 19:01:32] [Rank 0] step:1081/10000 train_time:86453ms step_avg:79.97ms
+[2025-07-07 19:01:33] [Rank 0] step:1101/10000 train_time:88573ms step_avg:80.45ms
+[2025-07-07 19:01:35] [Rank 0] step:1121/10000 train_time:90040ms step_avg:80.32ms
+[2025-07-07 19:01:36] [Rank 0] step:1141/10000 train_time:91512ms step_avg:80.20ms
+[2025-07-07 19:01:37] [Rank 0] step:1161/10000 train_time:92986ms step_avg:80.09ms
+[2025-07-07 19:01:40] [Rank 0] step:1181/10000 train_time:95131ms step_avg:80.55ms
+[2025-07-07 19:01:41] [Rank 0] step:1201/10000 train_time:96602ms step_avg:80.43ms
+[2025-07-07 19:01:43] [Rank 0] step:1221/10000 train_time:98077ms step_avg:80.33ms
+[2025-07-07 19:01:44] [Rank 0] step:1241/10000 train_time:99548ms step_avg:80.22ms
+[2025-07-07 19:01:46] [Rank 0] step:1261/10000 train_time:101075ms step_avg:80.15ms
+[2025-07-07 19:01:48] [Rank 0] step:1281/10000 train_time:103167ms step_avg:80.54ms
+[2025-07-07 19:01:49] [Rank 0] step:1301/10000 train_time:104640ms step_avg:80.43ms
+[2025-07-07 19:01:51] [Rank 0] step:1321/10000 train_time:106113ms step_avg:80.33ms
+[2025-07-07 19:01:52] [Rank 0] step:1341/10000 train_time:107590ms step_avg:80.23ms
+[2025-07-07 19:01:54] [Rank 0] step:1361/10000 train_time:109725ms step_avg:80.62ms
+[2025-07-07 19:01:56] [Rank 0] step:1381/10000 train_time:111197ms step_avg:80.52ms
+[2025-07-07 19:01:57] [Rank 0] step:1401/10000 train_time:112673ms step_avg:80.42ms
+[2025-07-07 19:01:59] [Rank 0] step:1421/10000 train_time:114149ms step_avg:80.33ms
+[2025-07-07 19:02:01] [Rank 0] step:1441/10000 train_time:115881ms step_avg:80.42ms
+[2025-07-07 19:02:02] [Rank 0] step:1461/10000 train_time:117759ms step_avg:80.60ms
+[2025-07-07 19:02:04] [Rank 0] step:1481/10000 train_time:119236ms step_avg:80.51ms
+[2025-07-07 19:02:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:02:06] [Rank 0] PRINT: step:1500/10000 train_loss:1.0759 val_loss:1.0121 train_time:120710ms step_avg:80.47ms
+[2025-07-07 19:02:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:02:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
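Each detailed evaluation draws a stratified subset: the script asks for ~5000 samples but consistently lands on 5633, which is what per-group proportional sampling with rounded-up group counts would produce. A sketch under that assumption (all names hypothetical; the actual sampler is not shown in this log):

```python
import math
import random
from collections import defaultdict

def stratified_sample(samples, labels, target=5000, seed=0):
    """Draw roughly `target` samples, preserving per-group proportions."""
    rng = random.Random(seed)
    by_group = defaultdict(list)
    for sample, group in zip(samples, labels):
        by_group[group].append(sample)
    frac = target / len(samples)
    picked = []
    for items in by_group.values():
        # ceil per group -> slight overshoot of the target overall,
        # consistent with 5633 drawn for a ~5000 request.
        k = min(len(items), math.ceil(len(items) * frac))
        picked.extend(rng.sample(items, k))
    return picked
```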
+[2025-07-07 19:02:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:07:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:07:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:07:35] [Rank 0] Total Loss: 5.1938
+[2025-07-07 19:07:35] [Rank 0] Total FTA: 0.5775
+[2025-07-07 19:07:35] [Rank 0] Group 0 Loss: 5.3903
+[2025-07-07 19:07:35] [Rank 0] Group 1 Loss: 5.0010
+[2025-07-07 19:07:35] [Rank 0] Group 2 Loss: 5.0848
+[2025-07-07 19:07:35] [Rank 0] Group 3 Loss: 5.2051
+[2025-07-07 19:07:35] [Rank 0] Group 4 Loss: 5.1174
+[2025-07-07 19:07:35] [Rank 0] Group 5 Loss: 5.1775
+[2025-07-07 19:07:35] [Rank 0] Group 6 Loss: 5.1029
+[2025-07-07 19:07:35] [Rank 0] Group 7 Loss: 5.2153
+[2025-07-07 19:07:35] [Rank 0] Group 8 Loss: 5.1609
+[2025-07-07 19:07:35] [Rank 0] Group 9 Loss: 5.2659
+[2025-07-07 19:07:35] [Rank 0] Group 10 Loss: 5.1880
+[2025-07-07 19:07:35] [Rank 0] Group 11 Loss: 5.2131
+[2025-07-07 19:07:35] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-07 19:07:35] [Rank 0] Group 1 FTA: 0.6797
+[2025-07-07 19:07:35] [Rank 0] Group 2 FTA: 0.4740
+[2025-07-07 19:07:35] [Rank 0] Group 3 FTA: 0.6667
+[2025-07-07 19:07:35] [Rank 0] Group 4 FTA: 0.5938
+[2025-07-07 19:07:35] [Rank 0] Group 5 FTA: 0.5990
+[2025-07-07 19:07:36] [Rank 0] Group 6 FTA: 0.6536
+[2025-07-07 19:07:36] [Rank 0] Group 7 FTA: 0.7396
+[2025-07-07 19:07:36] [Rank 0] Group 8 FTA: 0.6562
+[2025-07-07 19:07:36] [Rank 0] Group 9 FTA: 0.6484
+[2025-07-07 19:07:36] [Rank 0] Group 10 FTA: 0.6562
+[2025-07-07 19:07:36] [Rank 0] Group 11 FTA: 0.6680
+[2025-07-07 19:07:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:07:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:07:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:07:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:07:37] [Rank 0] step:1501/10000 train_time:120732ms step_avg:80.43ms
+[2025-07-07 19:07:38] [Rank 0] step:1521/10000 train_time:122216ms step_avg:80.35ms
+[2025-07-07 19:07:41] [Rank 0] step:1541/10000 train_time:124344ms step_avg:80.69ms
+[2025-07-07 19:07:42] [Rank 0] step:1561/10000 train_time:126106ms step_avg:80.79ms
+[2025-07-07 19:07:44] [Rank 0] step:1581/10000 train_time:127575ms step_avg:80.69ms
+[2025-07-07 19:07:45] [Rank 0] step:1601/10000 train_time:129044ms step_avg:80.60ms
+[2025-07-07 19:07:47] [Rank 0] step:1621/10000 train_time:130568ms step_avg:80.55ms
+[2025-07-07 19:07:49] [Rank 0] step:1641/10000 train_time:132633ms step_avg:80.82ms
+[2025-07-07 19:07:50] [Rank 0] step:1661/10000 train_time:134104ms step_avg:80.74ms
+[2025-07-07 19:07:52] [Rank 0] step:1681/10000 train_time:135574ms step_avg:80.65ms
+[2025-07-07 19:07:53] [Rank 0] step:1701/10000 train_time:137047ms step_avg:80.57ms
+[2025-07-07 19:07:55] [Rank 0] step:1721/10000 train_time:139161ms step_avg:80.86ms
+[2025-07-07 19:07:57] [Rank 0] step:1741/10000 train_time:140635ms step_avg:80.78ms
+[2025-07-07 19:07:58] [Rank 0] step:1761/10000 train_time:142106ms step_avg:80.70ms
+[2025-07-07 19:08:00] [Rank 0] step:1781/10000 train_time:143579ms step_avg:80.62ms
+[2025-07-07 19:08:02] [Rank 0] step:1801/10000 train_time:145310ms step_avg:80.68ms
+[2025-07-07 19:08:03] [Rank 0] step:1821/10000 train_time:147169ms step_avg:80.82ms
+[2025-07-07 19:08:05] [Rank 0] step:1841/10000 train_time:148640ms step_avg:80.74ms
+[2025-07-07 19:08:06] [Rank 0] step:1861/10000 train_time:150113ms step_avg:80.66ms
+[2025-07-07 19:08:08] [Rank 0] step:1881/10000 train_time:151589ms step_avg:80.59ms
+[2025-07-07 19:08:10] [Rank 0] step:1901/10000 train_time:153299ms step_avg:80.64ms
+[2025-07-07 19:08:11] [Rank 0] step:1921/10000 train_time:154773ms step_avg:80.57ms
+[2025-07-07 19:08:13] [Rank 0] step:1941/10000 train_time:156246ms step_avg:80.50ms
+[2025-07-07 19:08:14] [Rank 0] step:1961/10000 train_time:157720ms step_avg:80.43ms
+[2025-07-07 19:08:16] [Rank 0] step:1981/10000 train_time:159249ms step_avg:80.39ms
+[2025-07-07 19:08:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:08:18] [Rank 0] PRINT: step:2000/10000 train_loss:0.9139 val_loss:0.9596 train_time:161311ms step_avg:80.66ms
+[2025-07-07 19:08:18] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:08:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
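The log never spells out FTA; the natural reading for a QA probe like this is first-token accuracy, i.e. whether the argmax prediction at the answer position matches the first gold answer token. A hedged PyTorch sketch of that interpretation only (shapes and names are assumptions, not the script's code):

```python
import torch

def first_token_accuracy(logits, answer_ids, answer_start):
    """FTA sketch: fraction of samples whose argmax prediction at the
    position preceding the answer equals the first gold answer token.

    logits:       (batch, seq, vocab) model outputs
    answer_ids:   (batch,) first gold token id of each answer
    answer_start: (batch,) index of the position that predicts that token
    """
    idx = torch.arange(logits.size(0))
    pred = logits[idx, answer_start].argmax(dim=-1)  # (batch,)
    return (pred == answer_ids).float().mean().item()
```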
+[2025-07-07 19:08:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:13:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:13:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:13:51] [Rank 0] Total Loss: 5.2968
+[2025-07-07 19:13:51] [Rank 0] Total FTA: 0.7763
+[2025-07-07 19:13:51] [Rank 0] Group 0 Loss: 5.5072
+[2025-07-07 19:13:51] [Rank 0] Group 1 Loss: 5.2077
+[2025-07-07 19:13:51] [Rank 0] Group 2 Loss: 5.1204
+[2025-07-07 19:13:51] [Rank 0] Group 3 Loss: 5.3203
+[2025-07-07 19:13:51] [Rank 0] Group 4 Loss: 5.2217
+[2025-07-07 19:13:51] [Rank 0] Group 5 Loss: 5.2355
+[2025-07-07 19:13:51] [Rank 0] Group 6 Loss: 5.2560
+[2025-07-07 19:13:51] [Rank 0] Group 7 Loss: 5.2770
+[2025-07-07 19:13:51] [Rank 0] Group 8 Loss: 5.3213
+[2025-07-07 19:13:51] [Rank 0] Group 9 Loss: 5.3245
+[2025-07-07 19:13:51] [Rank 0] Group 10 Loss: 5.3025
+[2025-07-07 19:13:51] [Rank 0] Group 11 Loss: 5.2846
+[2025-07-07 19:13:51] [Rank 0] Group 0 FTA: 0.6528
+[2025-07-07 19:13:51] [Rank 0] Group 1 FTA: 0.8203
+[2025-07-07 19:13:51] [Rank 0] Group 2 FTA: 0.8438
+[2025-07-07 19:13:51] [Rank 0] Group 3 FTA: 0.7917
+[2025-07-07 19:13:51] [Rank 0] Group 4 FTA: 0.7917
+[2025-07-07 19:13:51] [Rank 0] Group 5 FTA: 0.7786
+[2025-07-07 19:13:51] [Rank 0] Group 6 FTA: 0.8021
+[2025-07-07 19:13:51] [Rank 0] Group 7 FTA: 0.7812
+[2025-07-07 19:13:51] [Rank 0] Group 8 FTA: 0.7995
+[2025-07-07 19:13:51] [Rank 0] Group 9 FTA: 0.7656
+[2025-07-07 19:13:51] [Rank 0] Group 10 FTA: 0.8008
+[2025-07-07 19:13:51] [Rank 0] Group 11 FTA: 0.7852
+[2025-07-07 19:13:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:13:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:13:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:13:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:13:52] [Rank 0] step:2001/10000 train_time:161334ms step_avg:80.63ms
+[2025-07-07 19:13:54] [Rank 0] step:2021/10000 train_time:162801ms step_avg:80.55ms
+[2025-07-07 19:13:55] [Rank 0] step:2041/10000 train_time:164268ms step_avg:80.48ms
+[2025-07-07 19:13:57] [Rank 0] step:2061/10000 train_time:165737ms step_avg:80.42ms
+[2025-07-07 19:13:59] [Rank 0] step:2081/10000 train_time:167852ms step_avg:80.66ms
+[2025-07-07 19:14:00] [Rank 0] step:2101/10000 train_time:169322ms step_avg:80.59ms
+[2025-07-07 19:14:02] [Rank 0] step:2121/10000 train_time:171049ms step_avg:80.65ms
+[2025-07-07 19:14:04] [Rank 0] step:2141/10000 train_time:172520ms step_avg:80.58ms
+[2025-07-07 19:14:06] [Rank 0] step:2161/10000 train_time:174249ms step_avg:80.63ms
+[2025-07-07 19:14:07] [Rank 0] step:2181/10000 train_time:176113ms step_avg:80.75ms
+[2025-07-07 19:14:09] [Rank 0] step:2201/10000 train_time:177583ms step_avg:80.68ms
+[2025-07-07 19:14:10] [Rank 0] step:2221/10000 train_time:179056ms step_avg:80.62ms
+[2025-07-07 19:14:12] [Rank 0] step:2241/10000 train_time:180549ms step_avg:80.57ms
+[2025-07-07 19:14:14] [Rank 0] step:2261/10000 train_time:182713ms step_avg:80.81ms
+[2025-07-07 19:14:15] [Rank 0] step:2281/10000 train_time:184209ms step_avg:80.76ms
+[2025-07-07 19:14:17] [Rank 0] step:2301/10000 train_time:185712ms step_avg:80.71ms
+[2025-07-07 19:14:18] [Rank 0] step:2321/10000 train_time:187207ms step_avg:80.66ms
+[2025-07-07 19:14:20] [Rank 0] step:2341/10000 train_time:188961ms step_avg:80.72ms
+[2025-07-07 19:14:22] [Rank 0] step:2361/10000 train_time:190856ms step_avg:80.84ms
+[2025-07-07 19:14:23] [Rank 0] step:2381/10000 train_time:192355ms step_avg:80.79ms
+[2025-07-07 19:14:25] [Rank 0] step:2401/10000 train_time:193852ms step_avg:80.74ms
+[2025-07-07 19:14:26] [Rank 0] step:2421/10000 train_time:195349ms step_avg:80.69ms
+[2025-07-07 19:14:29] [Rank 0] step:2441/10000 train_time:197499ms step_avg:80.91ms
+[2025-07-07 19:14:30] [Rank 0] step:2461/10000 train_time:198997ms step_avg:80.86ms
+[2025-07-07 19:14:32] [Rank 0] step:2481/10000 train_time:200495ms step_avg:80.81ms
+[2025-07-07 19:14:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:14:34] [Rank 0] PRINT: step:2500/10000 train_loss:0.8927 val_loss:0.8801 train_time:201993ms step_avg:80.80ms
+[2025-07-07 19:14:34] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:14:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
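The "[✓] ... curve updated and saved" lines indicate the PNGs under logs_bios/ are re-rendered from the accumulated evaluation history after every detailed evaluation, overwriting the previous image. A plausible matplotlib sketch of that pattern (the data layout is assumed, not taken from the script):

```python
import matplotlib
matplotlib.use("Agg")  # headless rendering on the training node
import matplotlib.pyplot as plt

def save_per_class_curves(history, out_path):
    """Re-render and overwrite the per-group curve PNG after each evaluation.

    history: dict mapping group id -> list of (step, value) pairs (assumed).
    """
    fig, ax = plt.subplots()
    for group, points in sorted(history.items()):
        steps, values = zip(*points)
        ax.plot(steps, values, label=f"Group {group}")
    ax.set_xlabel("step")
    ax.legend(ncol=2, fontsize="small")
    fig.savefig(out_path, dpi=150)
    plt.close(fig)  # avoid leaking figures across repeated evals
```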
+[2025-07-07 19:14:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:20:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:20:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:20:03] [Rank 0] Total Loss: 5.2733
+[2025-07-07 19:20:03] [Rank 0] Total FTA: 0.9045
+[2025-07-07 19:20:03] [Rank 0] Group 0 Loss: 5.4297
+[2025-07-07 19:20:03] [Rank 0] Group 1 Loss: 5.1932
+[2025-07-07 19:20:03] [Rank 0] Group 2 Loss: 4.9849
+[2025-07-07 19:20:03] [Rank 0] Group 3 Loss: 5.2887
+[2025-07-07 19:20:03] [Rank 0] Group 4 Loss: 5.2746
+[2025-07-07 19:20:03] [Rank 0] Group 5 Loss: 5.2407
+[2025-07-07 19:20:03] [Rank 0] Group 6 Loss: 5.1630
+[2025-07-07 19:20:03] [Rank 0] Group 7 Loss: 5.2960
+[2025-07-07 19:20:03] [Rank 0] Group 8 Loss: 5.3245
+[2025-07-07 19:20:03] [Rank 0] Group 9 Loss: 5.2283
+[2025-07-07 19:20:03] [Rank 0] Group 10 Loss: 5.2693
+[2025-07-07 19:20:03] [Rank 0] Group 11 Loss: 5.3267
+[2025-07-07 19:20:03] [Rank 0] Group 0 FTA: 0.8453
+[2025-07-07 19:20:03] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 19:20:03] [Rank 0] Group 2 FTA: 0.9427
+[2025-07-07 19:20:03] [Rank 0] Group 3 FTA: 0.8672
+[2025-07-07 19:20:03] [Rank 0] Group 4 FTA: 0.9089
+[2025-07-07 19:20:03] [Rank 0] Group 5 FTA: 0.9036
+[2025-07-07 19:20:03] [Rank 0] Group 6 FTA: 0.9193
+[2025-07-07 19:20:03] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-07 19:20:03] [Rank 0] Group 8 FTA: 0.9167
+[2025-07-07 19:20:03] [Rank 0] Group 9 FTA: 0.9023
+[2025-07-07 19:20:03] [Rank 0] Group 10 FTA: 0.9082
+[2025-07-07 19:20:03] [Rank 0] Group 11 FTA: 0.8936
+[2025-07-07 19:20:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:20:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:20:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:20:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:20:05] [Rank 0] step:2501/10000 train_time:202016ms step_avg:80.77ms
+[2025-07-07 19:20:07] [Rank 0] step:2521/10000 train_time:203782ms step_avg:80.83ms
+[2025-07-07 19:20:08] [Rank 0] step:2541/10000 train_time:205675ms step_avg:80.94ms
+[2025-07-07 19:20:10] [Rank 0] step:2561/10000 train_time:207168ms step_avg:80.89ms
+[2025-07-07 19:20:11] [Rank 0] step:2581/10000 train_time:208660ms step_avg:80.84ms
+[2025-07-07 19:20:13] [Rank 0] step:2601/10000 train_time:210153ms step_avg:80.80ms
+[2025-07-07 19:20:15] [Rank 0] step:2621/10000 train_time:212318ms step_avg:81.01ms
+[2025-07-07 19:20:16] [Rank 0] step:2641/10000 train_time:213811ms step_avg:80.96ms
+[2025-07-07 19:20:18] [Rank 0] step:2661/10000 train_time:215305ms step_avg:80.91ms
+[2025-07-07 19:20:19] [Rank 0] step:2681/10000 train_time:216799ms step_avg:80.86ms
+[2025-07-07 19:20:21] [Rank 0] step:2701/10000 train_time:218534ms step_avg:80.91ms
+[2025-07-07 19:20:23] [Rank 0] step:2721/10000 train_time:220096ms step_avg:80.89ms
+[2025-07-07 19:20:24] [Rank 0] step:2741/10000 train_time:221591ms step_avg:80.84ms
+[2025-07-07 19:20:26] [Rank 0] step:2761/10000 train_time:223086ms step_avg:80.80ms
+[2025-07-07 19:20:27] [Rank 0] step:2781/10000 train_time:224581ms step_avg:80.76ms
+[2025-07-07 19:20:29] [Rank 0] step:2801/10000 train_time:226314ms step_avg:80.80ms
+[2025-07-07 19:20:30] [Rank 0] step:2821/10000 train_time:227811ms step_avg:80.76ms
+[2025-07-07 19:20:32] [Rank 0] step:2841/10000 train_time:229307ms step_avg:80.71ms
+[2025-07-07 19:20:33] [Rank 0] step:2861/10000 train_time:230803ms step_avg:80.67ms
+[2025-07-07 19:20:36] [Rank 0] step:2881/10000 train_time:232558ms step_avg:80.72ms
+[2025-07-07 19:20:37] [Rank 0] step:2901/10000 train_time:234450ms step_avg:80.82ms
+[2025-07-07 19:20:39] [Rank 0] step:2921/10000 train_time:235946ms step_avg:80.78ms
+[2025-07-07 19:20:40] [Rank 0] step:2941/10000 train_time:237443ms step_avg:80.74ms
+[2025-07-07 19:20:42] [Rank 0] step:2961/10000 train_time:238941ms step_avg:80.70ms
+[2025-07-07 19:20:44] [Rank 0] step:2981/10000 train_time:241087ms step_avg:80.87ms
+[2025-07-07 19:20:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:20:46] [Rank 0] PRINT: step:3000/10000 train_loss:0.8802 val_loss:0.8732 train_time:242585ms step_avg:80.86ms
+[2025-07-07 19:20:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:20:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
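The step lines report cumulative wall-clock train_time alongside step_avg, and the printed numbers confirm the relation is simply cumulative milliseconds divided by the step index, e.g. at step 3000:

```python
# step_avg as printed in the step lines: cumulative train time / step index.
train_time_ms = 242_585  # value logged at step 3000 above
step = 3000
print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:80.86ms
```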
+[2025-07-07 19:20:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:26:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:26:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:26:18] [Rank 0] Total Loss: 5.2536
+[2025-07-07 19:26:18] [Rank 0] Total FTA: 0.9032
+[2025-07-07 19:26:18] [Rank 0] Group 0 Loss: 5.5559
+[2025-07-07 19:26:18] [Rank 0] Group 1 Loss: 5.0759
+[2025-07-07 19:26:18] [Rank 0] Group 2 Loss: 4.9983
+[2025-07-07 19:26:18] [Rank 0] Group 3 Loss: 5.2120
+[2025-07-07 19:26:18] [Rank 0] Group 4 Loss: 5.2765
+[2025-07-07 19:26:18] [Rank 0] Group 5 Loss: 5.2008
+[2025-07-07 19:26:18] [Rank 0] Group 6 Loss: 5.1772
+[2025-07-07 19:26:18] [Rank 0] Group 7 Loss: 5.2776
+[2025-07-07 19:26:18] [Rank 0] Group 8 Loss: 5.2346
+[2025-07-07 19:26:18] [Rank 0] Group 9 Loss: 5.3159
+[2025-07-07 19:26:18] [Rank 0] Group 10 Loss: 5.2334
+[2025-07-07 19:26:18] [Rank 0] Group 11 Loss: 5.2371
+[2025-07-07 19:26:18] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 19:26:18] [Rank 0] Group 1 FTA: 0.8359
+[2025-07-07 19:26:18] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-07 19:26:18] [Rank 0] Group 3 FTA: 0.8594
+[2025-07-07 19:26:18] [Rank 0] Group 4 FTA: 0.9271
+[2025-07-07 19:26:18] [Rank 0] Group 5 FTA: 0.8516
+[2025-07-07 19:26:18] [Rank 0] Group 6 FTA: 0.9193
+[2025-07-07 19:26:18] [Rank 0] Group 7 FTA: 0.8906
+[2025-07-07 19:26:18] [Rank 0] Group 8 FTA: 0.8828
+[2025-07-07 19:26:18] [Rank 0] Group 9 FTA: 0.8945
+[2025-07-07 19:26:18] [Rank 0] Group 10 FTA: 0.8867
+[2025-07-07 19:26:18] [Rank 0] Group 11 FTA: 0.8965
+[2025-07-07 19:26:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:26:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:26:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:26:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:26:19] [Rank 0] step:3001/10000 train_time:242606ms step_avg:80.84ms
+[2025-07-07 19:26:21] [Rank 0] step:3021/10000 train_time:244112ms step_avg:80.81ms
+[2025-07-07 19:26:22] [Rank 0] step:3041/10000 train_time:245603ms step_avg:80.76ms
+[2025-07-07 19:26:25] [Rank 0] step:3061/10000 train_time:247096ms step_avg:80.72ms
+[2025-07-07 19:26:26] [Rank 0] step:3081/10000 train_time:249249ms step_avg:80.90ms
+[2025-07-07 19:26:28] [Rank 0] step:3101/10000 train_time:250741ms step_avg:80.86ms
+[2025-07-07 19:26:29] [Rank 0] step:3121/10000 train_time:252235ms step_avg:80.82ms
+[2025-07-07 19:26:30] [Rank 0] step:3141/10000 train_time:253729ms step_avg:80.78ms
+[2025-07-07 19:26:32] [Rank 0] step:3161/10000 train_time:255462ms step_avg:80.82ms
+[2025-07-07 19:26:34] [Rank 0] step:3181/10000 train_time:256956ms step_avg:80.78ms
+[2025-07-07 19:26:35] [Rank 0] step:3201/10000 train_time:258450ms step_avg:80.74ms
+[2025-07-07 19:26:37] [Rank 0] step:3221/10000 train_time:259947ms step_avg:80.70ms
+[2025-07-07 19:26:39] [Rank 0] step:3241/10000 train_time:261442ms step_avg:80.67ms
+[2025-07-07 19:26:41] [Rank 0] step:3261/10000 train_time:263835ms step_avg:80.91ms
+[2025-07-07 19:26:42] [Rank 0] step:3281/10000 train_time:265330ms step_avg:80.87ms
+[2025-07-07 19:26:44] [Rank 0] step:3301/10000 train_time:266826ms step_avg:80.83ms
+[2025-07-07 19:26:45] [Rank 0] step:3321/10000 train_time:268322ms step_avg:80.80ms
+[2025-07-07 19:26:47] [Rank 0] step:3341/10000 train_time:270487ms step_avg:80.96ms
+[2025-07-07 19:26:49] [Rank 0] step:3361/10000 train_time:271982ms step_avg:80.92ms
+[2025-07-07 19:26:50] [Rank 0] step:3381/10000 train_time:273479ms step_avg:80.89ms
+[2025-07-07 19:26:52] [Rank 0] step:3401/10000 train_time:274976ms step_avg:80.85ms
+[2025-07-07 19:26:54] [Rank 0] step:3421/10000 train_time:277154ms step_avg:81.02ms
+[2025-07-07 19:26:55] [Rank 0] step:3441/10000 train_time:278632ms step_avg:80.97ms
+[2025-07-07 19:26:57] [Rank 0] step:3461/10000 train_time:280129ms step_avg:80.94ms
+[2025-07-07 19:26:58] [Rank 0] step:3481/10000 train_time:281627ms step_avg:80.90ms
+[2025-07-07 19:27:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:27:01] [Rank 0] PRINT: step:3500/10000 train_loss:0.8737 val_loss:0.8676 train_time:283123ms step_avg:80.89ms
+[2025-07-07 19:27:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:27:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:27:01] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 19:27:01] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 19:32:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 19:32:29] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 19:32:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 19:32:29] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 19:32:29] [Rank 0] Total Loss: 5.2946 +[2025-07-07 19:32:29] [Rank 0] Total Loss: 5.2946 +[2025-07-07 19:32:29] [Rank 0] Total FTA: 0.9483 +[2025-07-07 19:32:29] [Rank 0] Total FTA: 0.9483 +[2025-07-07 19:32:29] [Rank 0] Group 0 Loss: 5.4568 +[2025-07-07 19:32:29] [Rank 0] Group 0 Loss: 5.4568 +[2025-07-07 19:32:29] [Rank 0] Group 1 Loss: 5.4247 +[2025-07-07 19:32:29] [Rank 0] Group 1 Loss: 5.4247 +[2025-07-07 19:32:30] [Rank 0] Group 2 Loss: 5.0146 +[2025-07-07 19:32:30] [Rank 0] Group 2 Loss: 5.0146 +[2025-07-07 19:32:30] [Rank 0] Group 3 Loss: 5.2029 +[2025-07-07 19:32:30] [Rank 0] Group 3 Loss: 5.2029 +[2025-07-07 19:32:30] [Rank 0] Group 4 Loss: 5.3533 +[2025-07-07 19:32:30] [Rank 0] Group 4 Loss: 5.3533 +[2025-07-07 19:32:30] [Rank 0] Group 5 Loss: 5.2200 +[2025-07-07 19:32:30] [Rank 0] Group 5 Loss: 5.2200 +[2025-07-07 19:32:30] [Rank 0] Group 6 Loss: 5.1711 +[2025-07-07 19:32:30] [Rank 0] Group 6 Loss: 5.1711 +[2025-07-07 19:32:30] [Rank 0] Group 7 Loss: 5.2823 +[2025-07-07 19:32:30] [Rank 0] Group 7 Loss: 5.2823 +[2025-07-07 19:32:30] [Rank 0] Group 8 Loss: 5.3094 +[2025-07-07 19:32:30] [Rank 0] Group 8 Loss: 5.3094 +[2025-07-07 19:32:30] [Rank 0] Group 9 Loss: 5.2795 +[2025-07-07 19:32:30] [Rank 0] Group 9 Loss: 5.2795 +[2025-07-07 19:32:30] [Rank 0] Group 10 Loss: 5.2769 +[2025-07-07 19:32:30] [Rank 0] Group 10 Loss: 5.2769 +[2025-07-07 19:32:30] [Rank 0] Group 11 Loss: 5.3272 +[2025-07-07 19:32:30] [Rank 0] Group 11 Loss: 5.3272 +[2025-07-07 19:32:30] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-07 19:32:30] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-07 19:32:30] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 19:32:30] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 19:32:30] [Rank 0] Group 2 FTA: 0.9141 +[2025-07-07 19:32:30] [Rank 0] Group 2 FTA: 0.9141 +[2025-07-07 19:32:30] [Rank 0] Group 3 FTA: 0.8906 +[2025-07-07 19:32:30] [Rank 0] Group 3 FTA: 0.8906 +[2025-07-07 19:32:30] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-07 19:32:30] [Rank 0] Group 4 FTA: 1.0000 +[2025-07-07 19:32:30] [Rank 0] Group 5 FTA: 0.9583 +[2025-07-07 19:32:30] [Rank 0] Group 5 FTA: 0.9583 +[2025-07-07 19:32:30] [Rank 0] Group 6 FTA: 0.9323 +[2025-07-07 19:32:30] [Rank 0] Group 6 FTA: 0.9323 +[2025-07-07 19:32:30] [Rank 0] Group 7 FTA: 0.9427 +[2025-07-07 19:32:30] [Rank 0] Group 7 FTA: 0.9427 +[2025-07-07 19:32:30] [Rank 0] Group 8 FTA: 0.9271 +[2025-07-07 19:32:30] [Rank 0] Group 8 FTA: 0.9271 +[2025-07-07 19:32:30] [Rank 0] Group 9 FTA: 0.9453 +[2025-07-07 19:32:30] [Rank 0] Group 9 FTA: 0.9453 +[2025-07-07 19:32:30] [Rank 0] Group 10 FTA: 0.9160 +[2025-07-07 19:32:30] [Rank 0] Group 10 FTA: 0.9160 +[2025-07-07 19:32:30] [Rank 0] Group 11 FTA: 0.9346 +[2025-07-07 19:32:30] [Rank 0] Group 11 FTA: 0.9346 +[2025-07-07 19:32:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png +[2025-07-07 19:32:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png +[2025-07-07 19:32:30] [Rank 0] [✓] Per-Class FTA curve updated 
+[2025-07-07 19:32:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:32:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:32:31] [Rank 0] step:3501/10000 train_time:283146ms step_avg:80.88ms
+[2025-07-07 19:32:33] [Rank 0] step:3521/10000 train_time:285318ms step_avg:81.03ms
+[2025-07-07 19:32:35] [Rank 0] step:3541/10000 train_time:286807ms step_avg:81.00ms
+[2025-07-07 19:32:36] [Rank 0] step:3561/10000 train_time:288298ms step_avg:80.96ms
+[2025-07-07 19:32:38] [Rank 0] step:3581/10000 train_time:289792ms step_avg:80.92ms
+[2025-07-07 19:32:40] [Rank 0] step:3601/10000 train_time:291540ms step_avg:80.96ms
+[2025-07-07 19:32:41] [Rank 0] step:3621/10000 train_time:293014ms step_avg:80.92ms
+[2025-07-07 19:32:42] [Rank 0] step:3641/10000 train_time:294508ms step_avg:80.89ms
+[2025-07-07 19:32:44] [Rank 0] step:3661/10000 train_time:296003ms step_avg:80.85ms
+[2025-07-07 19:32:45] [Rank 0] step:3681/10000 train_time:297499ms step_avg:80.82ms
+[2025-07-07 19:32:48] [Rank 0] step:3701/10000 train_time:299654ms step_avg:80.97ms
+[2025-07-07 19:32:49] [Rank 0] step:3721/10000 train_time:301146ms step_avg:80.93ms
+[2025-07-07 19:32:51] [Rank 0] step:3741/10000 train_time:302641ms step_avg:80.90ms
+[2025-07-07 19:32:52] [Rank 0] step:3761/10000 train_time:304139ms step_avg:80.87ms
+[2025-07-07 19:32:54] [Rank 0] step:3781/10000 train_time:305691ms step_avg:80.85ms
+[2025-07-07 19:32:56] [Rank 0] step:3801/10000 train_time:307797ms step_avg:80.98ms
+[2025-07-07 19:32:57] [Rank 0] step:3821/10000 train_time:309294ms step_avg:80.95ms
+[2025-07-07 19:32:59] [Rank 0] step:3841/10000 train_time:310789ms step_avg:80.91ms
+[2025-07-07 19:33:01] [Rank 0] step:3861/10000 train_time:312531ms step_avg:80.95ms
+[2025-07-07 19:33:02] [Rank 0] step:3881/10000 train_time:314268ms step_avg:80.98ms
+[2025-07-07 19:33:04] [Rank 0] step:3901/10000 train_time:315764ms step_avg:80.94ms
+[2025-07-07 19:33:05] [Rank 0] step:3921/10000 train_time:317263ms step_avg:80.91ms
+[2025-07-07 19:33:07] [Rank 0] step:3941/10000 train_time:318759ms step_avg:80.88ms
+[2025-07-07 19:33:09] [Rank 0] step:3961/10000 train_time:320513ms step_avg:80.92ms
+[2025-07-07 19:33:10] [Rank 0] step:3981/10000 train_time:322421ms step_avg:80.99ms
+[2025-07-07 19:33:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:33:13] [Rank 0] PRINT: step:4000/10000 train_loss:0.8697 val_loss:0.8665 train_time:323917ms step_avg:80.98ms
+[2025-07-07 19:33:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:33:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:33:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:38:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:38:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:38:44] [Rank 0] Total Loss: 5.2917
+[2025-07-07 19:38:44] [Rank 0] Total FTA: 0.8846
+[2025-07-07 19:38:44] [Rank 0] Group 0 Loss: 5.5278
+[2025-07-07 19:38:44] [Rank 0] Group 1 Loss: 5.1100
+[2025-07-07 19:38:44] [Rank 0] Group 2 Loss: 5.0935
+[2025-07-07 19:38:44] [Rank 0] Group 3 Loss: 5.2199
+[2025-07-07 19:38:44] [Rank 0] Group 4 Loss: 5.3251
+[2025-07-07 19:38:44] [Rank 0] Group 5 Loss: 5.2329
+[2025-07-07 19:38:44] [Rank 0] Group 6 Loss: 5.2467
+[2025-07-07 19:38:44] [Rank 0] Group 7 Loss: 5.3003
+[2025-07-07 19:38:44] [Rank 0] Group 8 Loss: 5.3212
+[2025-07-07 19:38:44] [Rank 0] Group 9 Loss: 5.2618
+[2025-07-07 19:38:44] [Rank 0] Group 10 Loss: 5.3047
+[2025-07-07 19:38:44] [Rank 0] Group 11 Loss: 5.2972
+[2025-07-07 19:38:44] [Rank 0] Group 0 FTA: 0.5098
+[2025-07-07 19:38:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 19:38:44] [Rank 0] Group 2 FTA: 0.9245
+[2025-07-07 19:38:44] [Rank 0] Group 3 FTA: 0.8776
+[2025-07-07 19:38:44] [Rank 0] Group 4 FTA: 0.9401
+[2025-07-07 19:38:44] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-07 19:38:44] [Rank 0] Group 6 FTA: 0.9531
+[2025-07-07 19:38:44] [Rank 0] Group 7 FTA: 0.9505
+[2025-07-07 19:38:44] [Rank 0] Group 8 FTA: 0.9531
+[2025-07-07 19:38:44] [Rank 0] Group 9 FTA: 0.9336
+[2025-07-07 19:38:44] [Rank 0] Group 10 FTA: 0.9453
+[2025-07-07 19:38:44] [Rank 0] Group 11 FTA: 0.9434
+[2025-07-07 19:38:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:38:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:38:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:38:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:38:46] [Rank 0] step:4001/10000 train_time:323940ms step_avg:80.96ms
+[2025-07-07 19:38:47] [Rank 0] step:4021/10000 train_time:325442ms step_avg:80.94ms
+[2025-07-07 19:38:49] [Rank 0] step:4041/10000 train_time:326932ms step_avg:80.90ms
+[2025-07-07 19:38:51] [Rank 0] step:4061/10000 train_time:329065ms step_avg:81.03ms
+[2025-07-07 19:38:52] [Rank 0] step:4081/10000 train_time:330557ms step_avg:81.00ms
+[2025-07-07 19:38:54] [Rank 0] step:4101/10000 train_time:332050ms step_avg:80.97ms
+[2025-07-07 19:38:55] [Rank 0] step:4121/10000 train_time:333545ms step_avg:80.94ms
+[2025-07-07 19:38:58] [Rank 0] step:4141/10000 train_time:335094ms step_avg:80.92ms
+[2025-07-07 19:38:59] [Rank 0] step:4161/10000 train_time:337177ms step_avg:81.03ms
+[2025-07-07 19:39:01] [Rank 0] step:4181/10000 train_time:338674ms step_avg:81.00ms
+[2025-07-07 19:39:02] [Rank 0] step:4201/10000 train_time:340171ms step_avg:80.97ms
+[2025-07-07 19:39:04] [Rank 0] step:4221/10000 train_time:341670ms step_avg:80.95ms
+[2025-07-07 19:39:06] [Rank 0] step:4241/10000 train_time:343822ms step_avg:81.07ms
+[2025-07-07 19:39:07] [Rank 0] step:4261/10000 train_time:345316ms step_avg:81.04ms
+[2025-07-07 19:39:09] [Rank 0] step:4281/10000 train_time:346813ms step_avg:81.01ms
+[2025-07-07 19:39:10] [Rank 0] step:4301/10000 train_time:348310ms step_avg:80.98ms
+[2025-07-07 19:39:12] [Rank 0] step:4321/10000 train_time:350487ms step_avg:81.11ms
+[2025-07-07 19:39:14] [Rank 0] step:4341/10000 train_time:351966ms step_avg:81.08ms
+[2025-07-07 19:39:15] [Rank 0] step:4361/10000 train_time:353465ms step_avg:81.05ms
+[2025-07-07 19:39:17] [Rank 0] step:4381/10000 train_time:354962ms step_avg:81.02ms
+[2025-07-07 19:39:18] [Rank 0] step:4401/10000 train_time:356546ms step_avg:81.01ms
+[2025-07-07 19:39:20] [Rank 0] step:4421/10000 train_time:358218ms step_avg:81.03ms
+[2025-07-07 19:39:22] [Rank 0] step:4441/10000 train_time:359715ms step_avg:81.00ms
+[2025-07-07 19:39:23] [Rank 0] step:4461/10000 train_time:361212ms step_avg:80.97ms
+[2025-07-07 19:39:25] [Rank 0] step:4481/10000 train_time:362709ms step_avg:80.94ms
+[2025-07-07 19:39:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:39:27] [Rank 0] PRINT: step:4500/10000 train_loss:0.8666 val_loss:0.8650 train_time:364207ms step_avg:80.93ms
+[2025-07-07 19:39:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:39:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:39:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:44:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:44:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:44:57] [Rank 0] Total Loss: 5.4180
+[2025-07-07 19:44:57] [Rank 0] Total FTA: 0.9460
+[2025-07-07 19:44:57] [Rank 0] Group 0 Loss: 5.5611
+[2025-07-07 19:44:57] [Rank 0] Group 1 Loss: 5.4069
+[2025-07-07 19:44:57] [Rank 0] Group 2 Loss: 5.1845
+[2025-07-07 19:44:57] [Rank 0] Group 3 Loss: 5.4903
+[2025-07-07 19:44:57] [Rank 0] Group 4 Loss: 5.3941
+[2025-07-07 19:44:57] [Rank 0] Group 5 Loss: 5.3946
+[2025-07-07 19:44:57] [Rank 0] Group 6 Loss: 5.2840
+[2025-07-07 19:44:57] [Rank 0] Group 7 Loss: 5.4254
+[2025-07-07 19:44:57] [Rank 0] Group 8 Loss: 5.3902
+[2025-07-07 19:44:57] [Rank 0] Group 9 Loss: 5.3184
+[2025-07-07 19:44:57] [Rank 0] Group 10 Loss: 5.4042
+[2025-07-07 19:44:57] [Rank 0] Group 11 Loss: 5.4826
+[2025-07-07 19:44:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 19:44:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 19:44:57] [Rank 0] Group 2 FTA: 0.8203
+[2025-07-07 19:44:57] [Rank 0] Group 3 FTA: 0.9219
+[2025-07-07 19:44:57] [Rank 0] Group 4 FTA: 0.9505
+[2025-07-07 19:44:57] [Rank 0] Group 5 FTA: 0.9479
+[2025-07-07 19:44:57] [Rank 0] Group 6 FTA: 0.9401
+[2025-07-07 19:44:57] [Rank 0] Group 7 FTA: 0.9375
+[2025-07-07 19:44:57] [Rank 0] Group 8 FTA: 0.9349
+[2025-07-07 19:44:57] [Rank 0] Group 9 FTA: 0.9531
+[2025-07-07 19:44:57] [Rank 0] Group 10 FTA: 0.9434
+[2025-07-07 19:44:57] [Rank 0] Group 11 FTA: 0.9482
+[2025-07-07 19:44:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:44:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:44:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:44:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:44:59] [Rank 0] step:4501/10000 train_time:364543ms step_avg:80.99ms
+[2025-07-07 19:45:00] [Rank 0] step:4521/10000 train_time:366421ms step_avg:81.05ms
+[2025-07-07 19:45:02] [Rank 0] step:4541/10000 train_time:367916ms step_avg:81.02ms
+[2025-07-07 19:45:03] [Rank 0] step:4561/10000 train_time:369406ms step_avg:80.99ms
+[2025-07-07 19:45:05] [Rank 0] step:4581/10000 train_time:370901ms step_avg:80.97ms
+[2025-07-07 19:45:07] [Rank 0] step:4601/10000 train_time:373063ms step_avg:81.08ms
+[2025-07-07 19:45:09] [Rank 0] step:4621/10000 train_time:374558ms step_avg:81.06ms
+[2025-07-07 19:45:10] [Rank 0] step:4641/10000 train_time:376051ms step_avg:81.03ms
+[2025-07-07 19:45:12] [Rank 0] step:4661/10000 train_time:377547ms step_avg:81.00ms
+[2025-07-07 19:45:14] [Rank 0] step:4681/10000 train_time:379732ms step_avg:81.12ms
+[2025-07-07 19:45:15] [Rank 0] step:4701/10000 train_time:381207ms step_avg:81.09ms
+[2025-07-07 19:45:17] [Rank 0] step:4721/10000 train_time:382702ms step_avg:81.06ms
+[2025-07-07 19:45:18] [Rank 0] step:4741/10000 train_time:384196ms step_avg:81.04ms
+[2025-07-07 19:45:20] [Rank 0] step:4761/10000 train_time:385692ms step_avg:81.01ms
+[2025-07-07 19:45:21] [Rank 0] step:4781/10000 train_time:387430ms step_avg:81.04ms
+[2025-07-07 19:45:23] [Rank 0] step:4801/10000 train_time:388924ms step_avg:81.01ms
+[2025-07-07 19:45:24] [Rank 0] step:4821/10000 train_time:390419ms step_avg:80.98ms
+[2025-07-07 19:45:26] [Rank 0] step:4841/10000 train_time:391917ms step_avg:80.96ms
+[2025-07-07 19:45:28] [Rank 0] step:4861/10000 train_time:393670ms step_avg:80.99ms
+[2025-07-07 19:45:30] [Rank 0] step:4881/10000 train_time:395575ms step_avg:81.04ms
+[2025-07-07 19:45:31] [Rank 0] step:4901/10000 train_time:397072ms step_avg:81.02ms
+[2025-07-07 19:45:33] [Rank 0] step:4921/10000 train_time:398569ms step_avg:80.99ms
+[2025-07-07 19:45:34] [Rank 0] step:4941/10000 train_time:400065ms step_avg:80.97ms
+[2025-07-07 19:45:36] [Rank 0] step:4961/10000 train_time:402216ms step_avg:81.08ms
+[2025-07-07 19:45:38] [Rank 0] step:4981/10000 train_time:403855ms step_avg:81.08ms
+[2025-07-07 19:45:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:45:40] [Rank 0] PRINT: step:5000/10000 train_loss:0.8642 val_loss:0.8656 train_time:405408ms step_avg:81.08ms
+[2025-07-07 19:45:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:45:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:45:41] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:51:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:51:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:51:10] [Rank 0] Total Loss: 5.3708
+[2025-07-07 19:51:10] [Rank 0] Total FTA: 0.9627
+[2025-07-07 19:51:10] [Rank 0] Group 0 Loss: 5.5966
+[2025-07-07 19:51:10] [Rank 0] Group 1 Loss: 5.1913
+[2025-07-07 19:51:10] [Rank 0] Group 2 Loss: 5.2593
+[2025-07-07 19:51:10] [Rank 0] Group 3 Loss: 5.3280
+[2025-07-07 19:51:10] [Rank 0] Group 4 Loss: 5.3641
+[2025-07-07 19:51:10] [Rank 0] Group 5 Loss: 5.3574
+[2025-07-07 19:51:10] [Rank 0] Group 6 Loss: 5.2257
+[2025-07-07 19:51:10] [Rank 0] Group 7 Loss: 5.3885
+[2025-07-07 19:51:10] [Rank 0] Group 8 Loss: 5.3902
+[2025-07-07 19:51:10] [Rank 0] Group 9 Loss: 5.3153
+[2025-07-07 19:51:10] [Rank 0] Group 10 Loss: 5.4236
+[2025-07-07 19:51:10] [Rank 0] Group 11 Loss: 5.3621
+[2025-07-07 19:51:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 19:51:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 19:51:10] [Rank 0] Group 2 FTA: 0.8255
+[2025-07-07 19:51:10] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 19:51:10] [Rank 0] Group 4 FTA: 0.9818
+[2025-07-07 19:51:11] [Rank 0] Group 5 FTA: 0.9896
+[2025-07-07 19:51:11] [Rank 0] Group 6 FTA: 0.9583
+[2025-07-07 19:51:11] [Rank 0] Group 7 FTA: 0.9427
+[2025-07-07 19:51:11] [Rank 0] Group 8 FTA: 0.9479
+[2025-07-07 19:51:11] [Rank 0] Group 9 FTA: 0.9531
+[2025-07-07 19:51:11] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-07 19:51:11] [Rank 0] Group 11 FTA: 0.9619
+[2025-07-07 19:51:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:51:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:51:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:51:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:51:12] [Rank 0] step:5001/10000 train_time:405430ms step_avg:81.07ms
+[2025-07-07 19:51:13] [Rank 0] step:5021/10000 train_time:406931ms step_avg:81.05ms
+[2025-07-07 19:51:16] [Rank 0] step:5041/10000 train_time:408421ms step_avg:81.02ms
+[2025-07-07 19:51:17] [Rank 0] step:5061/10000 train_time:410560ms step_avg:81.12ms
+[2025-07-07 19:51:19] [Rank 0] step:5081/10000 train_time:412051ms step_avg:81.10ms
+[2025-07-07 19:51:20] [Rank 0] step:5101/10000 train_time:413546ms step_avg:81.07ms
+[2025-07-07 19:51:22] [Rank 0] step:5121/10000 train_time:415038ms step_avg:81.05ms
+[2025-07-07 19:51:23] [Rank 0] step:5141/10000 train_time:416765ms step_avg:81.07ms
+[2025-07-07 19:51:25] [Rank 0] step:5161/10000 train_time:418258ms step_avg:81.04ms
+[2025-07-07 19:51:26] [Rank 0] step:5181/10000 train_time:419753ms step_avg:81.02ms
+[2025-07-07 19:51:28] [Rank 0] step:5201/10000 train_time:421250ms step_avg:80.99ms
+[2025-07-07 19:51:30] [Rank 0] step:5221/10000 train_time:422744ms step_avg:80.97ms
+[2025-07-07 19:51:31] [Rank 0] step:5241/10000 train_time:424478ms step_avg:80.99ms
+[2025-07-07 19:51:33] [Rank 0] step:5261/10000 train_time:425976ms step_avg:80.97ms
+[2025-07-07 19:51:34] [Rank 0] step:5281/10000 train_time:427481ms step_avg:80.95ms
+[2025-07-07 19:51:36] [Rank 0] step:5301/10000 train_time:428978ms step_avg:80.92ms
+[2025-07-07 19:51:37] [Rank 0] step:5321/10000 train_time:430711ms step_avg:80.95ms
+[2025-07-07 19:51:39] [Rank 0] step:5341/10000 train_time:432208ms step_avg:80.92ms
+[2025-07-07 19:51:40] [Rank 0] step:5361/10000 train_time:433705ms step_avg:80.90ms
+[2025-07-07 19:51:42] [Rank 0] step:5381/10000 train_time:435202ms step_avg:80.88ms
+[2025-07-07 19:51:44] [Rank 0] step:5401/10000 train_time:436749ms step_avg:80.86ms
+[2025-07-07 19:51:45] [Rank 0] step:5421/10000 train_time:438853ms step_avg:80.95ms
+[2025-07-07 19:51:47] [Rank 0] step:5441/10000 train_time:440346ms step_avg:80.93ms
+[2025-07-07 19:51:48] [Rank 0] step:5461/10000 train_time:441843ms step_avg:80.91ms
+[2025-07-07 19:51:50] [Rank 0] step:5481/10000 train_time:443341ms step_avg:80.89ms
+[2025-07-07 19:51:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:51:53] [Rank 0] PRINT: step:5500/10000 train_loss:0.8630 val_loss:0.8634 train_time:445070ms step_avg:80.92ms
+[2025-07-07 19:51:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:51:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:51:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:57:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:57:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:57:21] [Rank 0] Total Loss: 5.3986
+[2025-07-07 19:57:21] [Rank 0] Total FTA: 0.9723
+[2025-07-07 19:57:21] [Rank 0] Group 0 Loss: 5.7539
+[2025-07-07 19:57:21] [Rank 0] Group 1 Loss: 5.2919
+[2025-07-07 19:57:21] [Rank 0] Group 2 Loss: 5.1334
+[2025-07-07 19:57:21] [Rank 0] Group 3 Loss: 5.3165
+[2025-07-07 19:57:21] [Rank 0] Group 4 Loss: 5.4402
+[2025-07-07 19:57:21] [Rank 0] Group 5 Loss: 5.2630
+[2025-07-07 19:57:21] [Rank 0] Group 6 Loss: 5.2919
+[2025-07-07 19:57:21] [Rank 0] Group 7 Loss: 5.3504
+[2025-07-07 19:57:21] [Rank 0] Group 8 Loss: 5.3722
+[2025-07-07 19:57:21] [Rank 0] Group 9 Loss: 5.4021
+[2025-07-07 19:57:21] [Rank 0] Group 10 Loss: 5.4070
+[2025-07-07 19:57:21] [Rank 0] Group 11 Loss: 5.3999
+[2025-07-07 19:57:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 19:57:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 19:57:21] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 19:57:21] [Rank 0] Group 3 FTA: 0.9479
+[2025-07-07 19:57:21] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 19:57:21] [Rank 0] Group 5 FTA: 0.9609
+[2025-07-07 19:57:21] [Rank 0] Group 6 FTA: 0.9193
+[2025-07-07 19:57:21] [Rank 0] Group 7 FTA: 0.9427
+[2025-07-07 19:57:21] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-07 19:57:21] [Rank 0] Group 9 FTA: 0.9648
+[2025-07-07 19:57:21] [Rank 0] Group 10 FTA: 0.9746
+[2025-07-07 19:57:21] [Rank 0] Group 11 FTA: 0.9707
+[2025-07-07 19:57:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 19:57:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 19:57:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 19:57:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 19:57:23] [Rank 0] step:5501/10000 train_time:445092ms step_avg:80.91ms
+[2025-07-07 19:57:24] [Rank 0] step:5521/10000 train_time:446586ms step_avg:80.89ms
+[2025-07-07 19:57:26] [Rank 0] step:5541/10000 train_time:448076ms step_avg:80.87ms
+[2025-07-07 19:57:27] [Rank 0] step:5561/10000 train_time:449570ms step_avg:80.84ms
+[2025-07-07 19:57:29] [Rank 0] step:5581/10000 train_time:451060ms step_avg:80.82ms
+[2025-07-07 19:57:31] [Rank 0] step:5601/10000 train_time:453206ms step_avg:80.92ms
+[2025-07-07 19:57:32] [Rank 0] step:5621/10000 train_time:454699ms step_avg:80.89ms
+[2025-07-07 19:57:34] [Rank 0] step:5641/10000 train_time:456194ms step_avg:80.87ms
+[2025-07-07 19:57:35] [Rank 0] step:5661/10000 train_time:457691ms step_avg:80.85ms
+[2025-07-07 19:57:37] [Rank 0] step:5681/10000 train_time:459425ms step_avg:80.87ms
+[2025-07-07 19:57:39] [Rank 0] step:5701/10000 train_time:460919ms step_avg:80.85ms
+[2025-07-07 19:57:40] [Rank 0] step:5721/10000 train_time:462413ms step_avg:80.83ms
+[2025-07-07 19:57:42] [Rank 0] step:5741/10000 train_time:463910ms step_avg:80.81ms
+[2025-07-07 19:57:44] [Rank 0] step:5761/10000 train_time:465663ms step_avg:80.83ms
+[2025-07-07 19:57:45] [Rank 0] step:5781/10000 train_time:467546ms step_avg:80.88ms
+[2025-07-07 19:57:47] [Rank 0] step:5801/10000 train_time:469044ms step_avg:80.86ms
+[2025-07-07 19:57:48] [Rank 0] step:5821/10000 train_time:470541ms step_avg:80.84ms
+[2025-07-07 19:57:50] [Rank 0] step:5841/10000 train_time:472038ms step_avg:80.81ms
+[2025-07-07 19:57:52] [Rank 0] step:5861/10000 train_time:474187ms step_avg:80.91ms
+[2025-07-07 19:57:53] [Rank 0] step:5881/10000 train_time:475682ms step_avg:80.88ms
+[2025-07-07 19:57:55] [Rank 0] step:5901/10000 train_time:477183ms step_avg:80.86ms
+[2025-07-07 19:57:56] [Rank 0] step:5921/10000 train_time:478681ms step_avg:80.84ms
+[2025-07-07 19:57:58] [Rank 0] step:5941/10000 train_time:480436ms step_avg:80.87ms
+[2025-07-07 19:58:00] [Rank 0] step:5961/10000 train_time:481913ms step_avg:80.84ms
+[2025-07-07 19:58:01] [Rank 0] step:5981/10000 train_time:483411ms step_avg:80.82ms
+[2025-07-07 19:58:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:58:03] [Rank 0] PRINT: step:6000/10000 train_loss:0.8623 val_loss:0.8613 train_time:484912ms step_avg:80.82ms
+[2025-07-07 19:58:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:58:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:58:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:03:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:03:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:03:33] [Rank 0] Total Loss: 5.3986
+[2025-07-07 20:03:33] [Rank 0] Total FTA: 0.9799
+[2025-07-07 20:03:33] [Rank 0] Group 0 Loss: 5.6806
+[2025-07-07 20:03:33] [Rank 0] Group 1 Loss: 5.2796
+[2025-07-07 20:03:33] [Rank 0] Group 2 Loss: 5.1788
+[2025-07-07 20:03:33] [Rank 0] Group 3 Loss: 5.4302
+[2025-07-07 20:03:33] [Rank 0] Group 4 Loss: 5.3340
+[2025-07-07 20:03:33] [Rank 0] Group 5 Loss: 5.3514
+[2025-07-07 20:03:33] [Rank 0] Group 6 Loss: 5.1872
+[2025-07-07 20:03:33] [Rank 0] Group 7 Loss: 5.4047
+[2025-07-07 20:03:33] [Rank 0] Group 8 Loss: 5.4247
+[2025-07-07 20:03:33] [Rank 0] Group 9 Loss: 5.4086
+[2025-07-07 20:03:33] [Rank 0] Group 10 Loss: 5.4023
+[2025-07-07 20:03:33] [Rank 0] Group 11 Loss: 5.4070
+[2025-07-07 20:03:33] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:03:33] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:03:33] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:03:33] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 20:03:33] [Rank 0] Group 4 FTA: 0.9714
+[2025-07-07 20:03:33] [Rank 0] Group 5 FTA: 0.9844
+[2025-07-07 20:03:33] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-07 20:03:33] [Rank 0] Group 7 FTA: 0.9792
+[2025-07-07 20:03:33] [Rank 0] Group 8 FTA: 0.9505
+[2025-07-07 20:03:33] [Rank 0] Group 9 FTA: 0.9609
+[2025-07-07 20:03:33] [Rank 0] Group 10 FTA: 0.9648
+[2025-07-07 20:03:33] [Rank 0] Group 11 FTA: 0.9697
+[2025-07-07 20:03:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:03:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 20:03:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 20:03:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 20:03:34] [Rank 0] step:6001/10000 train_time:484933ms step_avg:80.81ms
+[2025-07-07 20:03:36] [Rank 0] step:6021/10000 train_time:486431ms step_avg:80.79ms
+[2025-07-07 20:03:38] [Rank 0] step:6041/10000 train_time:488579ms step_avg:80.88ms
+[2025-07-07 20:03:39] [Rank 0] step:6061/10000 train_time:490068ms step_avg:80.86ms
+[2025-07-07 20:03:41] [Rank 0] step:6081/10000 train_time:491560ms step_avg:80.84ms
+[2025-07-07 20:03:42] [Rank 0] step:6101/10000 train_time:493055ms step_avg:80.82ms
+[2025-07-07 20:03:44] [Rank 0] step:6121/10000 train_time:494549ms step_avg:80.80ms
+[2025-07-07 20:03:46] [Rank 0] step:6141/10000 train_time:496279ms step_avg:80.81ms
+[2025-07-07 20:03:47] [Rank 0] step:6161/10000 train_time:497773ms step_avg:80.79ms
+[2025-07-07 20:03:49] [Rank 0] step:6181/10000 train_time:499270ms step_avg:80.78ms
+[2025-07-07 20:03:50] [Rank 0] step:6201/10000 train_time:500766ms step_avg:80.76ms
+[2025-07-07 20:03:52] [Rank 0] step:6221/10000 train_time:502916ms step_avg:80.84ms
+[2025-07-07 20:03:54] [Rank 0] step:6241/10000 train_time:504411ms step_avg:80.82ms
+[2025-07-07 20:03:55] [Rank 0] step:6261/10000 train_time:505907ms step_avg:80.80ms
+[2025-07-07 20:03:57] [Rank 0] step:6281/10000 train_time:507400ms step_avg:80.78ms
+[2025-07-07 20:03:59] [Rank 0] step:6301/10000 train_time:508948ms step_avg:80.77ms
+[2025-07-07 20:04:00] [Rank 0] step:6321/10000 train_time:511059ms step_avg:80.85ms
+[2025-07-07 20:04:02] [Rank 0] step:6341/10000 train_time:512553ms step_avg:80.83ms
+[2025-07-07 20:04:03] [Rank 0] step:6361/10000 train_time:514051ms step_avg:80.81ms
+[2025-07-07 20:04:05] [Rank 0] step:6381/10000 train_time:515548ms step_avg:80.79ms
+[2025-07-07 20:04:07] [Rank 0] step:6401/10000 train_time:517279ms step_avg:80.81ms
+[2025-07-07 20:04:08] [Rank 0] step:6421/10000 train_time:518775ms step_avg:80.79ms
+[2025-07-07 20:04:10] [Rank 0] step:6441/10000 train_time:520273ms step_avg:80.78ms
+[2025-07-07 20:04:11] [Rank 0] step:6461/10000 train_time:521772ms step_avg:80.76ms
+[2025-07-07 20:04:13] [Rank 0] step:6481/10000 train_time:523526ms step_avg:80.78ms
+[2025-07-07 20:04:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:04:15] [Rank 0] PRINT: step:6500/10000 train_loss:0.8607 val_loss:0.8604 train_time:525004ms step_avg:80.77ms
+[2025-07-07 20:04:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:04:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:04:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:09:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:09:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:09:45] [Rank 0] Total Loss: 5.4679
+[2025-07-07 20:09:45] [Rank 0] Total FTA: 0.9759
+[2025-07-07 20:09:45] [Rank 0] Group 0 Loss: 5.8549
+[2025-07-07 20:09:45] [Rank 0] Group 1 Loss: 5.3025
+[2025-07-07 20:09:45] [Rank 0] Group 2 Loss: 5.2804
+[2025-07-07 20:09:45] [Rank 0] Group 3 Loss: 5.4639
+[2025-07-07 20:09:45] [Rank 0] Group 4 Loss: 5.3951
+[2025-07-07 20:09:45] [Rank 0] Group 5 Loss: 5.4507
+[2025-07-07 20:09:46] [Rank 0] Group 6 Loss: 5.3050
+[2025-07-07 20:09:46] [Rank 0] Group 7 Loss: 5.4667
+[2025-07-07 20:09:46] [Rank 0] Group 8 Loss: 5.4529
+[2025-07-07 20:09:46] [Rank 0] Group 9 Loss: 5.4466
+[2025-07-07 20:09:46] [Rank 0] Group 10 Loss: 5.4184
+[2025-07-07 20:09:46] [Rank 0] Group 11 Loss: 5.4421
+[2025-07-07 20:09:46] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:09:46] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:09:46] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:09:46] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-07 20:09:46] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 20:09:46] [Rank 0] Group 5 FTA: 0.9714
+[2025-07-07 20:09:46] [Rank 0] Group 6 FTA: 0.9922
+[2025-07-07 20:09:46] [Rank 0] Group 7 FTA: 0.9375
+[2025-07-07 20:09:46] [Rank 0] Group 8 FTA: 0.9375
+[2025-07-07 20:09:46] [Rank 0] Group 9 FTA: 0.9727
+[2025-07-07 20:09:46] [Rank 0] Group 10 FTA: 0.9590
+[2025-07-07 20:09:46] [Rank 0] Group 11 FTA: 0.9688
+[2025-07-07 20:09:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:09:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png +[2025-07-07 20:09:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png +[2025-07-07 20:09:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png +[2025-07-07 20:09:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png +[2025-07-07 20:09:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png +[2025-07-07 20:09:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png +[2025-07-07 20:09:47] [Rank 0] step:6501/10000 train_time:525025ms step_avg:80.76ms +[2025-07-07 20:09:47] [Rank 0] step:6501/10000 train_time:525025ms step_avg:80.76ms +[2025-07-07 20:09:49] [Rank 0] step:6521/10000 train_time:526516ms step_avg:80.74ms +[2025-07-07 20:09:49] [Rank 0] step:6521/10000 train_time:526516ms step_avg:80.74ms +[2025-07-07 20:09:50] [Rank 0] step:6541/10000 train_time:528003ms step_avg:80.72ms +[2025-07-07 20:09:50] [Rank 0] step:6541/10000 train_time:528003ms step_avg:80.72ms +[2025-07-07 20:09:52] [Rank 0] step:6561/10000 train_time:529494ms step_avg:80.70ms +[2025-07-07 20:09:52] [Rank 0] step:6561/10000 train_time:529494ms step_avg:80.70ms +[2025-07-07 20:09:53] [Rank 0] step:6581/10000 train_time:531172ms step_avg:80.71ms +[2025-07-07 20:09:53] [Rank 0] step:6581/10000 train_time:531172ms step_avg:80.71ms +[2025-07-07 20:09:55] [Rank 0] step:6601/10000 train_time:532665ms step_avg:80.69ms +[2025-07-07 20:09:55] [Rank 0] step:6601/10000 train_time:532665ms step_avg:80.69ms +[2025-07-07 20:09:56] [Rank 0] step:6621/10000 train_time:534159ms step_avg:80.68ms +[2025-07-07 20:09:56] [Rank 0] step:6621/10000 train_time:534159ms step_avg:80.68ms +[2025-07-07 20:09:58] [Rank 0] step:6641/10000 train_time:535652ms step_avg:80.66ms +[2025-07-07 20:09:58] [Rank 0] step:6641/10000 train_time:535652ms step_avg:80.66ms +[2025-07-07 20:10:00] [Rank 0] step:6661/10000 train_time:537147ms step_avg:80.64ms +[2025-07-07 20:10:00] [Rank 0] step:6661/10000 train_time:537147ms step_avg:80.64ms +[2025-07-07 20:10:01] [Rank 0] step:6681/10000 train_time:539296ms step_avg:80.72ms +[2025-07-07 20:10:01] [Rank 0] step:6681/10000 train_time:539296ms step_avg:80.72ms +[2025-07-07 20:10:03] [Rank 0] step:6701/10000 train_time:540789ms step_avg:80.70ms +[2025-07-07 20:10:03] [Rank 0] step:6701/10000 train_time:540789ms step_avg:80.70ms +[2025-07-07 20:10:04] [Rank 0] step:6721/10000 train_time:542285ms step_avg:80.69ms +[2025-07-07 20:10:04] [Rank 0] step:6721/10000 train_time:542285ms step_avg:80.69ms +[2025-07-07 20:10:06] [Rank 0] step:6741/10000 train_time:543781ms step_avg:80.67ms +[2025-07-07 20:10:06] [Rank 0] step:6741/10000 train_time:543781ms step_avg:80.67ms +[2025-07-07 20:10:08] [Rank 0] step:6761/10000 train_time:545939ms step_avg:80.75ms +[2025-07-07 20:10:08] [Rank 0] step:6761/10000 train_time:545939ms step_avg:80.75ms +[2025-07-07 20:10:10] [Rank 0] step:6781/10000 train_time:547433ms step_avg:80.73ms +[2025-07-07 20:10:10] [Rank 0] step:6781/10000 train_time:547433ms step_avg:80.73ms +[2025-07-07 20:10:11] [Rank 0] step:6801/10000 train_time:548928ms step_avg:80.71ms +[2025-07-07 20:10:11] [Rank 0] 
step:6801/10000 train_time:548928ms step_avg:80.71ms +[2025-07-07 20:10:12] [Rank 0] step:6821/10000 train_time:550424ms step_avg:80.70ms +[2025-07-07 20:10:12] [Rank 0] step:6821/10000 train_time:550424ms step_avg:80.70ms +[2025-07-07 20:10:15] [Rank 0] step:6841/10000 train_time:551972ms step_avg:80.69ms +[2025-07-07 20:10:15] [Rank 0] step:6841/10000 train_time:551972ms step_avg:80.69ms +[2025-07-07 20:10:16] [Rank 0] step:6861/10000 train_time:554080ms step_avg:80.76ms +[2025-07-07 20:10:16] [Rank 0] step:6861/10000 train_time:554080ms step_avg:80.76ms +[2025-07-07 20:10:18] [Rank 0] step:6881/10000 train_time:555577ms step_avg:80.74ms +[2025-07-07 20:10:18] [Rank 0] step:6881/10000 train_time:555577ms step_avg:80.74ms +[2025-07-07 20:10:19] [Rank 0] step:6901/10000 train_time:557074ms step_avg:80.72ms +[2025-07-07 20:10:19] [Rank 0] step:6901/10000 train_time:557074ms step_avg:80.72ms +[2025-07-07 20:10:21] [Rank 0] step:6921/10000 train_time:558572ms step_avg:80.71ms +[2025-07-07 20:10:21] [Rank 0] step:6921/10000 train_time:558572ms step_avg:80.71ms +[2025-07-07 20:10:23] [Rank 0] step:6941/10000 train_time:560724ms step_avg:80.78ms +[2025-07-07 20:10:23] [Rank 0] step:6941/10000 train_time:560724ms step_avg:80.78ms +[2025-07-07 20:10:24] [Rank 0] step:6961/10000 train_time:562219ms step_avg:80.77ms +[2025-07-07 20:10:24] [Rank 0] step:6961/10000 train_time:562219ms step_avg:80.77ms +[2025-07-07 20:10:26] [Rank 0] step:6981/10000 train_time:563717ms step_avg:80.75ms +[2025-07-07 20:10:26] [Rank 0] step:6981/10000 train_time:563717ms step_avg:80.75ms +[2025-07-07 20:10:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 20:10:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 20:10:28] [Rank 0] PRINT: step:7000/10000 train_loss:0.8595 val_loss:0.8599 train_time:565215ms step_avg:80.74ms +[2025-07-07 20:10:28] [Rank 0] PRINT: step:7000/10000 train_loss:0.8595 val_loss:0.8599 train_time:565215ms step_avg:80.74ms +[2025-07-07 20:10:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 20:10:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 20:10:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 20:10:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
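The divisibility warning above repeats at every 500-step checkpoint because the quantities involved never change: 1966080 / 262144 = 7.5, so a loop that only consumes whole batches covers 7 of them. A minimal sketch of that arithmetic, assuming the validation loop simply floors the quotient (the log does not show the actual loop; the variable names below are illustrative, not from the training script):

    # Illustrative check of the divisibility warning; names are hypothetical.
    val_tokens = 1_966_080
    val_batch_size = 262_144
    full_batches = val_tokens // val_batch_size      # 7 complete validation batches
    consumed = full_batches * val_batch_size         # 1,835,008 tokens actually evaluated
    missed = val_tokens - consumed                   # 131,072 tokens (half a batch) skipped
    print(full_batches, consumed, missed)            # 7 1835008 131072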
+[2025-07-07 20:10:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:15:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:15:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:15:58] [Rank 0] Total Loss: 5.4475
+[2025-07-07 20:15:58] [Rank 0] Total FTA: 0.9805
+[2025-07-07 20:15:58] [Rank 0] Group 0 Loss: 5.7516
+[2025-07-07 20:15:59] [Rank 0] Group 1 Loss: 5.4431
+[2025-07-07 20:15:59] [Rank 0] Group 2 Loss: 5.2590
+[2025-07-07 20:15:59] [Rank 0] Group 3 Loss: 5.3851
+[2025-07-07 20:15:59] [Rank 0] Group 4 Loss: 5.4620
+[2025-07-07 20:15:59] [Rank 0] Group 5 Loss: 5.3766
+[2025-07-07 20:15:59] [Rank 0] Group 6 Loss: 5.2999
+[2025-07-07 20:15:59] [Rank 0] Group 7 Loss: 5.4257
+[2025-07-07 20:15:59] [Rank 0] Group 8 Loss: 5.4234
+[2025-07-07 20:15:59] [Rank 0] Group 9 Loss: 5.4175
+[2025-07-07 20:15:59] [Rank 0] Group 10 Loss: 5.4150
+[2025-07-07 20:15:59] [Rank 0] Group 11 Loss: 5.4323
+[2025-07-07 20:15:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:15:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:15:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:15:59] [Rank 0] Group 3 FTA: 0.9740
+[2025-07-07 20:15:59] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 20:15:59] [Rank 0] Group 5 FTA: 0.9661
+[2025-07-07 20:15:59] [Rank 0] Group 6 FTA: 0.9688
+[2025-07-07 20:15:59] [Rank 0] Group 7 FTA: 0.9635
+[2025-07-07 20:15:59] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-07 20:15:59] [Rank 0] Group 9 FTA: 0.9727
+[2025-07-07 20:15:59] [Rank 0] Group 10 FTA: 0.9668
+[2025-07-07 20:15:59] [Rank 0] Group 11 FTA: 0.9785
+[2025-07-07 20:15:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:16:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 20:16:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 20:16:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 20:16:00] [Rank 0] step:7001/10000 train_time:565236ms step_avg:80.74ms
+[2025-07-07 20:16:02] [Rank 0] step:7021/10000 train_time:566792ms step_avg:80.73ms
+[2025-07-07 20:16:04] [Rank 0] step:7041/10000 train_time:568900ms step_avg:80.80ms
+[2025-07-07 20:16:05] [Rank 0] step:7061/10000 train_time:570389ms step_avg:80.78ms
+[2025-07-07 20:16:07] [Rank 0] step:7081/10000 train_time:571879ms step_avg:80.76ms
+[2025-07-07 20:16:08] [Rank 0] step:7101/10000 train_time:573372ms step_avg:80.75ms
+[2025-07-07 20:16:10] [Rank 0] step:7121/10000 train_time:575100ms step_avg:80.76ms
+[2025-07-07 20:16:12] [Rank 0] step:7141/10000 train_time:576797ms step_avg:80.77ms
+[2025-07-07 20:16:13] [Rank 0] step:7161/10000 train_time:578291ms step_avg:80.76ms
+[2025-07-07 20:16:15] [Rank 0] step:7181/10000 train_time:579786ms step_avg:80.74ms
+[2025-07-07 20:16:17] [Rank 0] step:7201/10000 train_time:581280ms step_avg:80.72ms
+[2025-07-07 20:16:18] [Rank 0] step:7221/10000 train_time:583429ms step_avg:80.80ms
+[2025-07-07 20:16:20] [Rank 0] step:7241/10000 train_time:584924ms step_avg:80.78ms
+[2025-07-07 20:16:21] [Rank 0] step:7261/10000 train_time:586420ms step_avg:80.76ms
+[2025-07-07 20:16:23] [Rank 0] step:7281/10000 train_time:587915ms step_avg:80.75ms
+[2025-07-07 20:16:25] [Rank 0] step:7301/10000 train_time:590070ms step_avg:80.82ms
+[2025-07-07 20:16:27] [Rank 0] step:7321/10000 train_time:591573ms step_avg:80.80ms
+[2025-07-07 20:16:28] [Rank 0] step:7341/10000 train_time:593066ms step_avg:80.79ms
+[2025-07-07 20:16:30] [Rank 0] step:7361/10000 train_time:594563ms step_avg:80.77ms
+[2025-07-07 20:16:32] [Rank 0] step:7381/10000 train_time:596062ms step_avg:80.76ms
+[2025-07-07 20:16:33] [Rank 0] step:7401/10000 train_time:598214ms step_avg:80.83ms
+[2025-07-07 20:16:35] [Rank 0] step:7421/10000 train_time:599711ms step_avg:80.81ms
+[2025-07-07 20:16:36] [Rank 0] step:7441/10000 train_time:601208ms step_avg:80.80ms
+[2025-07-07 20:16:38] [Rank 0] step:7461/10000 train_time:602706ms step_avg:80.78ms
+[2025-07-07 20:16:40] [Rank 0] step:7481/10000 train_time:604855ms step_avg:80.85ms
+[2025-07-07 20:16:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:16:42] [Rank 0] PRINT: step:7500/10000 train_loss:0.8588 val_loss:0.8593 train_time:606351ms step_avg:80.85ms
+[2025-07-07 20:16:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:16:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
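Each detailed evaluation requests ~5000 samples yet consistently ends up with 5633. That overshoot is what per-group quota rounding in a stratified sampler produces when every group's proportional share is rounded up. A minimal sketch of that idea, under the assumption of proportional per-group quotas (the actual sampler is not shown in this log; the function and variable names are hypothetical):

    # Hypothetical stratified sampler illustrating why the sampled size can exceed the target.
    import math
    import random

    def stratified_sample(examples_by_group, target=5000):
        total = sum(len(v) for v in examples_by_group.values())
        sampled = []
        for examples in examples_by_group.values():
            # Ceiling each group's proportional quota keeps small groups represented,
            # but makes the final size overshoot the target (e.g. 5633 > 5000).
            quota = math.ceil(target * len(examples) / total)
            sampled.extend(random.sample(examples, min(quota, len(examples))))
        return sampled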
+[2025-07-07 20:16:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:22:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:22:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:22:15] [Rank 0] Total Loss: 5.4043
+[2025-07-07 20:22:15] [Rank 0] Total FTA: 0.9808
+[2025-07-07 20:22:15] [Rank 0] Group 0 Loss: 5.8118
+[2025-07-07 20:22:15] [Rank 0] Group 1 Loss: 5.3461
+[2025-07-07 20:22:15] [Rank 0] Group 2 Loss: 5.2520
+[2025-07-07 20:22:15] [Rank 0] Group 3 Loss: 5.3376
+[2025-07-07 20:22:15] [Rank 0] Group 4 Loss: 5.4033
+[2025-07-07 20:22:15] [Rank 0] Group 5 Loss: 5.3303
+[2025-07-07 20:22:15] [Rank 0] Group 6 Loss: 5.2884
+[2025-07-07 20:22:15] [Rank 0] Group 7 Loss: 5.3939
+[2025-07-07 20:22:15] [Rank 0] Group 8 Loss: 5.3708
+[2025-07-07 20:22:15] [Rank 0] Group 9 Loss: 5.3007
+[2025-07-07 20:22:15] [Rank 0] Group 10 Loss: 5.3471
+[2025-07-07 20:22:15] [Rank 0] Group 11 Loss: 5.3447
+[2025-07-07 20:22:15] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:22:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:22:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:22:15] [Rank 0] Group 3 FTA: 0.9583
+[2025-07-07 20:22:15] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 20:22:15] [Rank 0] Group 5 FTA: 0.9922
+[2025-07-07 20:22:15] [Rank 0] Group 6 FTA: 0.9766
+[2025-07-07 20:22:15] [Rank 0] Group 7 FTA: 0.9714
+[2025-07-07 20:22:15] [Rank 0] Group 8 FTA: 0.9714
+[2025-07-07 20:22:15] [Rank 0] Group 9 FTA: 0.9727
+[2025-07-07 20:22:15] [Rank 0] Group 10 FTA: 0.9707
+[2025-07-07 20:22:15] [Rank 0] Group 11 FTA: 0.9648
+[2025-07-07 20:22:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:22:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 20:22:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 20:22:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 20:22:16] [Rank 0] step:7501/10000 train_time:606372ms step_avg:80.84ms
+[2025-07-07 20:22:18] [Rank 0] step:7521/10000 train_time:607873ms step_avg:80.82ms
+[2025-07-07 20:22:19] [Rank 0] step:7541/10000 train_time:609364ms step_avg:80.81ms
+[2025-07-07 20:22:22] [Rank 0] step:7561/10000 train_time:611114ms step_avg:80.82ms
+[2025-07-07 20:22:23] [Rank 0] step:7581/10000 train_time:613014ms step_avg:80.86ms
+[2025-07-07 20:22:25] [Rank 0] step:7601/10000 train_time:614507ms step_avg:80.85ms
+[2025-07-07 20:22:26] [Rank 0] step:7621/10000 train_time:616000ms step_avg:80.83ms
+[2025-07-07 20:22:28] [Rank 0] step:7641/10000 train_time:617492ms step_avg:80.81ms
+[2025-07-07 20:22:30] [Rank 0] step:7661/10000 train_time:619627ms step_avg:80.88ms
+[2025-07-07 20:22:31] [Rank 0] step:7681/10000 train_time:621365ms step_avg:80.90ms
+[2025-07-07 20:22:33] [Rank 0] step:7701/10000 train_time:622860ms step_avg:80.88ms
+[2025-07-07 20:22:34] [Rank 0] step:7721/10000 train_time:624355ms step_avg:80.86ms
+[2025-07-07 20:22:37] [Rank 0] step:7741/10000 train_time:625905ms step_avg:80.86ms
+[2025-07-07 20:22:38] [Rank 0] step:7761/10000 train_time:627991ms step_avg:80.92ms
+[2025-07-07 20:22:40] [Rank 0] step:7781/10000 train_time:629488ms step_avg:80.90ms
+[2025-07-07 20:22:41] [Rank 0] step:7801/10000 train_time:630986ms step_avg:80.89ms
+[2025-07-07 20:22:43] [Rank 0] step:7821/10000 train_time:632483ms step_avg:80.87ms
+[2025-07-07 20:22:45] [Rank 0] step:7841/10000 train_time:634635ms step_avg:80.94ms
+[2025-07-07 20:22:46] [Rank 0] step:7861/10000 train_time:636130ms step_avg:80.92ms
+[2025-07-07 20:22:48] [Rank 0] step:7881/10000 train_time:637626ms step_avg:80.91ms
+[2025-07-07 20:22:49] [Rank 0] step:7901/10000 train_time:639122ms step_avg:80.89ms
+[2025-07-07 20:22:51] [Rank 0] step:7921/10000 train_time:640874ms step_avg:80.91ms
+[2025-07-07 20:22:53] [Rank 0] step:7941/10000 train_time:642783ms step_avg:80.94ms
+[2025-07-07 20:22:54] [Rank 0] step:7961/10000 train_time:644282ms step_avg:80.93ms
+[2025-07-07 20:22:56] [Rank 0] step:7981/10000 train_time:645779ms step_avg:80.91ms
+[2025-07-07 20:22:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:22:58] [Rank 0] PRINT: step:8000/10000 train_loss:0.8579 val_loss:0.8590 train_time:647276ms step_avg:80.91ms
+[2025-07-07 20:22:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:22:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:22:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:28:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:28:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:28:27] [Rank 0] Total Loss: 5.3682
+[2025-07-07 20:28:27] [Rank 0] Total FTA: 0.9901
+[2025-07-07 20:28:27] [Rank 0] Group 0 Loss: 5.7274
+[2025-07-07 20:28:27] [Rank 0] Group 1 Loss: 5.3969
+[2025-07-07 20:28:27] [Rank 0] Group 2 Loss: 5.2650
+[2025-07-07 20:28:27] [Rank 0] Group 3 Loss: 5.3204
+[2025-07-07 20:28:27] [Rank 0] Group 4 Loss: 5.3528
+[2025-07-07 20:28:27] [Rank 0] Group 5 Loss: 5.2821
+[2025-07-07 20:28:27] [Rank 0] Group 6 Loss: 5.2557
+[2025-07-07 20:28:27] [Rank 0] Group 7 Loss: 5.3712
+[2025-07-07 20:28:27] [Rank 0] Group 8 Loss: 5.3045
+[2025-07-07 20:28:27] [Rank 0] Group 9 Loss: 5.2955
+[2025-07-07 20:28:27] [Rank 0] Group 10 Loss: 5.2922
+[2025-07-07 20:28:27] [Rank 0] Group 11 Loss: 5.3034
+[2025-07-07 20:28:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:28:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:28:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:28:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 20:28:27] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 20:28:27] [Rank 0] Group 5 FTA: 0.9870
+[2025-07-07 20:28:27] [Rank 0] Group 6 FTA: 0.9844
+[2025-07-07 20:28:27] [Rank 0] Group 7 FTA: 0.9844
+[2025-07-07 20:28:27] [Rank 0] Group 8 FTA: 0.9818
+[2025-07-07 20:28:27] [Rank 0] Group 9 FTA: 0.9844
+[2025-07-07 20:28:27] [Rank 0] Group 10 FTA: 0.9668
+[2025-07-07 20:28:27] [Rank 0] Group 11 FTA: 0.9893
+[2025-07-07 20:28:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:28:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 20:28:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 20:28:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 20:28:29] [Rank 0] step:8001/10000 train_time:647298ms step_avg:80.90ms
+[2025-07-07 20:28:31] [Rank 0] step:8021/10000 train_time:649441ms step_avg:80.97ms
+[2025-07-07 20:28:32] [Rank 0] step:8041/10000 train_time:650933ms step_avg:80.95ms
+[2025-07-07 20:28:34] [Rank 0] step:8061/10000 train_time:652422ms step_avg:80.94ms
+[2025-07-07 20:28:35] [Rank 0] step:8081/10000 train_time:653915ms step_avg:80.92ms
+[2025-07-07 20:28:37] [Rank 0] step:8101/10000 train_time:655459ms step_avg:80.91ms
+[2025-07-07 20:28:39] [Rank 0] step:8121/10000 train_time:657552ms step_avg:80.97ms
+[2025-07-07 20:28:40] [Rank 0] step:8141/10000 train_time:659046ms step_avg:80.95ms
+[2025-07-07 20:28:42] [Rank 0] step:8161/10000 train_time:660541ms step_avg:80.94ms
+[2025-07-07 20:28:43] [Rank 0] step:8181/10000 train_time:662035ms step_avg:80.92ms
+[2025-07-07 20:28:45] [Rank 0] step:8201/10000 train_time:663766ms step_avg:80.94ms
+[2025-07-07 20:28:47] [Rank 0] step:8221/10000 train_time:665261ms step_avg:80.92ms
+[2025-07-07 20:28:48] [Rank 0] step:8241/10000 train_time:666755ms step_avg:80.91ms
+[2025-07-07 20:28:50] [Rank 0] step:8261/10000 train_time:668386ms step_avg:80.91ms
+[2025-07-07 20:28:52] [Rank 0] step:8281/10000 train_time:670015ms step_avg:80.91ms
+[2025-07-07 20:28:53] [Rank 0] step:8301/10000 train_time:672114ms step_avg:80.97ms
+[2025-07-07 20:28:55] [Rank 0] step:8321/10000 train_time:673609ms step_avg:80.95ms
+[2025-07-07 20:28:56] [Rank 0] step:8341/10000 train_time:675107ms step_avg:80.94ms
+[2025-07-07 20:28:58] [Rank 0] step:8361/10000 train_time:676603ms step_avg:80.92ms
+[2025-07-07 20:29:00] [Rank 0] step:8381/10000 train_time:678756ms step_avg:80.99ms
+[2025-07-07 20:29:02] [Rank 0] step:8401/10000 train_time:680251ms step_avg:80.97ms
+[2025-07-07 20:29:03] [Rank 0] step:8421/10000 train_time:681749ms step_avg:80.96ms
+[2025-07-07 20:29:05] [Rank 0] step:8441/10000 train_time:683246ms step_avg:80.94ms
+[2025-07-07 20:29:06] [Rank 0] step:8461/10000 train_time:684795ms step_avg:80.94ms
+[2025-07-07 20:29:08] [Rank 0] step:8481/10000 train_time:686480ms step_avg:80.94ms
+[2025-07-07 20:29:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:29:10] [Rank 0] PRINT: step:8500/10000 train_loss:0.8571 val_loss:0.8587 train_time:687977ms step_avg:80.94ms
+[2025-07-07 20:29:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:29:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:29:10] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:34:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:34:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:34:42] [Rank 0] Total Loss: 5.4082
+[2025-07-07 20:34:42] [Rank 0] Total FTA: 0.9908
+[2025-07-07 20:34:42] [Rank 0] Group 0 Loss: 5.7351
+[2025-07-07 20:34:42] [Rank 0] Group 1 Loss: 5.4034
+[2025-07-07 20:34:42] [Rank 0] Group 2 Loss: 5.3435
+[2025-07-07 20:34:42] [Rank 0] Group 3 Loss: 5.3036
+[2025-07-07 20:34:42] [Rank 0] Group 4 Loss: 5.3881
+[2025-07-07 20:34:42] [Rank 0] Group 5 Loss: 5.2981
+[2025-07-07 20:34:42] [Rank 0] Group 6 Loss: 5.2673
+[2025-07-07 20:34:42] [Rank 0] Group 7 Loss: 5.3727
+[2025-07-07 20:34:42] [Rank 0] Group 8 Loss: 5.3147
+[2025-07-07 20:34:42] [Rank 0] Group 9 Loss: 5.3732
+[2025-07-07 20:34:42] [Rank 0] Group 10 Loss: 5.3627
+[2025-07-07 20:34:42] [Rank 0] Group 11 Loss: 5.4097
+[2025-07-07 20:34:42] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:34:42] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:34:42] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:34:42] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 20:34:42] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 20:34:42] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-07 20:34:42] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-07 20:34:42] [Rank 0] Group 7 FTA: 0.9792
+[2025-07-07 20:34:42] [Rank 0] Group 8 FTA: 0.9740
+[2025-07-07 20:34:42] [Rank 0] Group 9 FTA: 0.9961
+[2025-07-07 20:34:42] [Rank 0] Group 10 FTA: 0.9805
+[2025-07-07 20:34:42] [Rank 0] Group 11 FTA: 0.9854
+[2025-07-07 20:34:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:34:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 20:34:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 20:34:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 20:34:43] [Rank 0] step:8501/10000 train_time:687999ms step_avg:80.93ms
+[2025-07-07 20:34:45] [Rank 0] step:8521/10000 train_time:689521ms step_avg:80.92ms
+[2025-07-07 20:34:46] [Rank 0] step:8541/10000 train_time:691009ms step_avg:80.90ms
+[2025-07-07 20:34:48] [Rank 0] step:8561/10000 train_time:693157ms step_avg:80.97ms
+[2025-07-07 20:34:50] [Rank 0] step:8581/10000 train_time:694648ms step_avg:80.95ms
+[2025-07-07 20:34:51] [Rank 0] step:8601/10000 train_time:696140ms step_avg:80.94ms
+[2025-07-07 20:34:53] [Rank 0] step:8621/10000 train_time:697636ms step_avg:80.92ms
+[2025-07-07 20:34:55] [Rank 0] step:8641/10000 train_time:699129ms step_avg:80.91ms
+[2025-07-07 20:34:56] [Rank 0] step:8661/10000 train_time:701268ms step_avg:80.97ms
+[2025-07-07 20:34:58] [Rank 0] step:8681/10000 train_time:702761ms step_avg:80.95ms
+[2025-07-07 20:34:59] [Rank 0] step:8701/10000 train_time:704259ms step_avg:80.94ms
+[2025-07-07 20:35:01] [Rank 0] step:8721/10000 train_time:705787ms step_avg:80.93ms
+[2025-07-07 20:35:03] [Rank 0] step:8741/10000 train_time:707911ms step_avg:80.99ms
+[2025-07-07 20:35:05] [Rank 0] step:8761/10000 train_time:709406ms step_avg:80.97ms
+[2025-07-07 20:35:06] [Rank 0] step:8781/10000 train_time:710902ms step_avg:80.96ms
+[2025-07-07 20:35:08] [Rank 0] step:8801/10000 train_time:712401ms step_avg:80.95ms
+[2025-07-07 20:35:09] [Rank 0] step:8821/10000 train_time:714011ms step_avg:80.94ms
+[2025-07-07 20:35:11] [Rank 0] step:8841/10000 train_time:715596ms step_avg:80.94ms
+[2025-07-07 20:35:12] [Rank 0] step:8861/10000 train_time:717092ms step_avg:80.93ms
+[2025-07-07 20:35:14] [Rank 0] step:8881/10000 train_time:718589ms step_avg:80.91ms
+[2025-07-07 20:35:15] [Rank 0] step:8901/10000 train_time:720087ms step_avg:80.90ms
+[2025-07-07 20:35:17] [Rank 0] step:8921/10000 train_time:721821ms step_avg:80.91ms
+[2025-07-07 20:35:18] [Rank 0] step:8941/10000 train_time:723317ms step_avg:80.90ms
+[2025-07-07 20:35:20] [Rank 0] step:8961/10000 train_time:724813ms step_avg:80.89ms
+[2025-07-07 20:35:21] [Rank 0] step:8981/10000 train_time:726315ms step_avg:80.87ms
+[2025-07-07 20:35:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:35:24] [Rank 0] PRINT: step:9000/10000 train_loss:0.8564 val_loss:0.8585 train_time:727810ms step_avg:80.87ms
+[2025-07-07 20:35:24] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:35:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:35:24] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:40:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:40:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:40:55] [Rank 0] Total Loss: 5.4421
+[2025-07-07 20:40:55] [Rank 0] Total FTA: 0.9874
+[2025-07-07 20:40:55] [Rank 0] Group 0 Loss: 5.8470
+[2025-07-07 20:40:55] [Rank 0] Group 1 Loss: 5.3825
+[2025-07-07 20:40:55] [Rank 0] Group 2 Loss: 5.3705
+[2025-07-07 20:40:55] [Rank 0] Group 3 Loss: 5.2777
+[2025-07-07 20:40:55] [Rank 0] Group 4 Loss: 5.4260
+[2025-07-07 20:40:55] [Rank 0] Group 5 Loss: 5.3692
+[2025-07-07 20:40:55] [Rank 0] Group 6 Loss: 5.3135
+[2025-07-07 20:40:55] [Rank 0] Group 7 Loss: 5.4212
+[2025-07-07 20:40:55] [Rank 0] Group 8 Loss: 5.4379
+[2025-07-07 20:40:55] [Rank 0] Group 9 Loss: 5.4312
+[2025-07-07 20:40:55] [Rank 0] Group 10 Loss: 5.3773
+[2025-07-07 20:40:55] [Rank 0] Group 11 Loss: 5.3752
+[2025-07-07 20:40:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:40:55] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:40:55] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:40:55] [Rank 0] Group 3 FTA: 0.9583
+[2025-07-07 20:40:55] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 20:40:55] [Rank 0] Group 5 FTA: 0.9896
+[2025-07-07 20:40:55] [Rank 0] Group 6 FTA: 0.9818
+[2025-07-07 20:40:55] [Rank 0] Group 7 FTA: 0.9870
+[2025-07-07 20:40:55] [Rank 0] Group 8 FTA: 0.9661
+[2025-07-07 20:40:55] [Rank 0] Group 9 FTA: 0.9844
+[2025-07-07 20:40:55] [Rank 0] Group 10 FTA: 0.9922
+[2025-07-07 20:40:55] [Rank 0] Group 11 FTA: 0.9824
+[2025-07-07 20:40:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:40:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 20:40:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 20:40:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 20:40:57] [Rank 0] step:9001/10000 train_time:727838ms step_avg:80.86ms
+[2025-07-07 20:40:59] [Rank 0] step:9021/10000 train_time:730023ms step_avg:80.92ms
+[2025-07-07 20:41:00] [Rank 0] step:9041/10000 train_time:731516ms step_avg:80.91ms
+[2025-07-07 20:41:02] [Rank 0] step:9061/10000 train_time:733009ms step_avg:80.90ms
+[2025-07-07 20:41:03] [Rank 0] step:9081/10000 train_time:734498ms step_avg:80.88ms
+[2025-07-07 20:41:06] [Rank 0] step:9101/10000 train_time:736633ms step_avg:80.94ms
+[2025-07-07 20:41:07] [Rank 0] step:9121/10000 train_time:738126ms step_avg:80.93ms
+[2025-07-07 20:41:09] [Rank 0] step:9141/10000 train_time:739620ms step_avg:80.91ms
+[2025-07-07 20:41:10] [Rank 0] step:9161/10000 train_time:741114ms step_avg:80.90ms
+[2025-07-07 20:41:12] [Rank 0] step:9181/10000 train_time:742608ms step_avg:80.89ms
+[2025-07-07 20:41:14] [Rank 0] step:9201/10000 train_time:744743ms step_avg:80.94ms
+[2025-07-07 20:41:15] [Rank 0] step:9221/10000 train_time:746237ms step_avg:80.93ms
+[2025-07-07 20:41:17] [Rank 0] step:9241/10000 train_time:747733ms step_avg:80.91ms
+[2025-07-07 20:41:18] [Rank 0] step:9261/10000 train_time:749229ms step_avg:80.90ms
+[2025-07-07 20:41:20] [Rank 0] step:9281/10000 train_time:751386ms step_avg:80.96ms
+[2025-07-07 20:41:22] [Rank 0] step:9301/10000 train_time:752880ms step_avg:80.95ms
+[2025-07-07 20:41:23] [Rank 0] step:9321/10000 train_time:754377ms step_avg:80.93ms
+[2025-07-07 20:41:25] [Rank 0] step:9341/10000 train_time:755873ms step_avg:80.92ms
+[2025-07-07 20:41:26] [Rank 0] step:9361/10000 train_time:757421ms step_avg:80.91ms
+[2025-07-07 20:41:28] [Rank 0] step:9381/10000 train_time:759106ms step_avg:80.92ms
+[2025-07-07 20:41:30] [Rank 0] step:9401/10000 train_time:760733ms step_avg:80.92ms
+[2025-07-07 20:41:31] [Rank 0] step:9421/10000 train_time:762230ms step_avg:80.91ms
+[2025-07-07 20:41:33] [Rank 0] step:9441/10000 train_time:763728ms step_avg:80.89ms
+[2025-07-07 20:41:35] [Rank 0] step:9461/10000 train_time:765882ms step_avg:80.95ms
+[2025-07-07 20:41:36] [Rank 0] step:9481/10000 train_time:767380ms step_avg:80.94ms
+[2025-07-07 20:41:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:41:39] [Rank 0] PRINT: step:9500/10000 train_loss:0.8556 val_loss:0.8583 train_time:768876ms step_avg:80.93ms
+[2025-07-07 20:41:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:41:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:41:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:47:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:47:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:47:11] [Rank 0] Total Loss: 5.4958
+[2025-07-07 20:47:11] [Rank 0] Total FTA: 0.9911
+[2025-07-07 20:47:11] [Rank 0] Group 0 Loss: 5.9832
+[2025-07-07 20:47:11] [Rank 0] Group 1 Loss: 5.4390
+[2025-07-07 20:47:11] [Rank 0] Group 2 Loss: 5.3210
+[2025-07-07 20:47:11] [Rank 0] Group 3 Loss: 5.3160
+[2025-07-07 20:47:11] [Rank 0] Group 4 Loss: 5.4451
+[2025-07-07 20:47:11] [Rank 0] Group 5 Loss: 5.3813
+[2025-07-07 20:47:11] [Rank 0] Group 6 Loss: 5.3686
+[2025-07-07 20:47:11] [Rank 0] Group 7 Loss: 5.5069
+[2025-07-07 20:47:11] [Rank 0] Group 8 Loss: 5.4162
+[2025-07-07 20:47:11] [Rank 0] Group 9 Loss: 5.5035
+[2025-07-07 20:47:11] [Rank 0] Group 10 Loss: 5.4242
+[2025-07-07 20:47:11] [Rank 0] Group 11 Loss: 5.4532
+[2025-07-07 20:47:11] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:47:11] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:47:11] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:47:11] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 20:47:11] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 20:47:11] [Rank 0] Group 5 FTA: 0.9896
+[2025-07-07 20:47:11] [Rank 0] Group 6 FTA: 0.9818
+[2025-07-07 20:47:11] [Rank 0] Group 7 FTA: 0.9896
+[2025-07-07 20:47:11] [Rank 0] Group 8 FTA: 0.9870
+[2025-07-07 20:47:11] [Rank 0] Group 9 FTA: 0.9922
+[2025-07-07 20:47:11] [Rank 0] Group 10 FTA: 0.9785
+[2025-07-07 20:47:11] [Rank 0] Group 11 FTA: 0.9834
+[2025-07-07 20:47:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:47:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 20:47:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 20:47:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 20:47:13] [Rank 0] step:9501/10000 train_time:768898ms step_avg:80.93ms
+[2025-07-07 20:47:14] [Rank 0] step:9521/10000 train_time:770385ms step_avg:80.91ms
+[2025-07-07 20:47:16] [Rank 0] step:9541/10000 train_time:772542ms step_avg:80.97ms
+[2025-07-07 20:47:18] [Rank 0] step:9561/10000 train_time:774014ms step_avg:80.96ms
+[2025-07-07 20:47:19] [Rank 0] step:9581/10000 train_time:775508ms step_avg:80.94ms
+[2025-07-07 20:47:21] [Rank 0] step:9601/10000 train_time:777000ms step_avg:80.93ms
+[2025-07-07 20:47:22] [Rank 0] step:9621/10000 train_time:778497ms step_avg:80.92ms
+[2025-07-07 20:47:25] [Rank 0] step:9641/10000 train_time:780659ms step_avg:80.97ms
+[2025-07-07 20:47:26] [Rank 0] step:9661/10000 train_time:782152ms step_avg:80.96ms
+[2025-07-07 20:47:28] [Rank 0] step:9681/10000 train_time:783646ms step_avg:80.95ms
+[2025-07-07 20:47:29] [Rank 0] step:9701/10000 train_time:785141ms step_avg:80.93ms
+[2025-07-07 20:47:31] [Rank 0] step:9721/10000 train_time:786686ms step_avg:80.93ms
+[2025-07-07 20:47:33] [Rank 0] step:9741/10000 train_time:788769ms step_avg:80.97ms
+[2025-07-07 20:47:34] [Rank 0] step:9761/10000 train_time:790265ms step_avg:80.96ms
+[2025-07-07 20:47:36] [Rank 0] step:9781/10000 train_time:791760ms step_avg:80.95ms
+[2025-07-07 20:47:37] [Rank 0] step:9801/10000 train_time:793256ms step_avg:80.94ms
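(For reading the step lines: step_avg is the cumulative train_time divided by the step index, matching avg_step_time = training_time_ms / max(step, 1) in the script logged below, e.g. 793256 ms / 9801 steps ≈ 80.94 ms.)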
+[2025-07-07 20:47:39] [Rank 0] step:9821/10000 train_time:795412ms step_avg:80.99ms
+[2025-07-07 20:47:41] [Rank 0] step:9841/10000 train_time:796908ms step_avg:80.98ms
+[2025-07-07 20:47:42] [Rank 0] step:9861/10000 train_time:798403ms step_avg:80.97ms
+[2025-07-07 20:47:44] [Rank 0] step:9881/10000 train_time:799900ms step_avg:80.95ms
+[2025-07-07 20:47:46] [Rank 0] step:9901/10000 train_time:801654ms step_avg:80.97ms
+[2025-07-07 20:47:48] [Rank 0] step:9921/10000 train_time:803664ms step_avg:81.01ms
+[2025-07-07 20:47:49] [Rank 0] step:9941/10000 train_time:805303ms step_avg:81.01ms
+[2025-07-07 20:47:51] [Rank 0] step:9961/10000 train_time:806802ms step_avg:81.00ms
+[2025-07-07 20:47:52] [Rank 0] step:9981/10000 train_time:808300ms step_avg:80.98ms
+[2025-07-07 20:47:54] [Rank 0] step:10000/10000 train_time:810378ms step_avg:81.04ms
+[2025-07-07 20:47:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:47:55] [Rank 0] PRINT: step:10000/10000 train_loss:0.8549 val_loss:0.8583 train_time:810459ms step_avg:81.05ms
+[2025-07-07 20:47:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:47:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
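For context on the Group 0-11 labels and the 5633 figure: in the script logged below, generate_powerlaw_selection_counts(m) with m = 11 builds one class for group 0 and 2**(g-1) classes for each group g >= 1, assigning every class in group g 2**(m-g) training samples; the detailed evaluation then draws max(1, int(len(items) * 5000 / len(qa_data))) items from each class. A minimal, self-contained sketch of that arithmetic (the function mirrors the logged script; the exact figure 5633 depends on the class sizes in qa_tail.jsonl):

def powerlaw_selection_counts(m: int):
    # Group 0 holds 1 class; group g >= 1 holds 2**(g-1) classes,
    # and every class in group g receives 2**(m - g) training samples.
    counts, groups, class_id = {}, [], 0
    for g in range(m + 1):
        num_classes = 1 if g == 0 else 2 ** (g - 1)
        samples_per_class = 2 ** (m - g)
        if samples_per_class < 1:
            continue
        for _ in range(num_classes):
            counts[class_id] = samples_per_class
            groups.append(g)
            class_id += 1
    return counts, groups

counts, groups = powerlaw_selection_counts(11)
print(len(counts))           # 2048 classes across groups 0..11
print(sum(counts.values()))  # 13312 training samples in total

With roughly 2048 classes, the max(1, ...) floor plus int() truncation in the stratified sampler rounds many small classes up to one item, which is how the realized evaluation set (5633) overshoots the ~5000 target.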
+[2025-07-07 20:47:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:53:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:53:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:53:26] [Rank 0] Total Loss: 5.5450
+[2025-07-07 20:53:26] [Rank 0] Total FTA: 0.9927
+[2025-07-07 20:53:26] [Rank 0] Group 0 Loss: 6.0165
+[2025-07-07 20:53:26] [Rank 0] Group 1 Loss: 5.5197
+[2025-07-07 20:53:26] [Rank 0] Group 2 Loss: 5.4562
+[2025-07-07 20:53:26] [Rank 0] Group 3 Loss: 5.3705
+[2025-07-07 20:53:26] [Rank 0] Group 4 Loss: 5.5239
+[2025-07-07 20:53:26] [Rank 0] Group 5 Loss: 5.4589
+[2025-07-07 20:53:26] [Rank 0] Group 6 Loss: 5.3263
+[2025-07-07 20:53:26] [Rank 0] Group 7 Loss: 5.5551
+[2025-07-07 20:53:26] [Rank 0] Group 8 Loss: 5.4662
+[2025-07-07 20:53:26] [Rank 0] Group 9 Loss: 5.5039
+[2025-07-07 20:53:26] [Rank 0] Group 10 Loss: 5.4634
+[2025-07-07 20:53:26] [Rank 0] Group 11 Loss: 5.4985
+[2025-07-07 20:53:26] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 20:53:26] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 20:53:26] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 20:53:26] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 20:53:26] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 20:53:26] [Rank 0] Group 5 FTA: 0.9974
+[2025-07-07 20:53:26] [Rank 0] Group 6 FTA: 0.9818
+[2025-07-07 20:53:26] [Rank 0] Group 7 FTA: 0.9870
+[2025-07-07 20:53:26] [Rank 0] Group 8 FTA: 0.9844
+[2025-07-07 20:53:26] [Rank 0] Group 9 FTA: 0.9922
+[2025-07-07 20:53:26] [Rank 0] Group 10 FTA: 0.9863
+[2025-07-07 20:53:26] [Rank 0] Group 11 FTA: 0.9873
+[2025-07-07 20:53:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-07 20:53:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-07 20:53:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-07 20:53:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-07 20:53:27] [Rank 0] step:10001/10000 train_time:810480ms step_avg:81.04ms
+[2025-07-07 20:53:27] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 20:53:27 2025 ---
+[2025-07-07 20:53:27] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10336 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/config.json b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..30f8aeb88acbab37c8cd9f144f44c075311f04b7
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 45,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "976d6688-744a-4dad-82a3-f2eaceaad17d",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..34f0e71d5d5e2d807e4f94180aa092ce92a066e5
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:313906e0089784734ea3c8011881f3631a0a6db602ef84f84aed0350888e51dd
+size 346604
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..372ee43b5019d2993c6a4927a8e55d7b4a65e20c
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cb357f990044cf760052ff49e49eb441e42f4a4e558db85b5ec2cfe5c01bc19
+size 410490
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..0ec6da435c9fb1a4902294127212005e040739e7
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04e17ee03e0da37c85d73f498ef18d426b803fb8179112d95f9814beec481509
+size 109211
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..f46d4e711e0d9ac3f5747727134f8b6d8b111686
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72eedf06e9804ae84ee84d3406562900851c765bf1d4ebe5d00a26beb813efd1
+size 108802
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/training_log_976d6688-744a-4dad-82a3-f2eaceaad17d.txt b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/training_log_976d6688-744a-4dad-82a3-f2eaceaad17d.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fcdc40457ea481874e7381cb918fda27900d9a76
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/training_log_976d6688-744a-4dad-82a3-f2eaceaad17d.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 17:27:22] [Rank 0] PRINT: --- Script Start: Sun Jul 6 17:27:22 2025 ---
+[2025-07-06 17:27:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-06 17:27:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 17:27:22] [Rank 0] PRINT: Using fixed seed: 45
+[2025-07-06 17:27:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45
+[2025-07-06 17:27:22] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
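+# NOTE: the module-level defaults above are what non-master ranks keep, since
+# they skip the master_process block below; rank 0 rebuilds run_dir_path under
+# the relative base_log_dir defined just below, which is why the run directory
+# is logged as logs_bios/qa_0704/... rather than as an absolute path.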
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the log file once per call
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
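+        # (First-token accuracy counts a sample as correct when the model's
+        # argmax prediction at the final prompt position equals the first
+        # token of the encoded answer; see the inference loop below.)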
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
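+# train_loss_sum and train_step_count (next line) accumulate training loss
+# between validation reports; keeping them as device tensors lets the mean
+# training loss be all-reduced across ranks when it is logged.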
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    # Average gradients across ranks (manual data parallelism; no DDP wrapper is used).
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Muon momentum warmup: ramp linearly from 0.85 to 0.95 over the first 300 steps.
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Per-token loss for this batch; computed for reference but not included in the console line below.
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
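+# Example launch (illustrative sketch, not from the original log; the script filename
+# `train_gpt_qa.py` is hypothetical). torchrun sets the RANK/LOCAL_RANK/WORLD_SIZE
+# environment variables this script reads; --nproc_per_node=4 matches the observed
+# val_batch_size = 4 * 65536 = 262144 in the warning logged below.
+#   torchrun --nproc_per_node=4 train_gpt_qa.py --optimizer_mode 0 \
+#       --model_parameterization qkvo --adam_lr 1e-4 --seed 42
+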
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 17:27:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 17:27:22] [Rank 0] PRINT: Constructing model...
+[2025-07-06 17:27:24] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-06 17:27:24] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-06 17:27:24] [Rank 0] PRINT: Testing model forward function:
+[2025-07-06 17:27:26] [Rank 0] PRINT: Model test - Result type:
+[2025-07-06 17:27:26] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-06 17:27:26] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-06 17:27:26] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-06 17:27:26] [Rank 0] PRINT: Model returns:
+[2025-07-06 17:27:26] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-06 17:27:26] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-06 17:27:26] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-06 17:27:26] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-06 17:27:26] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-06 17:27:26] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-06 17:27:26] [Rank 0] PRINT: Model compilation complete.
+[2025-07-06 17:27:26] [Rank 0] PRINT: Starting warmup...
+[2025-07-06 17:37:24] [Rank 0] PRINT: Warmup complete.
+[2025-07-06 17:37:24] [Rank 0] PRINT: Starting training...
+[2025-07-06 17:37:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:41:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-06 17:41:35] [Rank 0] step:21/10000 train_time:1753ms step_avg:83.47ms
+[2025-07-06 17:41:36] [Rank 0] step:41/10000 train_time:3214ms step_avg:78.39ms
+[2025-07-06 17:41:37] [Rank 0] step:61/10000 train_time:4676ms step_avg:76.65ms
+[2025-07-06 17:41:39] [Rank 0] step:81/10000 train_time:6139ms step_avg:75.78ms
+[2025-07-06 17:41:41] [Rank 0] step:101/10000 train_time:8273ms step_avg:81.91ms
+[2025-07-06 17:41:43] [Rank 0] step:121/10000 train_time:9739ms step_avg:80.48ms
+[2025-07-06 17:41:44] [Rank 0] step:141/10000 train_time:11205ms step_avg:79.47ms
+[2025-07-06 17:41:45] [Rank 0] step:161/10000 train_time:12674ms step_avg:78.72ms
+[2025-07-06 17:41:48] [Rank 0] step:181/10000 train_time:14400ms step_avg:79.56ms
+[2025-07-06 17:41:49] [Rank 0] step:201/10000 train_time:16265ms step_avg:80.92ms
+[2025-07-06 17:41:51] [Rank 0] step:221/10000 train_time:17733ms step_avg:80.24ms
+[2025-07-06 17:41:52] [Rank 0] step:241/10000 train_time:19202ms step_avg:79.68ms
+[2025-07-06 17:41:53] [Rank 0] step:261/10000 train_time:20670ms step_avg:79.20ms
+[2025-07-06 17:41:56] [Rank 0] step:281/10000 train_time:22794ms step_avg:81.12ms
+[2025-07-06 17:41:57] [Rank 0] step:301/10000 train_time:24264ms step_avg:80.61ms
+[2025-07-06 17:41:59] [Rank 0] step:321/10000 train_time:25732ms step_avg:80.16ms
+[2025-07-06 17:42:00] [Rank 0] step:341/10000 train_time:27198ms step_avg:79.76ms
+[2025-07-06 17:42:02] [Rank 0] step:361/10000 train_time:28923ms step_avg:80.12ms
+[2025-07-06 17:42:04] [Rank 0] step:381/10000 train_time:30797ms step_avg:80.83ms
+[2025-07-06 17:42:05] [Rank 0] step:401/10000 train_time:32265ms step_avg:80.46ms
+[2025-07-06 17:42:07] [Rank 0] step:421/10000 train_time:33733ms step_avg:80.13ms
+[2025-07-06 17:42:08] [Rank 0] step:441/10000 train_time:35200ms step_avg:79.82ms
+[2025-07-06 17:42:10] [Rank 0] step:461/10000 train_time:37321ms step_avg:80.96ms
+[2025-07-06 17:42:12] [Rank 0] step:481/10000 train_time:38788ms step_avg:80.64ms
+[2025-07-06 17:42:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:42:14] [Rank 0] PRINT: step:500/10000 train_loss:2.2938 val_loss:1.3645 train_time:40253ms step_avg:80.51ms
+[2025-07-06 17:42:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:42:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 17:42:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:47:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:47:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:47:45] [Rank 0] Total Loss: 4.5505
+[2025-07-06 17:47:45] [Rank 0] Total FTA: 0.1173
+[2025-07-06 17:47:45] [Rank 0] Group 0 Loss: 4.6620
+[2025-07-06 17:47:45] [Rank 0] Group 1 Loss: 4.3496
+[2025-07-06 17:47:45] [Rank 0] Group 2 Loss: 4.3312
+[2025-07-06 17:47:45] [Rank 0] Group 3 Loss: 4.6016
+[2025-07-06 17:47:45] [Rank 0] Group 4 Loss: 4.6045
+[2025-07-06 17:47:45] [Rank 0] Group 5 Loss: 4.4918
+[2025-07-06 17:47:45] [Rank 0] Group 6 Loss: 4.5273
+[2025-07-06 17:47:45] [Rank 0] Group 7 Loss: 4.5676
+[2025-07-06 17:47:45] [Rank 0] Group 8 Loss: 4.5325
+[2025-07-06 17:47:45] [Rank 0] Group 9 Loss: 4.5701
+[2025-07-06 17:47:45] [Rank 0] Group 10 Loss: 4.6102
+[2025-07-06 17:47:45] [Rank 0] Group 11 Loss: 4.5809
+[2025-07-06 17:47:45] [Rank 0] Group 0 FTA: 0.1795
+[2025-07-06 17:47:45] [Rank 0] Group 1 FTA: 0.3281
+[2025-07-06 17:47:45] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-06 17:47:45] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-06 17:47:45] [Rank 0] Group 4 FTA: 0.0130
+[2025-07-06 17:47:45] [Rank 0] Group 5 FTA: 0.1406
+[2025-07-06 17:47:45] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-06 17:47:45] [Rank 0] Group 7 FTA: 0.1172
+[2025-07-06 17:47:45] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-06 17:47:45] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-06 17:47:45] [Rank 0] Group 10 FTA: 0.1074
+[2025-07-06 17:47:45] [Rank 0] Group 11 FTA: 0.0879
+[2025-07-06 17:47:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 17:47:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 17:47:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 17:47:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 17:47:47] [Rank 0] step:501/10000 train_time:40274ms step_avg:80.39ms
+[2025-07-06 17:47:48] [Rank 0] step:521/10000 train_time:41756ms step_avg:80.14ms
+[2025-07-06 17:47:50] [Rank 0] step:541/10000 train_time:43212ms step_avg:79.87ms
+[2025-07-06 17:47:51] [Rank 0] step:561/10000 train_time:44909ms step_avg:80.05ms
+[2025-07-06 17:47:53] [Rank 0] step:581/10000 train_time:46364ms step_avg:79.80ms
+[2025-07-06 17:47:54] [Rank 0] step:601/10000 train_time:47822ms step_avg:79.57ms
+[2025-07-06 17:47:56] [Rank 0] step:621/10000 train_time:49282ms step_avg:79.36ms
+[2025-07-06 17:47:58] [Rank 0] step:641/10000 train_time:51405ms step_avg:80.19ms
+[2025-07-06 17:47:59] [Rank 0] step:661/10000 train_time:52865ms step_avg:79.98ms
+[2025-07-06 17:48:01] [Rank 0] step:681/10000 train_time:54326ms step_avg:79.77ms
+[2025-07-06 17:48:02] [Rank 0] step:701/10000 train_time:55789ms step_avg:79.58ms
+[2025-07-06 17:48:04] [Rank 0] step:721/10000 train_time:57327ms step_avg:79.51ms
+[2025-07-06 17:48:06] [Rank 0] step:741/10000 train_time:59374ms step_avg:80.13ms
+[2025-07-06 17:48:07] [Rank 0] step:761/10000 train_time:60846ms step_avg:79.96ms
+[2025-07-06 17:48:09] [Rank 0] step:781/10000 train_time:62320ms step_avg:79.80ms
+[2025-07-06 17:48:10] [Rank 0] step:801/10000 train_time:63793ms step_avg:79.64ms
+[2025-07-06 17:48:12] [Rank 0] step:821/10000 train_time:65910ms step_avg:80.28ms
+[2025-07-06 17:48:14] [Rank 0] step:841/10000 train_time:67382ms step_avg:80.12ms
+[2025-07-06 17:48:15] [Rank 0] step:861/10000 train_time:68856ms step_avg:79.97ms
+[2025-07-06 17:48:17] [Rank 0] step:881/10000 train_time:70330ms step_avg:79.83ms
+[2025-07-06 17:48:19] [Rank 0] step:901/10000 train_time:72510ms step_avg:80.48ms
+[2025-07-06 17:48:20] [Rank 0] step:921/10000 train_time:73945ms step_avg:80.29ms
+[2025-07-06 17:48:22] [Rank 0] step:941/10000 train_time:75418ms step_avg:80.15ms
+[2025-07-06 17:48:23] [Rank 0] step:961/10000 train_time:76890ms step_avg:80.01ms
+[2025-07-06 17:48:25] [Rank 0] step:981/10000 train_time:78363ms step_avg:79.88ms
+[2025-07-06 17:48:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:48:28] [Rank 0] PRINT: step:1000/10000 train_loss:1.2746 val_loss:1.1885 train_time:80501ms step_avg:80.50ms
+[2025-07-06 17:48:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:48:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 17:48:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:54:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:54:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:54:00] [Rank 0] Total Loss: 4.8112
+[2025-07-06 17:54:00] [Rank 0] Total FTA: 0.3094
+[2025-07-06 17:54:00] [Rank 0] Group 0 Loss: 5.0610
+[2025-07-06 17:54:00] [Rank 0] Group 1 Loss: 4.6469
+[2025-07-06 17:54:00] [Rank 0] Group 2 Loss: 4.6395
+[2025-07-06 17:54:00] [Rank 0] Group 3 Loss: 4.8472
+[2025-07-06 17:54:00] [Rank 0] Group 4 Loss: 4.8251
+[2025-07-06 17:54:00] [Rank 0] Group 5 Loss: 4.7524
+[2025-07-06 17:54:00] [Rank 0] Group 6 Loss: 4.7125
+[2025-07-06 17:54:00] [Rank 0] Group 7 Loss: 4.8282
+[2025-07-06 17:54:00] [Rank 0] Group 8 Loss: 4.7623
+[2025-07-06 17:54:00] [Rank 0] Group 9 Loss: 4.7507
+[2025-07-06 17:54:00] [Rank 0] Group 10 Loss: 4.7887
+[2025-07-06 17:54:00] [Rank 0] Group 11 Loss: 4.8280
+[2025-07-06 17:54:00] [Rank 0] Group 0 FTA: 0.3238
+[2025-07-06 17:54:00] [Rank 0] Group 1 FTA: 0.3255
+[2025-07-06 17:54:00] [Rank 0] Group 2 FTA: 0.4036
+[2025-07-06 17:54:00] [Rank 0] Group 3 FTA: 0.2760
+[2025-07-06 17:54:00] [Rank 0] Group 4 FTA: 0.2812
+[2025-07-06 17:54:00] [Rank 0] Group 5 FTA: 0.2682
+[2025-07-06 17:54:00] [Rank 0] Group 6 FTA: 0.2500
+[2025-07-06 17:54:00] [Rank 0] Group 7 FTA: 0.2917
+[2025-07-06 17:54:00] [Rank 0] Group 8 FTA: 0.3281
+[2025-07-06 17:54:00] [Rank 0] Group 9 FTA: 0.3047
+[2025-07-06 17:54:00] [Rank 0] Group 10 FTA: 0.3340
+[2025-07-06 17:54:00] [Rank 0] Group 11 FTA: 0.3066
+[2025-07-06 17:54:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 17:54:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 17:54:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 17:54:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 17:54:02] [Rank 0] step:1001/10000 train_time:80525ms step_avg:80.44ms
+[2025-07-06 17:54:03] [Rank 0] step:1021/10000 train_time:81982ms step_avg:80.30ms
+[2025-07-06 17:54:05] [Rank 0] step:1041/10000 train_time:83448ms step_avg:80.16ms
+[2025-07-06 17:54:06] [Rank 0] step:1061/10000 train_time:84916ms step_avg:80.03ms
+[2025-07-06 17:54:08] [Rank 0] step:1081/10000 train_time:86382ms step_avg:79.91ms
+[2025-07-06 17:54:10] [Rank 0] step:1101/10000 train_time:88522ms step_avg:80.40ms
+[2025-07-06 17:54:11] [Rank 0] step:1121/10000 train_time:89990ms step_avg:80.28ms
+[2025-07-06 17:54:13] [Rank 0] step:1141/10000 train_time:91458ms step_avg:80.16ms
+[2025-07-06 17:54:14] [Rank 0] step:1161/10000 train_time:92927ms step_avg:80.04ms
+[2025-07-06 17:54:16] [Rank 0] step:1181/10000 train_time:94634ms step_avg:80.13ms
+[2025-07-06 17:54:18] [Rank 0] step:1201/10000 train_time:96103ms step_avg:80.02ms
+[2025-07-06 17:54:19] [Rank 0] step:1221/10000 train_time:97572ms step_avg:79.91ms
+[2025-07-06 17:54:20] [Rank 0] step:1241/10000 train_time:99043ms step_avg:79.81ms
+[2025-07-06 17:54:23] [Rank 0] step:1261/10000 train_time:101190ms step_avg:80.25ms
+[2025-07-06 17:54:24] [Rank 0] step:1281/10000 train_time:102641ms step_avg:80.13ms
+[2025-07-06 17:54:26] [Rank 0] step:1301/10000 train_time:104112ms step_avg:80.02ms
+[2025-07-06 17:54:27] [Rank 0] step:1321/10000 train_time:105586ms step_avg:79.93ms
+[2025-07-06 17:54:28] [Rank 0] step:1341/10000 train_time:107058ms step_avg:79.83ms
+[2025-07-06 17:54:30] [Rank 0] step:1361/10000 train_time:108766ms step_avg:79.92ms
+[2025-07-06 17:54:32] [Rank 0] step:1381/10000 train_time:110236ms step_avg:79.82ms
+[2025-07-06 17:54:33] [Rank 0] step:1401/10000 train_time:111709ms step_avg:79.74ms
+[2025-07-06 17:54:35] [Rank 0] step:1421/10000 train_time:113182ms step_avg:79.65ms
+[2025-07-06 17:54:37] [Rank 0] step:1441/10000 train_time:115329ms step_avg:80.03ms
+[2025-07-06 17:54:38] [Rank 0] step:1461/10000 train_time:116782ms step_avg:79.93ms
+[2025-07-06 17:54:40] [Rank 0] step:1481/10000 train_time:118257ms step_avg:79.85ms
+[2025-07-06 17:54:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:54:42] [Rank 0] PRINT: step:1500/10000 train_loss:1.0756 val_loss:1.0097 train_time:119728ms step_avg:79.82ms
+[2025-07-06 17:54:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:54:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 17:54:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:00:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:00:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:00:14] [Rank 0] Total Loss: 5.1822
+[2025-07-06 18:00:14] [Rank 0] Total FTA: 0.7744
+[2025-07-06 18:00:14] [Rank 0] Group 0 Loss: 5.3681
+[2025-07-06 18:00:14] [Rank 0] Group 1 Loss: 5.0854
+[2025-07-06 18:00:14] [Rank 0] Group 2 Loss: 4.9886
+[2025-07-06 18:00:14] [Rank 0] Group 3 Loss: 5.3702
+[2025-07-06 18:00:14] [Rank 0] Group 4 Loss: 5.1332
+[2025-07-06 18:00:14] [Rank 0] Group 5 Loss: 5.1518
+[2025-07-06 18:00:14] [Rank 0] Group 6 Loss: 5.0923
+[2025-07-06 18:00:14] [Rank 0] Group 7 Loss: 5.2134
+[2025-07-06 18:00:14] [Rank 0] Group 8 Loss: 5.1372
+[2025-07-06 18:00:14] [Rank 0] Group 9 Loss: 5.1122
+[2025-07-06 18:00:14] [Rank 0] Group 10 Loss: 5.1652
+[2025-07-06 18:00:14] [Rank 0] Group 11 Loss: 5.1757
+[2025-07-06 18:00:14] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:00:14] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:00:14] [Rank 0] Group 2 FTA: 0.6641
+[2025-07-06 18:00:14] [Rank 0] Group 3 FTA: 0.7969
+[2025-07-06 18:00:14] [Rank 0] Group 4 FTA: 0.8151
+[2025-07-06 18:00:14] [Rank 0] Group 5 FTA: 0.7995
+[2025-07-06 18:00:14] [Rank 0] Group 6 FTA: 0.7292
+[2025-07-06 18:00:14] [Rank 0] Group 7 FTA: 0.6641
+[2025-07-06 18:00:14] [Rank 0] Group 8 FTA: 0.6589
+[2025-07-06 18:00:14] [Rank 0] Group 9 FTA: 0.6680
+[2025-07-06 18:00:14] [Rank 0] Group 10 FTA: 0.7012
+[2025-07-06 18:00:14] [Rank 0] Group 11 FTA: 0.6934
+[2025-07-06 18:00:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:00:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:00:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:00:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:00:16] [Rank 0] step:1501/10000 train_time:119750ms step_avg:79.78ms
+[2025-07-06 18:00:17] [Rank 0] step:1521/10000 train_time:121250ms step_avg:79.72ms
+[2025-07-06 18:00:20] [Rank 0] step:1541/10000 train_time:123456ms step_avg:80.11ms
+[2025-07-06 18:00:21] [Rank 0] step:1561/10000 train_time:124922ms step_avg:80.03ms
+[2025-07-06 18:00:23] [Rank 0] step:1581/10000 train_time:126390ms step_avg:79.94ms
+[2025-07-06 18:00:24] [Rank 0] step:1601/10000 train_time:127855ms step_avg:79.86ms
+[2025-07-06 18:00:26] [Rank 0] step:1621/10000 train_time:129323ms step_avg:79.78ms
+[2025-07-06 18:00:28] [Rank 0] step:1641/10000 train_time:131429ms step_avg:80.09ms
+[2025-07-06 18:00:29] [Rank 0] step:1661/10000 train_time:132898ms step_avg:80.01ms
+[2025-07-06 18:00:30] [Rank 0] step:1681/10000 train_time:134367ms step_avg:79.93ms
+[2025-07-06 18:00:32] [Rank 0] step:1701/10000 train_time:135837ms step_avg:79.86ms
+[2025-07-06 18:00:34] [Rank 0] step:1721/10000 train_time:137961ms step_avg:80.16ms
+[2025-07-06 18:00:36] [Rank 0] step:1741/10000 train_time:139432ms step_avg:80.09ms
+[2025-07-06 18:00:37] [Rank 0] step:1761/10000 train_time:140903ms step_avg:80.01ms
+[2025-07-06 18:00:39] [Rank 0] step:1781/10000 train_time:142373ms step_avg:79.94ms
+[2025-07-06 18:00:41] [Rank 0] step:1801/10000 train_time:144532ms step_avg:80.25ms
+[2025-07-06 18:00:42] [Rank 0] step:1821/10000 train_time:145966ms step_avg:80.16ms
+[2025-07-06 18:00:44] [Rank 0] step:1841/10000 train_time:147438ms step_avg:80.09ms
+[2025-07-06 18:00:45] [Rank 0] step:1861/10000 train_time:148911ms step_avg:80.02ms
+[2025-07-06 18:00:47] [Rank 0] step:1881/10000 train_time:150385ms step_avg:79.95ms
+[2025-07-06 18:00:49] [Rank 0] step:1901/10000 train_time:152520ms step_avg:80.23ms
+[2025-07-06 18:00:50] [Rank 0] step:1921/10000 train_time:153993ms step_avg:80.16ms
+[2025-07-06 18:00:52] [Rank 0] step:1941/10000 train_time:155467ms step_avg:80.10ms
+[2025-07-06 18:00:53] [Rank 0] step:1961/10000 train_time:156945ms step_avg:80.03ms
+[2025-07-06 18:00:55] [Rank 0] step:1981/10000 train_time:159124ms step_avg:80.32ms
+[2025-07-06 18:00:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:00:58] [Rank 0] PRINT: step:2000/10000 train_loss:0.9121 val_loss:0.9570 train_time:160560ms step_avg:80.28ms
+[2025-07-06 18:00:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:00:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:00:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:06:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:06:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:06:27] [Rank 0] Total Loss: 5.2874
+[2025-07-06 18:06:27] [Rank 0] Total FTA: 0.8276
+[2025-07-06 18:06:27] [Rank 0] Group 0 Loss: 5.5512
+[2025-07-06 18:06:27] [Rank 0] Group 1 Loss: 5.2201
+[2025-07-06 18:06:27] [Rank 0] Group 2 Loss: 5.0447
+[2025-07-06 18:06:27] [Rank 0] Group 3 Loss: 5.4767
+[2025-07-06 18:06:27] [Rank 0] Group 4 Loss: 5.2022
+[2025-07-06 18:06:27] [Rank 0] Group 5 Loss: 5.2400
+[2025-07-06 18:06:27] [Rank 0] Group 6 Loss: 5.2004
+[2025-07-06 18:06:27] [Rank 0] Group 7 Loss: 5.2761
+[2025-07-06 18:06:27] [Rank 0] Group 8 Loss: 5.2282
+[2025-07-06 18:06:27] [Rank 0] Group 9 Loss: 5.3215
+[2025-07-06 18:06:27] [Rank 0] Group 10 Loss: 5.2785
+[2025-07-06 18:06:27] [Rank 0] Group 11 Loss: 5.2391
+[2025-07-06 18:06:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:06:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:06:27] [Rank 0] Group 2 FTA: 0.7161
+[2025-07-06 18:06:27] [Rank 0] Group 3 FTA: 0.6042
+[2025-07-06 18:06:27] [Rank 0] Group 4 FTA: 0.8490
+[2025-07-06 18:06:27] [Rank 0] Group 5 FTA: 0.8594
+[2025-07-06 18:06:27] [Rank 0] Group 6 FTA: 0.8568
+[2025-07-06 18:06:27] [Rank 0] Group 7 FTA: 0.7682
+[2025-07-06 18:06:27] [Rank 0] Group 8 FTA: 0.8203
+[2025-07-06 18:06:27] [Rank 0] Group 9 FTA: 0.7578
+[2025-07-06 18:06:27] [Rank 0] Group 10 FTA: 0.7852
+[2025-07-06 18:06:27] [Rank 0] Group 11 FTA: 0.7920
+[2025-07-06 18:06:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:06:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:06:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:06:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:06:30] [Rank 0] step:2001/10000 train_time:160582ms step_avg:80.25ms
+[2025-07-06 18:06:31] [Rank 0] step:2021/10000 train_time:162069ms step_avg:80.19ms
+[2025-07-06 18:06:33] [Rank 0] step:2041/10000 train_time:163542ms step_avg:80.13ms
+[2025-07-06 18:06:34] [Rank 0] step:2061/10000 train_time:164999ms step_avg:80.06ms
+[2025-07-06 18:06:36] [Rank 0] step:2081/10000 train_time:167135ms step_avg:80.31ms
+[2025-07-06 18:06:38] [Rank 0] step:2101/10000 train_time:168691ms step_avg:80.29ms
+[2025-07-06 18:06:40] [Rank 0] step:2121/10000 train_time:170220ms step_avg:80.25ms
+[2025-07-06 18:06:41] [Rank 0] step:2141/10000 train_time:171685ms step_avg:80.19ms
+[2025-07-06 18:06:43] [Rank 0] step:2161/10000 train_time:173816ms step_avg:80.43ms
+[2025-07-06 18:06:45] [Rank 0] step:2181/10000 train_time:175263ms step_avg:80.36ms
+[2025-07-06 18:06:46] [Rank 0] step:2201/10000 train_time:176734ms step_avg:80.30ms
+[2025-07-06 18:06:47] [Rank 0] step:2221/10000 train_time:178201ms step_avg:80.23ms
+[2025-07-06 18:06:49] [Rank 0] step:2241/10000 train_time:179694ms step_avg:80.18ms
+[2025-07-06 18:06:51] [Rank 0] step:2261/10000 train_time:181532ms step_avg:80.29ms
+[2025-07-06 18:06:52] [Rank 0] step:2281/10000 train_time:183028ms step_avg:80.24ms
+[2025-07-06 18:06:54] [Rank 0] step:2301/10000 train_time:184523ms step_avg:80.19ms
+[2025-07-06 18:06:55] [Rank 0] step:2321/10000 train_time:186021ms step_avg:80.15ms
+[2025-07-06 18:06:57] [Rank 0] step:2341/10000 train_time:187586ms step_avg:80.13ms
+[2025-07-06 18:06:59] [Rank 0] step:2361/10000 train_time:189253ms step_avg:80.16ms
+[2025-07-06 18:07:00] [Rank 0] step:2381/10000 train_time:190751ms step_avg:80.11ms
+[2025-07-06 18:07:02] [Rank 0] step:2401/10000 train_time:192252ms step_avg:80.07ms
+[2025-07-06 18:07:03] [Rank 0] step:2421/10000 train_time:193750ms step_avg:80.03ms
+[2025-07-06 18:07:05] [Rank 0] step:2441/10000 train_time:195927ms step_avg:80.26ms
+[2025-07-06 18:07:07] [Rank 0] step:2461/10000 train_time:197416ms step_avg:80.22ms
+[2025-07-06 18:07:08] [Rank 0] step:2481/10000 train_time:198915ms step_avg:80.18ms
+[2025-07-06 18:07:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:07:11] [Rank 0] PRINT: step:2500/10000 train_loss:0.8919 val_loss:0.8791 train_time:200415ms step_avg:80.17ms
+[2025-07-06 18:07:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:07:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:07:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:12:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:12:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:12:43] [Rank 0] Total Loss: 5.2894
+[2025-07-06 18:12:43] [Rank 0] Total FTA: 0.9119
+[2025-07-06 18:12:43] [Rank 0] Group 0 Loss: 5.6017
+[2025-07-06 18:12:43] [Rank 0] Group 1 Loss: 5.2559
+[2025-07-06 18:12:43] [Rank 0] Group 2 Loss: 5.0651
+[2025-07-06 18:12:43] [Rank 0] Group 3 Loss: 5.2321
+[2025-07-06 18:12:43] [Rank 0] Group 4 Loss: 5.2131
+[2025-07-06 18:12:43] [Rank 0] Group 5 Loss: 5.2134
+[2025-07-06 18:12:43] [Rank 0] Group 6 Loss: 5.1422
+[2025-07-06 18:12:43] [Rank 0] Group 7 Loss: 5.2673
+[2025-07-06 18:12:43] [Rank 0] Group 8 Loss: 5.2524
+[2025-07-06 18:12:43] [Rank 0] Group 9 Loss: 5.2503
+[2025-07-06 18:12:43] [Rank 0] Group 10 Loss: 5.3324
+[2025-07-06 18:12:43] [Rank 0] Group 11 Loss: 5.2960
+[2025-07-06 18:12:43] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:12:43] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:12:43] [Rank 0] Group 2 FTA: 0.8333
+[2025-07-06 18:12:43] [Rank 0] Group 3 FTA: 0.8750
+[2025-07-06 18:12:43] [Rank 0] Group 4 FTA: 0.8646
+[2025-07-06 18:12:43] [Rank 0] Group 5 FTA: 0.8750
+[2025-07-06 18:12:43] [Rank 0] Group 6 FTA: 0.9245
+[2025-07-06 18:12:43] [Rank 0] Group 7 FTA: 0.8906
+[2025-07-06 18:12:43] [Rank 0] Group 8 FTA: 0.8906
+[2025-07-06 18:12:43] [Rank 0] Group 9 FTA: 0.9023
+[2025-07-06 18:12:43] [Rank 0] Group 10 FTA: 0.8945
+[2025-07-06 18:12:43] [Rank 0] Group 11 FTA: 0.9102
+[2025-07-06 18:12:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:12:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:12:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:12:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:12:44] [Rank 0] step:2501/10000 train_time:200440ms step_avg:80.14ms
+[2025-07-06 18:12:47] [Rank 0] step:2521/10000 train_time:202004ms step_avg:80.13ms
+[2025-07-06 18:12:48] [Rank 0] step:2541/10000 train_time:204091ms step_avg:80.32ms
+[2025-07-06 18:12:50] [Rank 0] step:2561/10000 train_time:205579ms step_avg:80.27ms
+[2025-07-06 18:12:51] [Rank 0] step:2581/10000 train_time:207070ms step_avg:80.23ms
+[2025-07-06 18:12:53] [Rank 0] step:2601/10000 train_time:208561ms step_avg:80.18ms
+[2025-07-06 18:12:55] [Rank 0] step:2621/10000 train_time:210703ms step_avg:80.39ms
+[2025-07-06 18:12:56] [Rank 0] step:2641/10000 train_time:212195ms step_avg:80.35ms
+[2025-07-06 18:12:58] [Rank 0] step:2661/10000 train_time:213745ms step_avg:80.32ms
+[2025-07-06 18:12:59] [Rank 0] step:2681/10000 train_time:215238ms step_avg:80.28ms
+[2025-07-06 18:13:01] [Rank 0] step:2701/10000 train_time:216805ms step_avg:80.27ms
+[2025-07-06 18:13:03] [Rank 0] step:2721/10000 train_time:218879ms step_avg:80.44ms
+[2025-07-06 18:13:04] [Rank 0] step:2741/10000 train_time:220373ms step_avg:80.40ms
+[2025-07-06 18:13:06] [Rank 0] step:2761/10000 train_time:221869ms step_avg:80.36ms
+[2025-07-06 18:13:07] [Rank 0] step:2781/10000 train_time:223366ms step_avg:80.32ms
+[2025-07-06 18:13:09] [Rank 0] step:2801/10000 train_time:225521ms step_avg:80.51ms
+[2025-07-06 18:13:11] [Rank 0] step:2821/10000 train_time:227018ms step_avg:80.47ms
+[2025-07-06 18:13:12] [Rank 0] step:2841/10000 train_time:228518ms step_avg:80.44ms
+[2025-07-06 18:13:14] [Rank 0] step:2861/10000 train_time:230013ms step_avg:80.40ms
+[2025-07-06 18:13:16] [Rank 0] step:2881/10000 train_time:232210ms step_avg:80.60ms
+[2025-07-06 18:13:18] [Rank 0] step:2901/10000 train_time:233667ms step_avg:80.55ms
+[2025-07-06 18:13:19] [Rank 0] step:2921/10000 train_time:235164ms step_avg:80.51ms
+[2025-07-06 18:13:21] [Rank 0] step:2941/10000 train_time:236660ms step_avg:80.47ms
+[2025-07-06 18:13:22] [Rank 0] step:2961/10000 train_time:238158ms step_avg:80.43ms
+[2025-07-06 18:13:24] [Rank 0] step:2981/10000 train_time:240306ms step_avg:80.61ms
+[2025-07-06 18:13:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:13:27] [Rank 0] PRINT: step:3000/10000 train_loss:0.8800 val_loss:0.8715 train_time:241802ms step_avg:80.60ms
+[2025-07-06 18:13:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:13:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:13:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:18:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:18:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:18:58] [Rank 0] Total Loss: 5.3331
+[2025-07-06 18:18:58] [Rank 0] Total FTA: 0.9132
+[2025-07-06 18:18:58] [Rank 0] Group 0 Loss: 5.6480
+[2025-07-06 18:18:58] [Rank 0] Group 1 Loss: 5.3956
+[2025-07-06 18:18:58] [Rank 0] Group 2 Loss: 5.1160
+[2025-07-06 18:18:58] [Rank 0] Group 3 Loss: 5.4156
+[2025-07-06 18:18:58] [Rank 0] Group 4 Loss: 5.2273
+[2025-07-06 18:18:58] [Rank 0] Group 5 Loss: 5.2542
+[2025-07-06 18:18:58] [Rank 0] Group 6 Loss: 5.1978
+[2025-07-06 18:18:58] [Rank 0] Group 7 Loss: 5.3439
+[2025-07-06 18:18:58] [Rank 0] Group 8 Loss: 5.3058
+[2025-07-06 18:18:58] [Rank 0] Group 9 Loss: 5.2740
+[2025-07-06 18:18:58] [Rank 0] Group 10 Loss: 5.3142
+[2025-07-06 18:18:58] [Rank 0] Group 11 Loss: 5.2743
+[2025-07-06 18:18:58] [Rank 0] Group 0 FTA: 0.8205
+[2025-07-06 18:18:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:18:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:18:58] [Rank 0] Group 3 FTA: 0.8854
+[2025-07-06 18:18:58] [Rank 0] Group 4 FTA: 0.9219
+[2025-07-06 18:18:58] [Rank 0] Group 5 FTA: 0.9323
+[2025-07-06 18:18:58] [Rank 0] Group 6 FTA: 0.8932
+[2025-07-06 18:18:58] [Rank 0] Group 7 FTA: 0.8880
+[2025-07-06 18:18:58] [Rank 0] Group 8 FTA: 0.9141
+[2025-07-06 18:18:58] [Rank 0] Group 9 FTA: 0.9219
+[2025-07-06 18:18:58] [Rank 0] Group 10 FTA: 0.9199
+[2025-07-06 18:18:58] [Rank 0] Group 11 FTA: 0.9287
+[2025-07-06 18:18:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:18:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:18:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:18:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:19:00] [Rank 0] step:3001/10000 train_time:241825ms step_avg:80.58ms
+[2025-07-06 18:19:01] [Rank 0] step:3021/10000 train_time:243323ms step_avg:80.54ms
+[2025-07-06 18:19:03] [Rank 0] step:3041/10000 train_time:244810ms step_avg:80.50ms
+[2025-07-06 18:19:04] [Rank 0] step:3061/10000 train_time:246581ms step_avg:80.56ms
+[2025-07-06 18:19:06] [Rank 0] step:3081/10000 train_time:248037ms step_avg:80.51ms
+[2025-07-06 18:19:07] [Rank 0] step:3101/10000 train_time:249529ms step_avg:80.47ms
+[2025-07-06 18:19:09] [Rank 0] step:3121/10000 train_time:251021ms step_avg:80.43ms
+[2025-07-06 18:19:10] [Rank 0] step:3141/10000 train_time:252514ms step_avg:80.39ms
+[2025-07-06 18:19:12] [Rank 0] step:3161/10000 train_time:254648ms step_avg:80.56ms
+[2025-07-06 18:19:14] [Rank 0] step:3181/10000 train_time:256140ms step_avg:80.52ms
+[2025-07-06 18:19:15] [Rank 0] step:3201/10000 train_time:257634ms step_avg:80.49ms
+[2025-07-06 18:19:17] [Rank 0] step:3221/10000 train_time:259125ms step_avg:80.45ms
+[2025-07-06 18:19:19] [Rank 0] step:3241/10000 train_time:260711ms step_avg:80.44ms
+[2025-07-06 18:19:20] [Rank 0] step:3261/10000 train_time:262440ms step_avg:80.48ms
+[2025-07-06 18:19:22] [Rank 0] step:3281/10000 train_time:263935ms step_avg:80.44ms
+[2025-07-06 18:19:23] [Rank 0] step:3301/10000 train_time:265430ms step_avg:80.41ms
+[2025-07-06 18:19:25] [Rank 0] step:3321/10000 train_time:266924ms step_avg:80.37ms
+[2025-07-06 18:19:27] [Rank 0] step:3341/10000 train_time:269081ms step_avg:80.54ms
+[2025-07-06 18:19:28] [Rank 0] step:3361/10000 train_time:270576ms step_avg:80.50ms
+[2025-07-06 18:19:30] [Rank 0] step:3381/10000 train_time:272071ms step_avg:80.47ms
+[2025-07-06 18:19:31] [Rank 0] step:3401/10000 train_time:273565ms step_avg:80.44ms
+[2025-07-06 18:19:33] [Rank 0] step:3421/10000 train_time:275128ms step_avg:80.42ms
+[2025-07-06 18:19:35] [Rank 0] step:3441/10000 train_time:276788ms step_avg:80.44ms
+[2025-07-06 18:19:36] [Rank 0] step:3461/10000 train_time:278281ms step_avg:80.40ms
+[2025-07-06 18:19:38] [Rank 0] step:3481/10000 train_time:279779ms step_avg:80.37ms
+[2025-07-06 18:19:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:19:40] [Rank 0] PRINT: step:3500/10000 train_loss:0.8736 val_loss:0.8683 train_time:281276ms step_avg:80.36ms
+[2025-07-06 18:19:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:19:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:19:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:25:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:25:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:25:12] [Rank 0] Total Loss: 5.3587
+[2025-07-06 18:25:12] [Rank 0] Total FTA: 0.9569
+[2025-07-06 18:25:12] [Rank 0] Group 0 Loss: 5.6476
+[2025-07-06 18:25:12] [Rank 0] Group 1 Loss: 5.3896
+[2025-07-06 18:25:12] [Rank 0] Group 2 Loss: 5.2238
+[2025-07-06 18:25:12] [Rank 0] Group 3 Loss: 5.3467
+[2025-07-06 18:25:12] [Rank 0] Group 4 Loss: 5.3373
+[2025-07-06 18:25:13] [Rank 0] Group 5 Loss: 5.3300
+[2025-07-06 18:25:13] [Rank 0] Group 6 Loss: 5.2139
+[2025-07-06 18:25:13] [Rank 0] Group 7 Loss: 5.3218
+[2025-07-06 18:25:13] [Rank 0] Group 8 Loss: 5.2616
+[2025-07-06 18:25:13] [Rank 0] Group 9 Loss: 5.2797
+[2025-07-06 18:25:13] [Rank 0] Group 10 Loss: 5.3391
+[2025-07-06 18:25:13] [Rank 0] Group 11 Loss: 5.3382
+[2025-07-06 18:25:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:25:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:25:13] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:25:13] [Rank 0] Group 3 FTA: 0.9141
+[2025-07-06 18:25:13] [Rank 0] Group 4 FTA: 0.9661
+[2025-07-06 18:25:13] [Rank 0] Group 5 FTA: 0.9714
+[2025-07-06 18:25:13] [Rank 0] Group 6 FTA: 0.9661
+[2025-07-06 18:25:13] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-06 18:25:13] [Rank 0] Group 8 FTA: 0.9115
+[2025-07-06 18:25:13] [Rank 0] Group 9 FTA: 0.9453
+[2025-07-06 18:25:13] [Rank 0] Group 10 FTA: 0.9355
+[2025-07-06 18:25:13] [Rank 0] Group 11 FTA: 0.9326
+[2025-07-06 18:25:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:25:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:25:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:25:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:25:14] [Rank 0] step:3501/10000 train_time:281298ms step_avg:80.35ms
+[2025-07-06 18:25:16] [Rank 0] step:3521/10000 train_time:283444ms step_avg:80.50ms
+[2025-07-06 18:25:18] [Rank 0] step:3541/10000 train_time:284929ms step_avg:80.47ms
+[2025-07-06 18:25:19] [Rank 0] step:3561/10000 train_time:286418ms step_avg:80.43ms
+[2025-07-06 18:25:21] [Rank 0] step:3581/10000 train_time:287908ms step_avg:80.40ms
+[2025-07-06 18:25:22] [Rank 0] step:3601/10000 train_time:289671ms step_avg:80.44ms
+[2025-07-06 18:25:24] [Rank 0] step:3621/10000 train_time:291133ms step_avg:80.40ms
+[2025-07-06 18:25:25] [Rank 0] step:3641/10000 train_time:292617ms step_avg:80.37ms
+[2025-07-06 18:25:27] [Rank 0] step:3661/10000 train_time:294110ms step_avg:80.34ms
+[2025-07-06 18:25:28] [Rank 0] step:3681/10000 train_time:295603ms step_avg:80.31ms
+[2025-07-06 18:25:31] [Rank 0] step:3701/10000 train_time:297749ms step_avg:80.45ms
+[2025-07-06 18:25:32] [Rank 0] step:3721/10000 train_time:299243ms step_avg:80.42ms
+[2025-07-06 18:25:34] [Rank 0] step:3741/10000 train_time:300738ms step_avg:80.39ms
+[2025-07-06 18:25:35] [Rank 0] step:3761/10000 train_time:302231ms step_avg:80.36ms
+[2025-07-06 18:25:37] [Rank 0] step:3781/10000 train_time:303895ms step_avg:80.37ms
+[2025-07-06 18:25:39] [Rank 0] step:3801/10000 train_time:306051ms step_avg:80.52ms
+[2025-07-06 18:25:40] [Rank 0] step:3821/10000 train_time:307542ms step_avg:80.49ms
+[2025-07-06 18:25:42] [Rank 0] step:3841/10000 train_time:309037ms step_avg:80.46ms
+[2025-07-06 18:25:43] [Rank 0] step:3861/10000 train_time:310533ms step_avg:80.43ms
+[2025-07-06 18:25:45] [Rank 0] step:3881/10000 train_time:312693ms step_avg:80.57ms
+[2025-07-06 18:25:47] [Rank 0] step:3901/10000 train_time:314186ms step_avg:80.54ms
+[2025-07-06 18:25:48] [Rank 0] step:3921/10000 train_time:315683ms step_avg:80.51ms
+[2025-07-06 18:25:50] [Rank 0] step:3941/10000 train_time:317181ms step_avg:80.48ms
+[2025-07-06 18:25:52] [Rank 0] step:3961/10000 train_time:319361ms step_avg:80.63ms
+[2025-07-06 18:25:54] [Rank 0] step:3981/10000 train_time:320838ms step_avg:80.59ms
+[2025-07-06 18:25:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:25:56] [Rank 0] PRINT: step:4000/10000 train_loss:0.8695 val_loss:0.8677 train_time:322334ms step_avg:80.58ms
+[2025-07-06 18:25:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:25:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:25:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:31:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:31:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:31:28] [Rank 0] Total Loss: 5.3689
+[2025-07-06 18:31:28] [Rank 0] Total FTA: 0.9320
+[2025-07-06 18:31:28] [Rank 0] Group 0 Loss: 5.5125
+[2025-07-06 18:31:28] [Rank 0] Group 1 Loss: 5.3377
+[2025-07-06 18:31:28] [Rank 0] Group 2 Loss: 5.2062
+[2025-07-06 18:31:28] [Rank 0] Group 3 Loss: 5.5109
+[2025-07-06 18:31:28] [Rank 0] Group 4 Loss: 5.3152
+[2025-07-06 18:31:28] [Rank 0] Group 5 Loss: 5.2512
+[2025-07-06 18:31:28] [Rank 0] Group 6 Loss: 5.2477
+[2025-07-06 18:31:28] [Rank 0] Group 7 Loss: 5.3495
+[2025-07-06 18:31:28] [Rank 0] Group 8 Loss: 5.3730
+[2025-07-06 18:31:28] [Rank 0] Group 9 Loss: 5.3624
+[2025-07-06 18:31:28] [Rank 0] Group 10 Loss: 5.4019
+[2025-07-06 18:31:28] [Rank 0] Group 11 Loss: 5.3810
+[2025-07-06 18:31:28] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-06 18:31:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:31:28] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-06 18:31:28] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 18:31:28] [Rank 0] Group 4 FTA: 0.9375
+[2025-07-06 18:31:28] [Rank 0] Group 5 FTA: 0.9661
+[2025-07-06 18:31:28] [Rank 0] Group 6 FTA: 0.9505
+[2025-07-06 18:31:28] [Rank 0] Group 7 FTA: 0.9297
+[2025-07-06 18:31:28] [Rank 0] Group 8 FTA: 0.9141
+[2025-07-06 18:31:28] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-06 18:31:28] [Rank 0] Group 10 FTA: 0.9395
+[2025-07-06 18:31:28] [Rank 0] Group 11 FTA: 0.9404
+[2025-07-06 18:31:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:31:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:31:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:31:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:31:29] [Rank 0] step:4001/10000 train_time:322360ms step_avg:80.57ms
+[2025-07-06 18:31:31] [Rank 0] step:4021/10000 train_time:323840ms step_avg:80.54ms
+[2025-07-06 18:31:32] [Rank 0] step:4041/10000 train_time:325326ms step_avg:80.51ms
+[2025-07-06 18:31:34] [Rank 0] step:4061/10000 train_time:327469ms step_avg:80.64ms
+[2025-07-06 18:31:36] [Rank 0] step:4081/10000 train_time:328958ms step_avg:80.61ms
+[2025-07-06 18:31:37] [Rank 0] step:4101/10000 train_time:330449ms step_avg:80.58ms
+[2025-07-06 18:31:39] [Rank 0] step:4121/10000 train_time:331939ms step_avg:80.55ms
+[2025-07-06 18:31:41] [Rank 0] step:4141/10000 train_time:333688ms step_avg:80.58ms
+[2025-07-06 18:31:43] [Rank 0] step:4161/10000 train_time:335578ms step_avg:80.65ms
+[2025-07-06 18:31:44] [Rank 0] step:4181/10000 train_time:337072ms step_avg:80.62ms
+[2025-07-06 18:31:46] [Rank 0] step:4201/10000 train_time:338562ms step_avg:80.59ms
+[2025-07-06 18:31:47] [Rank 0] step:4221/10000 train_time:340055ms step_avg:80.56ms
+[2025-07-06 18:31:49] [Rank 0] step:4241/10000 train_time:342189ms step_avg:80.69ms
+[2025-07-06 18:31:51] [Rank 0] step:4261/10000 train_time:343681ms step_avg:80.66ms
+[2025-07-06 18:31:52] [Rank 0] step:4281/10000 train_time:345174ms step_avg:80.63ms
+[2025-07-06 18:31:54] [Rank 0] step:4301/10000 train_time:346667ms step_avg:80.60ms
+[2025-07-06 18:31:56] [Rank 0] step:4321/10000 train_time:348834ms step_avg:80.73ms
+[2025-07-06 18:31:57] [Rank 0] step:4341/10000 train_time:350301ms step_avg:80.70ms
+[2025-07-06 18:31:59] [Rank 0] step:4361/10000 train_time:351797ms step_avg:80.67ms
+[2025-07-06 18:32:00] [Rank 0] step:4381/10000 train_time:353293ms step_avg:80.64ms
+[2025-07-06 18:32:02] [Rank 0] step:4401/10000 train_time:354790ms step_avg:80.62ms
+[2025-07-06 18:32:04] [Rank 0] step:4421/10000 train_time:356940ms step_avg:80.74ms
+[2025-07-06 18:32:05] [Rank 0] step:4441/10000 train_time:358439ms step_avg:80.71ms
+[2025-07-06 18:32:07] [Rank 0] step:4461/10000 train_time:359934ms step_avg:80.68ms
+[2025-07-06 18:32:08] [Rank 0] step:4481/10000 train_time:361429ms step_avg:80.66ms
+[2025-07-06 18:32:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:32:11] [Rank 0] PRINT: step:4500/10000 train_loss:0.8668 val_loss:0.8652 train_time:362922ms step_avg:80.65ms
+[2025-07-06 18:32:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:32:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:32:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:37:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:37:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:37:44] [Rank 0] Total Loss: 5.4033
+[2025-07-06 18:37:44] [Rank 0] Total FTA: 0.9617
+[2025-07-06 18:37:44] [Rank 0] Group 0 Loss: 5.6369
+[2025-07-06 18:37:44] [Rank 0] Group 1 Loss: 5.3665
+[2025-07-06 18:37:44] [Rank 0] Group 2 Loss: 5.2814
+[2025-07-06 18:37:44] [Rank 0] Group 3 Loss: 5.2893
+[2025-07-06 18:37:44] [Rank 0] Group 4 Loss: 5.4276
+[2025-07-06 18:37:44] [Rank 0] Group 5 Loss: 5.3586
+[2025-07-06 18:37:44] [Rank 0] Group 6 Loss: 5.3170
+[2025-07-06 18:37:44] [Rank 0] Group 7 Loss: 5.3902
+[2025-07-06 18:37:44] [Rank 0] Group 8 Loss: 5.3969
+[2025-07-06 18:37:44] [Rank 0] Group 9 Loss: 5.3569
+[2025-07-06 18:37:44] [Rank 0] Group 10 Loss: 5.3895
+[2025-07-06 18:37:44] [Rank 0] Group 11 Loss: 5.3960
+[2025-07-06 18:37:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:37:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:37:44] [Rank 0] Group 2 FTA: 0.9141
+[2025-07-06 18:37:44] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-06 18:37:44] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 18:37:44] [Rank 0] Group 5 FTA: 0.9349
+[2025-07-06 18:37:44] [Rank 0] Group 6 FTA: 0.9688
+[2025-07-06 18:37:44] [Rank 0] Group 7 FTA: 0.9297
+[2025-07-06 18:37:44] [Rank 0] Group 8 FTA: 0.9297
+[2025-07-06 18:37:44] [Rank 0] Group 9 FTA: 0.9531
+[2025-07-06 18:37:44] [Rank 0] Group 10 FTA: 0.9629
+[2025-07-06 18:37:44] [Rank 0] Group 11 FTA: 0.9551
+[2025-07-06 18:37:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:37:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:37:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:37:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:37:46] [Rank 0] step:4501/10000 train_time:362951ms step_avg:80.64ms
+[2025-07-06 18:37:48] [Rank 0] step:4521/10000 train_time:365149ms step_avg:80.77ms
+[2025-07-06 18:37:49] [Rank 0] step:4541/10000 train_time:366628ms step_avg:80.74ms
+[2025-07-06 18:37:51] [Rank 0] step:4561/10000 train_time:368117ms step_avg:80.71ms
+[2025-07-06 18:37:52] [Rank 0] step:4581/10000 train_time:369607ms step_avg:80.68ms
+[2025-07-06 18:37:54] [Rank 0] step:4601/10000 train_time:371752ms step_avg:80.80ms
+[2025-07-06 18:37:56] [Rank 0] step:4621/10000 train_time:373243ms step_avg:80.77ms
+[2025-07-06 18:37:57] [Rank 0] step:4641/10000 train_time:374733ms step_avg:80.74ms
+[2025-07-06 18:37:59] [Rank 0] step:4661/10000 train_time:376224ms step_avg:80.72ms
+[2025-07-06 18:38:01] [Rank 0] step:4681/10000 train_time:378391ms step_avg:80.84ms
+[2025-07-06 18:38:02] [Rank 0] step:4701/10000 train_time:379862ms step_avg:80.80ms
+[2025-07-06 18:38:04] [Rank 0] step:4721/10000 train_time:381355ms step_avg:80.78ms
+[2025-07-06 18:38:05] [Rank 0] step:4741/10000 train_time:382848ms step_avg:80.75ms
+[2025-07-06 18:38:07] [Rank 0] step:4761/10000 train_time:384343ms step_avg:80.73ms
+[2025-07-06 18:38:09] [Rank 0] step:4781/10000 train_time:386507ms step_avg:80.84ms
+[2025-07-06 18:38:11] [Rank 0] step:4801/10000 train_time:388002ms step_avg:80.82ms
+[2025-07-06 18:38:12] [Rank 0] step:4821/10000 train_time:389497ms step_avg:80.79ms
+[2025-07-06 18:38:13] [Rank 0] step:4841/10000 train_time:390990ms step_avg:80.77ms
+[2025-07-06 18:38:15] [Rank 0] step:4861/10000 train_time:392754ms step_avg:80.80ms
+[2025-07-06 18:38:17] [Rank 0] step:4881/10000 train_time:394210ms step_avg:80.76ms
+[2025-07-06 18:38:18] [Rank 0] step:4901/10000 train_time:395705ms step_avg:80.74ms
+[2025-07-06 18:38:20] [Rank 0] step:4921/10000 train_time:397201ms step_avg:80.72ms
+[2025-07-06 18:38:21] [Rank 0] step:4941/10000 train_time:398696ms step_avg:80.69ms
+[2025-07-06 18:38:23] [Rank 0] step:4961/10000 train_time:400843ms step_avg:80.80ms
+[2025-07-06 18:38:25] [Rank 0] step:4981/10000 train_time:402336ms step_avg:80.77ms
+[2025-07-06 18:38:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:38:27] [Rank 0] PRINT: step:5000/10000 train_loss:0.8644 val_loss:0.8633 train_time:403829ms step_avg:80.77ms
+[2025-07-06 18:38:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:38:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:38:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:43:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:43:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:43:58] [Rank 0] Total Loss: 5.3983
+[2025-07-06 18:43:58] [Rank 0] Total FTA: 0.9517
+[2025-07-06 18:43:58] [Rank 0] Group 0 Loss: 5.6637
+[2025-07-06 18:43:58] [Rank 0] Group 1 Loss: 5.1210
+[2025-07-06 18:43:58] [Rank 0] Group 2 Loss: 5.1346
+[2025-07-06 18:43:58] [Rank 0] Group 3 Loss: 5.4298
+[2025-07-06 18:43:58] [Rank 0] Group 4 Loss: 5.3398
+[2025-07-06 18:43:58] [Rank 0] Group 5 Loss: 5.4009
+[2025-07-06 18:43:58] [Rank 0] Group 6 Loss: 5.3638
+[2025-07-06 18:43:58] [Rank 0] Group 7 Loss: 5.4359
+[2025-07-06 18:43:58] [Rank 0] Group 8 Loss: 5.4180
+[2025-07-06 18:43:58] [Rank 0] Group 9 Loss: 5.4034
+[2025-07-06 18:43:58] [Rank 0] Group 10 Loss: 5.3648
+[2025-07-06 18:43:58] [Rank 0] Group 11 Loss: 5.4181
+[2025-07-06 18:43:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:43:58] [Rank 0] Group 1 FTA: 0.8177
+[2025-07-06 18:43:58] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:43:58] [Rank 0] Group 3 FTA: 0.9427
+[2025-07-06 18:43:59] [Rank 0] Group 4 FTA: 0.9740
+[2025-07-06 18:43:59] [Rank 0] Group 5 FTA: 0.9609
+[2025-07-06 18:43:59] [Rank 0] Group 6 FTA: 0.9688
+[2025-07-06 18:43:59] [Rank 0] Group 7 FTA: 0.9323
+[2025-07-06 18:43:59] [Rank 0] Group 8 FTA: 0.9323
+[2025-07-06 18:43:59] [Rank 0] Group 9 FTA: 0.9648
+[2025-07-06 18:43:59] [Rank 0] Group 10 FTA: 0.9375
+[2025-07-06 18:43:59] [Rank 0] Group 11 FTA: 0.9512
+[2025-07-06 18:43:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:43:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:44:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:44:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:44:00] [Rank 0] step:5001/10000 train_time:403851ms step_avg:80.75ms
+[2025-07-06 18:44:02] [Rank 0] step:5021/10000 train_time:405364ms step_avg:80.73ms
+[2025-07-06 18:44:04] [Rank 0] step:5041/10000 train_time:406919ms step_avg:80.72ms
+[2025-07-06 18:44:05] [Rank 0] step:5061/10000 train_time:409005ms step_avg:80.82ms
+[2025-07-06 18:44:07] [Rank 0] step:5081/10000 train_time:410498ms step_avg:80.79ms
+[2025-07-06 18:44:08] [Rank 0] step:5101/10000 train_time:411987ms step_avg:80.77ms
+[2025-07-06 18:44:10] [Rank 0] step:5121/10000 train_time:413487ms step_avg:80.74ms
+[2025-07-06 18:44:12] [Rank 0] step:5141/10000 train_time:415619ms step_avg:80.84ms
+[2025-07-06 18:44:13] [Rank 0] step:5161/10000 train_time:417109ms step_avg:80.82ms
+[2025-07-06 18:44:15] [Rank 0] step:5181/10000 train_time:418601ms step_avg:80.80ms
+[2025-07-06 18:44:16] [Rank 0] step:5201/10000 train_time:420093ms step_avg:80.77ms
+[2025-07-06 18:44:18] [Rank 0] step:5221/10000 train_time:422275ms step_avg:80.88ms
+[2025-07-06 18:44:20] [Rank 0] step:5241/10000 train_time:423729ms step_avg:80.85ms
+[2025-07-06 18:44:21] [Rank 0] step:5261/10000 train_time:425230ms step_avg:80.83ms
+[2025-07-06 18:44:23] [Rank 0] step:5281/10000 train_time:426717ms step_avg:80.80ms
+[2025-07-06 18:44:24] [Rank 0] step:5301/10000 train_time:428209ms step_avg:80.78ms
+[2025-07-06 18:44:27] [Rank 0] step:5321/10000 train_time:430371ms step_avg:80.88ms
+[2025-07-06 18:44:28] [Rank 0] step:5341/10000 train_time:431866ms step_avg:80.86ms
+[2025-07-06 18:44:30] [Rank 0] step:5361/10000 train_time:433361ms step_avg:80.84ms
+[2025-07-06 18:44:31] [Rank 0] step:5381/10000 train_time:434855ms step_avg:80.81ms
+[2025-07-06 18:44:33] [Rank 0] step:5401/10000 train_time:437053ms step_avg:80.92ms
+[2025-07-06 18:44:35] [Rank 0] step:5421/10000 train_time:438624ms step_avg:80.91ms
+[2025-07-06 18:44:36] [Rank 0] step:5441/10000 train_time:440119ms step_avg:80.89ms
+[2025-07-06 18:44:38] [Rank 0] step:5461/10000 train_time:441614ms step_avg:80.87ms
+[2025-07-06 18:44:39] [Rank 0] step:5481/10000 train_time:443110ms step_avg:80.84ms
+[2025-07-06 18:44:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:44:42] [Rank 0] PRINT: step:5500/10000 train_loss:0.8625 val_loss:0.8640 train_time:444840ms step_avg:80.88ms
+[2025-07-06 18:44:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:44:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:44:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:50:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:50:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:50:13] [Rank 0] Total Loss: 5.4515
+[2025-07-06 18:50:13] [Rank 0] Total FTA: 0.9734
+[2025-07-06 18:50:13] [Rank 0] Group 0 Loss: 5.5730
+[2025-07-06 18:50:13] [Rank 0] Group 1 Loss: 5.4637
+[2025-07-06 18:50:13] [Rank 0] Group 2 Loss: 5.2621
+[2025-07-06 18:50:13] [Rank 0] Group 3 Loss: 5.5439
+[2025-07-06 18:50:13] [Rank 0] Group 4 Loss: 5.4102
+[2025-07-06 18:50:13] [Rank 0] Group 5 Loss: 5.3962
+[2025-07-06 18:50:13] [Rank 0] Group 6 Loss: 5.3866
+[2025-07-06 18:50:13] [Rank 0] Group 7 Loss: 5.4509
+[2025-07-06 18:50:13] [Rank 0] Group 8 Loss: 5.4628
+[2025-07-06 18:50:13] [Rank 0] Group 9 Loss: 5.4132
+[2025-07-06 18:50:13] [Rank 0] Group 10 Loss: 5.4287
+[2025-07-06 18:50:13] [Rank 0] Group 11 Loss: 5.4695
+[2025-07-06 18:50:13] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:50:13] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:50:13] [Rank 0] Group 2 FTA: 0.9193
+[2025-07-06 18:50:13] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 18:50:13] [Rank 0] Group 4 FTA: 0.9870
+[2025-07-06 18:50:13] [Rank 0] Group 5 FTA: 0.9531
+[2025-07-06 18:50:13] [Rank 0] Group 6 FTA: 0.9714
+[2025-07-06 18:50:13] [Rank 0] Group 7 FTA: 0.9661
+[2025-07-06 18:50:13] [Rank 0] Group 8 FTA: 0.9557
+[2025-07-06 18:50:13] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-06 18:50:13] [Rank 0] Group 10 FTA: 0.9727
+[2025-07-06 18:50:13] [Rank 0] Group 11 FTA: 0.9678
+[2025-07-06 18:50:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:50:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:50:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:50:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:50:15] [Rank 0] step:5501/10000 train_time:444865ms step_avg:80.87ms
+[2025-07-06 18:50:16] [Rank 0] step:5521/10000 train_time:446365ms step_avg:80.85ms
+[2025-07-06 18:50:18] [Rank 0] step:5541/10000 train_time:447852ms step_avg:80.83ms
+[2025-07-06 18:50:19] [Rank 0] step:5561/10000 train_time:449340ms step_avg:80.80ms
+[2025-07-06 18:50:21] [Rank 0] step:5581/10000 train_time:451089ms step_avg:80.83ms
+[2025-07-06 18:50:23] [Rank 0] step:5601/10000 train_time:452979ms step_avg:80.87ms
+[2025-07-06 18:50:24] [Rank 0] step:5621/10000 train_time:454466ms step_avg:80.85ms
+[2025-07-06 18:50:26] [Rank 0] step:5641/10000 train_time:455956ms step_avg:80.83ms
+[2025-07-06 18:50:27] [Rank 0] step:5661/10000 train_time:457448ms step_avg:80.81ms
+[2025-07-06 18:50:29] [Rank 0] step:5681/10000 train_time:459182ms step_avg:80.83ms
+[2025-07-06 18:50:30] [Rank 0] step:5701/10000 train_time:460666ms step_avg:80.80ms
+[2025-07-06 18:50:32] [Rank 0] step:5721/10000 train_time:462156ms step_avg:80.78ms
+[2025-07-06 18:50:33] [Rank 0] step:5741/10000 train_time:463649ms step_avg:80.76ms
+[2025-07-06 18:50:35] [Rank 0] step:5761/10000 train_time:465830ms step_avg:80.86ms
+[2025-07-06 18:50:37] [Rank 0] step:5781/10000 train_time:467286ms step_avg:80.83ms
+[2025-07-06 18:50:38] [Rank 0] step:5801/10000 train_time:468778ms step_avg:80.81ms
+[2025-07-06 18:50:40] [Rank 0] step:5821/10000 train_time:470273ms step_avg:80.79ms
+[2025-07-06 18:50:41] [Rank 0] step:5841/10000 train_time:471767ms step_avg:80.77ms
+[2025-07-06 18:50:44] [Rank 0] step:5861/10000 train_time:473928ms step_avg:80.86ms
+[2025-07-06 18:50:45] [Rank 0] step:5881/10000 train_time:475425ms step_avg:80.84ms
+[2025-07-06 18:50:47] [Rank 0] step:5901/10000 train_time:476921ms step_avg:80.82ms
+[2025-07-06 18:50:48] [Rank 0] step:5921/10000 train_time:478416ms step_avg:80.80ms
+[2025-07-06 18:50:50] [Rank 0] step:5941/10000 train_time:479914ms step_avg:80.78ms
+[2025-07-06 18:50:51] [Rank 0] step:5961/10000 train_time:481649ms step_avg:80.80ms
+[2025-07-06 18:50:53] [Rank 0] step:5981/10000 train_time:483144ms step_avg:80.78ms
+[2025-07-06 18:50:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:50:55] [Rank 0] PRINT: step:6000/10000 train_loss:0.8621 val_loss:0.8622 train_time:484707ms step_avg:80.78ms
+[2025-07-06 18:50:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:50:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:50:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:56:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:56:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:56:25] [Rank 0] Total Loss: 5.3208
+[2025-07-06 18:56:25] [Rank 0] Total FTA: 0.9860
+[2025-07-06 18:56:25] [Rank 0] Group 0 Loss: 5.5659
+[2025-07-06 18:56:25] [Rank 0] Group 1 Loss: 5.2456
+[2025-07-06 18:56:25] [Rank 0] Group 2 Loss: 5.1946
+[2025-07-06 18:56:25] [Rank 0] Group 3 Loss: 5.2855
+[2025-07-06 18:56:25] [Rank 0] Group 4 Loss: 5.2770
+[2025-07-06 18:56:25] [Rank 0] Group 5 Loss: 5.2997
+[2025-07-06 18:56:25] [Rank 0] Group 6 Loss: 5.2499
+[2025-07-06 18:56:25] [Rank 0] Group 7 Loss: 5.2848
+[2025-07-06 18:56:25] [Rank 0] Group 8 Loss: 5.2983
+[2025-07-06 18:56:25] [Rank 0] Group 9 Loss: 5.2918
+[2025-07-06 18:56:25] [Rank 0] Group 10 Loss: 5.2806
+[2025-07-06 18:56:25] [Rank 0] Group 11 Loss: 5.3257
+[2025-07-06 18:56:25] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:56:25] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:56:25] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:56:25] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 18:56:25] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 18:56:25] [Rank 0] Group 5 FTA: 0.9661
+[2025-07-06 18:56:25] [Rank 0] Group 6 FTA: 0.9948
+[2025-07-06 18:56:25] [Rank 0] Group 7 FTA: 0.9714
+[2025-07-06 18:56:25] [Rank 0] Group 8 FTA: 0.9714
+[2025-07-06 18:56:25] [Rank 0] Group 9 FTA: 0.9883
+[2025-07-06 18:56:25] [Rank 0] Group 10 FTA: 0.9785
+[2025-07-06 18:56:25] [Rank 0] Group 11 FTA: 0.9727
+[2025-07-06 18:56:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 18:56:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 18:56:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 18:56:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 18:56:27] [Rank 0] step:6001/10000 train_time:484731ms step_avg:80.78ms
+[2025-07-06 18:56:28] [Rank 0] step:6021/10000 train_time:486243ms step_avg:80.76ms
+[2025-07-06 18:56:30] [Rank 0] step:6041/10000 train_time:487971ms step_avg:80.78ms
+[2025-07-06 18:56:31] [Rank 0] step:6061/10000 train_time:489460ms step_avg:80.76ms
+[2025-07-06 18:56:33] [Rank 0] step:6081/10000 train_time:490947ms step_avg:80.73ms
+[2025-07-06 18:56:34] [Rank 0] step:6101/10000 train_time:492436ms step_avg:80.71ms
+[2025-07-06 18:56:37] [Rank 0] step:6121/10000 train_time:494615ms step_avg:80.81ms
+[2025-07-06 18:56:38] [Rank 0] step:6141/10000 train_time:496083ms step_avg:80.78ms
+[2025-07-06 18:56:40] [Rank 0] step:6161/10000 train_time:497573ms step_avg:80.76ms
+[2025-07-06 18:56:41] [Rank 0] step:6181/10000 train_time:499066ms step_avg:80.74ms
+[2025-07-06 18:56:43] [Rank 0] step:6201/10000 train_time:500557ms step_avg:80.72ms
+[2025-07-06 18:56:45] [Rank 0] step:6221/10000 train_time:502792ms step_avg:80.82ms
+[2025-07-06 18:56:46] [Rank 0] step:6241/10000 train_time:504283ms step_avg:80.80ms
+[2025-07-06 18:56:48] [Rank 0] step:6261/10000 train_time:505776ms step_avg:80.78ms
+[2025-07-06 18:56:49] [Rank 0] step:6281/10000 train_time:507268ms step_avg:80.76ms
+[2025-07-06 18:56:51] [Rank 0] step:6301/10000 train_time:509430ms step_avg:80.85ms
+[2025-07-06 18:56:53] [Rank 0] step:6321/10000 train_time:510906ms step_avg:80.83ms
+[2025-07-06 18:56:54] [Rank 0] step:6341/10000 train_time:512402ms step_avg:80.81ms
+[2025-07-06 18:56:56] [Rank 0] step:6361/10000 train_time:513899ms step_avg:80.79ms
+[2025-07-06 18:56:57] [Rank 0] step:6381/10000 train_time:515394ms step_avg:80.77ms
+[2025-07-06 18:57:00] [Rank 0] step:6401/10000 train_time:517547ms step_avg:80.85ms
+[2025-07-06 18:57:01] [Rank 0] step:6421/10000 train_time:519041ms step_avg:80.83ms
+[2025-07-06 18:57:02] [Rank 0] step:6441/10000 train_time:520535ms step_avg:80.82ms
+[2025-07-06 18:57:04] [Rank 0] step:6461/10000 train_time:522031ms step_avg:80.80ms
+[2025-07-06 18:57:06] [Rank 0] step:6481/10000 train_time:524213ms step_avg:80.88ms
+[2025-07-06 18:57:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:57:09] [Rank 0] PRINT: step:6500/10000 train_loss:0.8606 val_loss:0.8610 train_time:525688ms step_avg:80.88ms
+[2025-07-06 18:57:09] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:57:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:57:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:02:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:02:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:02:36] [Rank 0] Total Loss: 5.3177
+[2025-07-06 19:02:36] [Rank 0] Total FTA: 0.9215
+[2025-07-06 19:02:36] [Rank 0] Group 0 Loss: 5.3876
+[2025-07-06 19:02:36] [Rank 0] Group 1 Loss: 5.3508
+[2025-07-06 19:02:36] [Rank 0] Group 2 Loss: 5.1781
+[2025-07-06 19:02:36] [Rank 0] Group 3 Loss: 5.2836
+[2025-07-06 19:02:36] [Rank 0] Group 4 Loss: 5.3166
+[2025-07-06 19:02:36] [Rank 0] Group 5 Loss: 5.3132
+[2025-07-06 19:02:36] [Rank 0] Group 6 Loss: 5.2366
+[2025-07-06 19:02:36] [Rank 0] Group 7 Loss: 5.3113
+[2025-07-06 19:02:36] [Rank 0] Group 8 Loss: 5.3439
+[2025-07-06 19:02:36] [Rank 0] Group 9 Loss: 5.2539
+[2025-07-06 19:02:36] [Rank 0] Group 10 Loss: 5.3726
+[2025-07-06 19:02:36] [Rank 0] Group 11 Loss: 5.3312
+[2025-07-06 19:02:36] [Rank 0] Group 0 FTA: 0.6593
+[2025-07-06 19:02:36] [Rank 0] Group 1 FTA: 0.8646
+[2025-07-06 19:02:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:02:36] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:02:36] [Rank 0] Group 4 FTA: 0.9896
+[2025-07-06 19:02:36] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-06 19:02:36] [Rank 0] Group 6 FTA: 0.9688
+[2025-07-06 19:02:36] [Rank 0] Group 7 FTA: 0.9505
+[2025-07-06 19:02:36] [Rank 0] Group 8 FTA: 0.9375
+[2025-07-06 19:02:36] [Rank 0] Group 9 FTA: 0.9844
+[2025-07-06 19:02:36] [Rank 0] Group 10 FTA: 0.9609
+[2025-07-06 19:02:36] [Rank 0] Group 11 FTA: 0.9717
+[2025-07-06 19:02:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 19:02:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 19:02:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 19:02:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 19:02:38] [Rank 0] step:6501/10000 train_time:525710ms step_avg:80.87ms
+[2025-07-06 19:02:39] [Rank 0] step:6521/10000 train_time:527217ms step_avg:80.85ms
+[2025-07-06 19:02:41] [Rank 0] step:6541/10000 train_time:528706ms step_avg:80.83ms
+[2025-07-06 19:02:42] [Rank 0] step:6561/10000 train_time:530195ms step_avg:80.81ms
+[2025-07-06 19:02:44] [Rank 0] step:6581/10000 train_time:532353ms step_avg:80.89ms
+[2025-07-06 19:02:46] [Rank 0] step:6601/10000 train_time:533842ms step_avg:80.87ms
+[2025-07-06 19:02:47] [Rank 0] step:6621/10000 train_time:535334ms step_avg:80.85ms
+[2025-07-06 19:02:49] [Rank 0] step:6641/10000 train_time:536825ms step_avg:80.84ms
+[2025-07-06 19:02:51] [Rank 0] step:6661/10000 train_time:538574ms step_avg:80.85ms
+[2025-07-06 19:02:52] [Rank 0] step:6681/10000 train_time:540048ms step_avg:80.83ms
+[2025-07-06 19:02:53] [Rank 0] step:6701/10000 train_time:541540ms step_avg:80.81ms
+[2025-07-06 19:02:55] [Rank 0] step:6721/10000 train_time:543034ms step_avg:80.80ms
+[2025-07-06 19:02:56] [Rank 0] step:6741/10000 train_time:544536ms step_avg:80.78ms
+[2025-07-06 19:02:58] [Rank 0] step:6761/10000 train_time:546271ms step_avg:80.80ms
+[2025-07-06 19:03:00] [Rank 0] step:6781/10000 train_time:547763ms step_avg:80.78ms
+[2025-07-06 19:03:01] [Rank 0] step:6801/10000 train_time:549261ms step_avg:80.76ms
+[2025-07-06 19:03:03] [Rank 0] step:6821/10000 train_time:550758ms step_avg:80.74ms
+[2025-07-06 19:03:05] [Rank 0] step:6841/10000 train_time:552535ms step_avg:80.77ms
+[2025-07-06 19:03:06] [Rank 0] step:6861/10000 train_time:554421ms step_avg:80.81ms
+[2025-07-06 19:03:08] [Rank 0] step:6881/10000 train_time:555919ms step_avg:80.79ms
+[2025-07-06 19:03:09] [Rank 0] step:6901/10000 train_time:557416ms step_avg:80.77ms
+[2025-07-06 19:03:11] [Rank 0] step:6921/10000 train_time:558915ms step_avg:80.76ms
+[2025-07-06 19:03:13] [Rank 0] step:6941/10000 train_time:561062ms step_avg:80.83ms
+[2025-07-06 19:03:15] [Rank 0] step:6961/10000 train_time:562560ms step_avg:80.82ms
+[2025-07-06 19:03:16] [Rank 0] step:6981/10000 train_time:564058ms step_avg:80.80ms
+[2025-07-06 19:03:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:03:18] [Rank 0] PRINT: step:7000/10000 train_loss:0.8594 val_loss:0.8605 train_time:565558ms step_avg:80.79ms
+[2025-07-06 19:03:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:03:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:03:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:08:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:08:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:08:52] [Rank 0] Total Loss: 5.4028
+[2025-07-06 19:08:52] [Rank 0] Total FTA: 0.9340
+[2025-07-06 19:08:52] [Rank 0] Group 0 Loss: 5.4401
+[2025-07-06 19:08:52] [Rank 0] Group 1 Loss: 5.2979
+[2025-07-06 19:08:52] [Rank 0] Group 2 Loss: 5.2723
+[2025-07-06 19:08:52] [Rank 0] Group 3 Loss: 5.4090
+[2025-07-06 19:08:52] [Rank 0] Group 4 Loss: 5.3428
+[2025-07-06 19:08:52] [Rank 0] Group 5 Loss: 5.3855
+[2025-07-06 19:08:52] [Rank 0] Group 6 Loss: 5.3479
+[2025-07-06 19:08:52] [Rank 0] Group 7 Loss: 5.4471
+[2025-07-06 19:08:52] [Rank 0] Group 8 Loss: 5.4428
+[2025-07-06 19:08:52] [Rank 0] Group 9 Loss: 5.4290
+[2025-07-06 19:08:52] [Rank 0] Group 10 Loss: 5.4663
+[2025-07-06 19:08:52] [Rank 0] Group 11 Loss: 5.4402
+[2025-07-06 19:08:52] [Rank 0] Group 0 FTA: 0.8179
+[2025-07-06 19:08:52] [Rank 0] Group 1 FTA: 0.8542
+[2025-07-06 19:08:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:08:52] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-06 19:08:52] [Rank 0] Group 4 FTA: 0.9505
+[2025-07-06 19:08:52] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-06 19:08:52] [Rank 0] Group 6 FTA: 0.9609
+[2025-07-06 19:08:52] [Rank 0] Group 7 FTA: 0.9557
+[2025-07-06 19:08:52] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-06 19:08:52] [Rank 0] Group 9 FTA: 0.9727
+[2025-07-06 19:08:52] [Rank 0] Group 10 FTA: 0.9531
+[2025-07-06 19:08:52] [Rank 0] Group 11 FTA: 0.9551
+[2025-07-06 19:08:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 19:08:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 19:08:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 19:08:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 19:08:53] [Rank 0] step:7001/10000 train_time:565581ms step_avg:80.79ms
+[2025-07-06 19:08:55] [Rank 0] step:7021/10000 train_time:567327ms step_avg:80.80ms
+[2025-07-06 19:08:57] [Rank 0] step:7041/10000 train_time:569219ms step_avg:80.84ms
+[2025-07-06 19:08:58] [Rank 0] step:7061/10000 train_time:570707ms step_avg:80.83ms
+[2025-07-06 19:09:00] [Rank 0] step:7081/10000 train_time:572198ms step_avg:80.81ms
+[2025-07-06 19:09:01] [Rank 0] step:7101/10000 train_time:573689ms step_avg:80.79ms
+[2025-07-06 19:09:04] [Rank 0] step:7121/10000 train_time:575836ms step_avg:80.86ms
+[2025-07-06 19:09:05] [Rank 0] step:7141/10000 train_time:577327ms step_avg:80.85ms
+[2025-07-06 19:09:06] [Rank 0] step:7161/10000 train_time:578821ms step_avg:80.83ms
+[2025-07-06 19:09:08] [Rank 0] step:7181/10000 train_time:580315ms step_avg:80.81ms
+[2025-07-06 19:09:10] [Rank 0] step:7201/10000 train_time:582063ms step_avg:80.83ms
+[2025-07-06 19:09:12] [Rank 0] step:7221/10000 train_time:583945ms step_avg:80.87ms
+[2025-07-06 19:09:13] [Rank 0] step:7241/10000 train_time:585441ms step_avg:80.85ms
+[2025-07-06 19:09:15] [Rank 0] step:7261/10000 train_time:586937ms step_avg:80.83ms
+[2025-07-06 19:09:16] [Rank 0] step:7281/10000 train_time:588433ms step_avg:80.82ms
+[2025-07-06 19:09:18] [Rank 0] step:7301/10000 train_time:590588ms step_avg:80.89ms
+[2025-07-06 19:09:20] [Rank 0] step:7321/10000 train_time:592085ms step_avg:80.87ms
+[2025-07-06 19:09:21] [Rank 0] step:7341/10000 train_time:593581ms step_avg:80.86ms
+[2025-07-06 19:09:23] [Rank 0] step:7361/10000 train_time:595079ms step_avg:80.84ms
+[2025-07-06 19:09:25] [Rank 0] step:7381/10000 train_time:596630ms step_avg:80.83ms
+[2025-07-06 19:09:26] [Rank 0] step:7401/10000 train_time:598734ms step_avg:80.90ms
+[2025-07-06 19:09:28] [Rank 0] step:7421/10000 train_time:600231ms step_avg:80.88ms
+[2025-07-06 19:09:29] [Rank 0] step:7441/10000 train_time:601729ms step_avg:80.87ms
+[2025-07-06 19:09:31] [Rank 0] step:7461/10000 train_time:603228ms step_avg:80.85ms
+[2025-07-06 19:09:33] [Rank 0] step:7481/10000 train_time:605064ms step_avg:80.88ms
+[2025-07-06 19:09:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:09:35] [Rank 0] PRINT: step:7500/10000 train_loss:0.8587 val_loss:0.8599 train_time:606561ms step_avg:80.87ms
+[2025-07-06 19:09:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:09:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:09:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:15:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:15:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:15:06] [Rank 0] Total Loss: 5.4969
+[2025-07-06 19:15:06] [Rank 0] Total FTA: 0.9510
+[2025-07-06 19:15:06] [Rank 0] Group 0 Loss: 5.5675
+[2025-07-06 19:15:07] [Rank 0] Group 1 Loss: 5.6011
+[2025-07-06 19:15:07] [Rank 0] Group 2 Loss: 5.5567
+[2025-07-06 19:15:07] [Rank 0] Group 3 Loss: 5.4699
+[2025-07-06 19:15:07] [Rank 0] Group 4 Loss: 5.3946
+[2025-07-06 19:15:07] [Rank 0] Group 5 Loss: 5.4459
+[2025-07-06 19:15:07] [Rank 0] Group 6 Loss: 5.3944
+[2025-07-06 19:15:07] [Rank 0] Group 7 Loss: 5.4742
+[2025-07-06 19:15:07] [Rank 0] Group 8 Loss: 5.4933
+[2025-07-06 19:15:07] [Rank 0] Group 9 Loss: 5.4872
+[2025-07-06 19:15:07] [Rank 0] Group 10 Loss: 5.5078
+[2025-07-06 19:15:07] [Rank 0] Group 11 Loss: 5.4951
+[2025-07-06 19:15:07] [Rank 0] Group 0 FTA: 0.8309
+[2025-07-06 19:15:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:15:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:15:07] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:15:07] [Rank 0] Group 4 FTA: 0.9714
+[2025-07-06 19:15:07] [Rank 0] Group 5 FTA: 0.9453
+[2025-07-06 19:15:07] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-06 19:15:07] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-06 19:15:07] [Rank 0] Group 8 FTA: 0.9505
+[2025-07-06 19:15:07] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-06 19:15:07] [Rank 0] Group 10 FTA: 0.9707
+[2025-07-06 19:15:07] [Rank 0] Group 11 FTA: 0.9629
+[2025-07-06 19:15:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 19:15:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 19:15:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 19:15:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 19:15:08] [Rank 0] step:7501/10000 train_time:606582ms step_avg:80.87ms
+[2025-07-06 19:15:10] [Rank 0] step:7521/10000 train_time:608089ms step_avg:80.85ms
+[2025-07-06 19:15:11] [Rank 0] step:7541/10000 train_time:609575ms step_avg:80.83ms
+[2025-07-06 19:15:13] [Rank 0] step:7561/10000 train_time:611352ms step_avg:80.86ms
+[2025-07-06 19:15:15] [Rank 0] step:7581/10000 train_time:613224ms step_avg:80.89ms
+[2025-07-06 19:15:16] [Rank 0] step:7601/10000 train_time:614714ms step_avg:80.87ms
+[2025-07-06 19:15:18] [Rank 0] step:7621/10000 train_time:616205ms step_avg:80.86ms
+[2025-07-06 19:15:19] [Rank 0] step:7641/10000 train_time:617696ms step_avg:80.84ms
+[2025-07-06 19:15:21] [Rank 0] step:7661/10000 train_time:619421ms step_avg:80.85ms
+[2025-07-06 19:15:23] [Rank 0] step:7681/10000 train_time:620913ms step_avg:80.84ms
+[2025-07-06 19:15:24] [Rank 0] step:7701/10000 train_time:622405ms step_avg:80.82ms
+[2025-07-06 19:15:26] [Rank 0] step:7721/10000 train_time:623900ms step_avg:80.81ms
+[2025-07-06 19:15:28] [Rank 0] step:7741/10000 train_time:625651ms step_avg:80.82ms
+[2025-07-06 19:15:29] [Rank 0] step:7761/10000 train_time:627533ms step_avg:80.86ms
+[2025-07-06 19:15:31] [Rank 0] step:7781/10000 train_time:629028ms step_avg:80.84ms
+[2025-07-06 19:15:32] [Rank 0] step:7801/10000 train_time:630521ms step_avg:80.83ms
+[2025-07-06 19:15:34] [Rank 0] step:7821/10000 train_time:632015ms step_avg:80.81ms
+[2025-07-06 19:15:36] [Rank 0] step:7841/10000 train_time:634177ms step_avg:80.88ms
+[2025-07-06 19:15:37] [Rank 0] step:7861/10000 train_time:635673ms step_avg:80.86ms
+[2025-07-06 19:15:39] [Rank 0] step:7881/10000 train_time:637169ms step_avg:80.85ms
+[2025-07-06 19:15:40] [Rank 0] step:7901/10000 train_time:638665ms step_avg:80.83ms
+[2025-07-06 19:15:43] [Rank 0] step:7921/10000 train_time:640171ms step_avg:80.82ms
+[2025-07-06 19:15:44] [Rank 0] step:7941/10000 train_time:642319ms step_avg:80.89ms
+[2025-07-06 19:15:46] [Rank 0] step:7961/10000 train_time:643812ms step_avg:80.87ms
+[2025-07-06 19:15:47] [Rank 0] step:7981/10000 train_time:645308ms step_avg:80.86ms
+[2025-07-06 19:15:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:15:49] [Rank 0] PRINT: step:8000/10000 train_loss:0.8580 val_loss:0.8592 train_time:646804ms step_avg:80.85ms
+[2025-07-06 19:15:49] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:15:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:15:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:21:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:21:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:21:20] [Rank 0] Total Loss: 5.4483
+[2025-07-06 19:21:20] [Rank 0] Total FTA: 0.9760
+[2025-07-06 19:21:20] [Rank 0] Group 0 Loss: 5.6384
+[2025-07-06 19:21:20] [Rank 0] Group 1 Loss: 5.4450
+[2025-07-06 19:21:20] [Rank 0] Group 2 Loss: 5.3550
+[2025-07-06 19:21:20] [Rank 0] Group 3 Loss: 5.4877
+[2025-07-06 19:21:20] [Rank 0] Group 4 Loss: 5.4110
+[2025-07-06 19:21:20] [Rank 0] Group 5 Loss: 5.4211
+[2025-07-06 19:21:20] [Rank 0] Group 6 Loss: 5.3121
+[2025-07-06 19:21:20] [Rank 0] Group 7 Loss: 5.4531
+[2025-07-06 19:21:20] [Rank 0] Group 8 Loss: 5.4791
+[2025-07-06 19:21:20] [Rank 0] Group 9 Loss: 5.4217
+[2025-07-06 19:21:20] [Rank 0] Group 10 Loss: 5.3727
+[2025-07-06 19:21:20] [Rank 0] Group 11 Loss: 5.4333
+[2025-07-06 19:21:20] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:21:20] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:21:20] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:21:20] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-06 19:21:20] [Rank 0] Group 4 FTA: 0.9714
+[2025-07-06 19:21:20] [Rank 0] Group 5 FTA: 0.9844
+[2025-07-06 19:21:20] [Rank 0] Group 6 FTA: 0.9635
+[2025-07-06 19:21:20] [Rank 0] Group 7 FTA: 0.9271
+[2025-07-06 19:21:20] [Rank 0] Group 8 FTA: 0.9740
+[2025-07-06 19:21:20] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 19:21:20] [Rank 0] Group 10 FTA: 0.9629
+[2025-07-06 19:21:20] [Rank 0] Group 11 FTA: 0.9736
+[2025-07-06 19:21:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 19:21:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 19:21:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 19:21:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 19:21:22] [Rank 0] step:8001/10000 train_time:646829ms step_avg:80.84ms
+[2025-07-06 19:21:24] [Rank 0] step:8021/10000 train_time:649003ms step_avg:80.91ms
+[2025-07-06 19:21:25] [Rank 0] step:8041/10000 train_time:650490ms step_avg:80.90ms
+[2025-07-06 19:21:27] [Rank 0] step:8061/10000 train_time:651978ms step_avg:80.88ms
+[2025-07-06 19:21:28] [Rank 0] step:8081/10000 train_time:653575ms step_avg:80.88ms
+[2025-07-06 19:21:31] [Rank 0] step:8101/10000 train_time:655738ms step_avg:80.95ms
+[2025-07-06 19:21:32] [Rank 0] step:8121/10000 train_time:657208ms step_avg:80.93ms
+[2025-07-06 19:21:34] [Rank 0] step:8141/10000 train_time:658699ms step_avg:80.91ms
+[2025-07-06 19:21:35] [Rank 0] step:8161/10000 train_time:660190ms step_avg:80.90ms
+[2025-07-06 19:21:37] [Rank 0] step:8181/10000 train_time:661684ms step_avg:80.88ms
+[2025-07-06 19:21:39] [Rank 0] step:8201/10000 train_time:663822ms step_avg:80.94ms
+[2025-07-06 19:21:40] [Rank 0] step:8221/10000 train_time:665316ms step_avg:80.93ms
+[2025-07-06 19:21:42] [Rank 0] step:8241/10000 train_time:666809ms step_avg:80.91ms
+[2025-07-06 19:21:43] [Rank 0] step:8261/10000 train_time:668302ms step_avg:80.90ms
+[2025-07-06 19:21:45] [Rank 0] step:8281/10000 train_time:670072ms step_avg:80.92ms
+[2025-07-06 19:21:46] [Rank 0] step:8301/10000 train_time:671529ms step_avg:80.90ms
+[2025-07-06 19:21:48] [Rank 0] step:8321/10000 train_time:673026ms step_avg:80.88ms
+[2025-07-06 19:21:49] [Rank 0] step:8341/10000 train_time:674521ms step_avg:80.87ms
+[2025-07-06 19:21:51] [Rank 0] step:8361/10000 train_time:676017ms step_avg:80.85ms
+[2025-07-06 19:21:53] [Rank 0] step:8381/10000 train_time:678157ms step_avg:80.92ms
+[2025-07-06 19:21:54] [Rank 0] step:8401/10000 train_time:679652ms step_avg:80.90ms
+[2025-07-06 19:21:56] [Rank 0] step:8421/10000 train_time:681147ms step_avg:80.89ms
+[2025-07-06 19:21:57] [Rank 0] step:8441/10000 train_time:682645ms step_avg:80.87ms
+[2025-07-06 19:22:00] [Rank 0] step:8461/10000 train_time:684418ms step_avg:80.89ms
+[2025-07-06 19:22:01] [Rank 0] step:8481/10000 train_time:686302ms step_avg:80.92ms
+[2025-07-06 19:22:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:22:04] [Rank 0] PRINT: step:8500/10000 train_loss:0.8572 val_loss:0.8589 train_time:687797ms step_avg:80.92ms
+[2025-07-06 19:22:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:22:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:22:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:27:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:27:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:27:36] [Rank 0] Total Loss: 5.4351
+[2025-07-06 19:27:36] [Rank 0] Total FTA: 0.9846
+[2025-07-06 19:27:36] [Rank 0] Group 0 Loss: 5.5899
+[2025-07-06 19:27:36] [Rank 0] Group 1 Loss: 5.4473
+[2025-07-06 19:27:36] [Rank 0] Group 2 Loss: 5.2657
+[2025-07-06 19:27:36] [Rank 0] Group 3 Loss: 5.5720
+[2025-07-06 19:27:36] [Rank 0] Group 4 Loss: 5.3813
+[2025-07-06 19:27:36] [Rank 0] Group 5 Loss: 5.3522
+[2025-07-06 19:27:36] [Rank 0] Group 6 Loss: 5.3298
+[2025-07-06 19:27:36] [Rank 0] Group 7 Loss: 5.3772
+[2025-07-06 19:27:36] [Rank 0] Group 8 Loss: 5.3972
+[2025-07-06 19:27:36] [Rank 0] Group 9 Loss: 5.3550
+[2025-07-06 19:27:36] [Rank 0] Group 10 Loss: 5.4168
+[2025-07-06 19:27:36] [Rank 0] Group 11 Loss: 5.4824
+[2025-07-06 19:27:36] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:27:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:27:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:27:36] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:27:36] [Rank 0] Group 4 FTA: 0.9844
+[2025-07-06 19:27:36] [Rank 0] Group 5 FTA: 0.9818
+[2025-07-06 19:27:36] [Rank 0] Group 6 FTA: 0.9661
+[2025-07-06 19:27:36] [Rank 0] Group 7 FTA: 0.9792
+[2025-07-06 19:27:36] [Rank 0] Group 8 FTA: 0.9557
+[2025-07-06 19:27:36] [Rank 0] Group 9 FTA: 0.9883
+[2025-07-06 19:27:36] [Rank 0] Group 10 FTA: 0.9785
+[2025-07-06 19:27:36] [Rank 0] Group 11 FTA: 0.9785
+[2025-07-06 19:27:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 19:27:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 19:27:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 19:27:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 19:27:38] [Rank 0] step:8501/10000 train_time:687819ms step_avg:80.91ms
+[2025-07-06 19:27:39] [Rank 0] step:8521/10000 train_time:689321ms step_avg:80.90ms
+[2025-07-06 19:27:41] [Rank 0] step:8541/10000 train_time:690808ms step_avg:80.88ms
+[2025-07-06 19:27:43] [Rank 0] step:8561/10000 train_time:692962ms step_avg:80.94ms
+[2025-07-06 19:27:44] [Rank 0] step:8581/10000 train_time:694449ms step_avg:80.93ms
+[2025-07-06 19:27:46] [Rank 0] step:8601/10000 train_time:695938ms step_avg:80.91ms
+[2025-07-06 19:27:48] [Rank 0] step:8621/10000 train_time:697579ms step_avg:80.92ms
+[2025-07-06 19:27:50] [Rank 0] step:8641/10000 train_time:699749ms step_avg:80.98ms
+[2025-07-06 19:27:51] [Rank 0] step:8661/10000 train_time:701202ms step_avg:80.96ms
+[2025-07-06 19:27:53] [Rank 0] step:8681/10000 train_time:702694ms step_avg:80.95ms
+[2025-07-06 19:27:54] [Rank 0] step:8701/10000 train_time:704193ms step_avg:80.93ms
+[2025-07-06 19:27:56] [Rank 0] step:8721/10000 train_time:705676ms step_avg:80.92ms
+[2025-07-06 19:27:58] [Rank 0] step:8741/10000 train_time:707814ms step_avg:80.98ms
+[2025-07-06 19:27:59] [Rank 0] step:8761/10000 train_time:709308ms step_avg:80.96ms
+[2025-07-06 19:28:01] [Rank 0] step:8781/10000 train_time:710803ms step_avg:80.95ms
+[2025-07-06 19:28:02] [Rank 0] step:8801/10000 train_time:712299ms step_avg:80.93ms
+[2025-07-06 19:28:04] [Rank 0] step:8821/10000 train_time:714052ms step_avg:80.95ms
+[2025-07-06 19:28:06] [Rank 0] step:8841/10000 train_time:715959ms step_avg:80.98ms
+[2025-07-06 19:28:07] [Rank 0] step:8861/10000 train_time:717456ms step_avg:80.97ms
+[2025-07-06 19:28:09] [Rank 0] step:8881/10000 train_time:718952ms step_avg:80.95ms
+[2025-07-06 19:28:10] [Rank 0] step:8901/10000 train_time:720450ms step_avg:80.94ms
+[2025-07-06 19:28:13] [Rank 0] step:8921/10000 train_time:722603ms step_avg:81.00ms
+[2025-07-06 19:28:14] [Rank 0] step:8941/10000 train_time:724102ms step_avg:80.99ms
+[2025-07-06 19:28:16] [Rank 0] step:8961/10000 train_time:725601ms step_avg:80.97ms
+[2025-07-06 19:28:17] [Rank 0] step:8981/10000 train_time:727103ms step_avg:80.96ms
+[2025-07-06 19:28:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:28:20] [Rank 0] PRINT: step:9000/10000 train_loss:0.8564 val_loss:0.8587 train_time:728606ms step_avg:80.96ms
+[2025-07-06 19:28:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:28:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:28:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:33:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:33:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:33:51] [Rank 0] Total Loss: 5.4217
+[2025-07-06 19:33:51] [Rank 0] Total FTA: 0.9892
+[2025-07-06 19:33:51] [Rank 0] Group 0 Loss: 5.5483
+[2025-07-06 19:33:51] [Rank 0] Group 1 Loss: 5.5328
+[2025-07-06 19:33:51] [Rank 0] Group 2 Loss: 5.3393
+[2025-07-06 19:33:51] [Rank 0] Group 3 Loss: 5.4168
+[2025-07-06 19:33:51] [Rank 0] Group 4 Loss: 5.4125
+[2025-07-06 19:33:51] [Rank 0] Group 5 Loss: 5.3346
+[2025-07-06 19:33:51] [Rank 0] Group 6 Loss: 5.3022
+[2025-07-06 19:33:51] [Rank 0] Group 7 Loss: 5.4201
+[2025-07-06 19:33:51] [Rank 0] Group 8 Loss: 5.4454
+[2025-07-06 19:33:51] [Rank 0] Group 9 Loss: 5.3327
+[2025-07-06 19:33:51] [Rank 0] Group 10 Loss: 5.4440
+[2025-07-06 19:33:51] [Rank 0] Group 11 Loss: 5.4014
+[2025-07-06 19:33:51] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:33:51] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:33:51] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:33:52] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:33:52] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-06 19:33:52] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-06 19:33:52] [Rank 0] Group 6 FTA: 0.9896
+[2025-07-06 19:33:52] [Rank 0] Group 7 FTA: 0.9896
+[2025-07-06 19:33:52] [Rank 0] Group 8 FTA: 0.9531
+[2025-07-06 19:33:52] [Rank 0] Group 9 FTA: 0.9844
+[2025-07-06 19:33:52] [Rank 0] Group 10 FTA: 0.9902
+[2025-07-06 19:33:52] [Rank 0] Group 11 FTA: 0.9834
+[2025-07-06 19:33:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 19:33:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 19:33:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 19:33:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 19:33:54] [Rank 0] step:9001/10000 train_time:728637ms step_avg:80.95ms
+[2025-07-06 19:33:55] [Rank 0] step:9021/10000 train_time:730834ms step_avg:81.01ms
+[2025-07-06 19:33:57] [Rank 0] step:9041/10000 train_time:732322ms step_avg:81.00ms
+[2025-07-06 19:33:58] [Rank 0] step:9061/10000 train_time:733809ms step_avg:80.99ms
+[2025-07-06 19:34:00] [Rank 0] step:9081/10000 train_time:735298ms step_avg:80.97ms
+[2025-07-06 19:34:02] [Rank 0] step:9101/10000 train_time:737445ms step_avg:81.03ms
+[2025-07-06 19:34:03] [Rank 0] step:9121/10000 train_time:738936ms step_avg:81.01ms
+[2025-07-06 19:34:05] [Rank 0] step:9141/10000 train_time:740426ms step_avg:81.00ms
+[2025-07-06 19:34:07] [Rank 0] step:9161/10000 train_time:742055ms step_avg:81.00ms
+[2025-07-06 19:34:09] [Rank 0] step:9181/10000 train_time:743593ms step_avg:80.99ms
+[2025-07-06 19:34:10] [Rank 0] step:9201/10000 train_time:745682ms step_avg:81.04ms
+[2025-07-06 19:34:12] [Rank 0] step:9221/10000 train_time:747174ms step_avg:81.03ms
+[2025-07-06 19:34:13] [Rank 0] step:9241/10000 train_time:748666ms step_avg:81.02ms
+[2025-07-06 19:34:15] [Rank 0] step:9261/10000 train_time:750160ms step_avg:81.00ms
+[2025-07-06 19:34:17] [Rank 0] step:9281/10000 train_time:752296ms step_avg:81.06ms
+[2025-07-06 19:34:18] [Rank 0] step:9301/10000 train_time:753789ms step_avg:81.04ms
+[2025-07-06 19:34:20] [Rank 0] step:9321/10000 train_time:755285ms step_avg:81.03ms
+[2025-07-06 19:34:21] [Rank 0] step:9341/10000 train_time:756783ms step_avg:81.02ms
+[2025-07-06 19:34:23] [Rank 0] step:9361/10000 train_time:758983ms step_avg:81.08ms
+[2025-07-06 19:34:25] [Rank 0] step:9381/10000 train_time:760440ms step_avg:81.06ms
+[2025-07-06 19:34:26] [Rank 0] step:9401/10000 train_time:761938ms step_avg:81.05ms
+[2025-07-06 19:34:28] [Rank 0] step:9421/10000 train_time:763435ms step_avg:81.04ms
+[2025-07-06 19:34:29] [Rank 0] step:9441/10000 train_time:764933ms step_avg:81.02ms
+[2025-07-06 19:34:32] [Rank 0] step:9461/10000 train_time:767082ms step_avg:81.08ms
+[2025-07-06 19:34:33] [Rank 0] step:9481/10000 train_time:768577ms step_avg:81.07ms
+[2025-07-06 19:34:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:34:35] [Rank 0] PRINT: step:9500/10000 train_loss:0.8557 val_loss:0.8585 train_time:770074ms step_avg:81.06ms
+[2025-07-06 19:34:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:34:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:34:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:40:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:40:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:40:07] [Rank 0] Total Loss: 5.4680
+[2025-07-06 19:40:07] [Rank 0] Total FTA: 0.9888
+[2025-07-06 19:40:07] [Rank 0] Group 0 Loss: 5.6746
+[2025-07-06 19:40:07] [Rank 0] Group 1 Loss: 5.5181
+[2025-07-06 19:40:07] [Rank 0] Group 2 Loss: 5.3820
+[2025-07-06 19:40:07] [Rank 0] Group 3 Loss: 5.5103
+[2025-07-06 19:40:07] [Rank 0] Group 4 Loss: 5.4494
+[2025-07-06 19:40:07] [Rank 0] Group 5 Loss: 5.4408
+[2025-07-06 19:40:07] [Rank 0] Group 6 Loss: 5.3705
+[2025-07-06 19:40:07] [Rank 0] Group 7 Loss: 5.4504
+[2025-07-06 19:40:07] [Rank 0] Group 8 Loss: 5.4013
+[2025-07-06 19:40:07] [Rank 0] Group 9 Loss: 5.4751
+[2025-07-06 19:40:07] [Rank 0] Group 10 Loss: 5.4455
+[2025-07-06 19:40:07] [Rank 0] Group 11 Loss: 5.4051
+[2025-07-06 19:40:07] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:40:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:40:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:40:07] [Rank 0] Group 3 FTA: 0.9661
+[2025-07-06 19:40:07] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 19:40:07] [Rank 0] Group 5 FTA: 0.9948
+[2025-07-06 19:40:07] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-06 19:40:07] [Rank 0] Group 7 FTA: 0.9896
+[2025-07-06 19:40:07] [Rank 0] Group 8 FTA: 0.9766
+[2025-07-06 19:40:07] [Rank 0] Group 9 FTA: 0.9805
+[2025-07-06 19:40:07] [Rank 0] Group 10 FTA: 0.9805
+[2025-07-06 19:40:07] [Rank 0] Group 11 FTA: 0.9883
+[2025-07-06 19:40:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 19:40:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 19:40:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 19:40:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 19:40:09] [Rank 0] step:9501/10000 train_time:770096ms step_avg:81.05ms
+[2025-07-06 19:40:10] [Rank 0] step:9521/10000 train_time:771608ms step_avg:81.04ms
+[2025-07-06 19:40:12] [Rank 0] step:9541/10000 train_time:773096ms step_avg:81.03ms
+[2025-07-06 19:40:14] [Rank 0] step:9561/10000 train_time:775251ms step_avg:81.08ms
+[2025-07-06 19:40:15] [Rank 0] step:9581/10000 train_time:776739ms step_avg:81.07ms
+[2025-07-06 19:40:17] [Rank 0] step:9601/10000 train_time:778228ms step_avg:81.06ms
+[2025-07-06 19:40:18] [Rank 0] step:9621/10000 train_time:779718ms step_avg:81.04ms
+[2025-07-06 19:40:20] [Rank 0] step:9641/10000 train_time:781864ms step_avg:81.10ms
+[2025-07-06 19:40:22] [Rank 0] step:9661/10000 train_time:783362ms step_avg:81.08ms
+[2025-07-06 19:40:23] [Rank 0] step:9681/10000 train_time:784847ms step_avg:81.07ms
+[2025-07-06 19:40:25] [Rank 0] step:9701/10000 train_time:786340ms step_avg:81.06ms
+[2025-07-06 19:40:27] [Rank 0] step:9721/10000 train_time:787953ms step_avg:81.06ms
+[2025-07-06 19:40:29] [Rank 0] step:9741/10000 train_time:790103ms step_avg:81.11ms
+[2025-07-06 19:40:30] [Rank 0] step:9761/10000 train_time:791596ms step_avg:81.10ms
+[2025-07-06 19:40:32] [Rank 0] step:9781/10000 train_time:793091ms step_avg:81.08ms
+[2025-07-06 19:40:33] [Rank 0] step:9801/10000 train_time:794586ms step_avg:81.07ms
+[2025-07-06 19:40:35] [Rank 0] step:9821/10000 train_time:796746ms step_avg:81.13ms
+[2025-07-06 19:40:37] [Rank 0] step:9841/10000 train_time:798250ms step_avg:81.11ms
+[2025-07-06 19:40:38] [Rank 0] step:9861/10000 train_time:799735ms step_avg:81.10ms
+[2025-07-06 19:40:40] [Rank 0] step:9881/10000 train_time:801230ms step_avg:81.09ms
+[2025-07-06 19:40:42] [Rank 0] step:9901/10000 train_time:803433ms step_avg:81.15ms
+[2025-07-06 19:40:43] [Rank 0] step:9921/10000 train_time:804890ms step_avg:81.13ms
+[2025-07-06 19:40:45] [Rank 0] step:9941/10000 train_time:806385ms step_avg:81.12ms
+[2025-07-06 19:40:46] [Rank 0] step:9961/10000 train_time:807881ms step_avg:81.10ms
+[2025-07-06 19:40:48] [Rank 0] step:9981/10000 train_time:809377ms step_avg:81.09ms
+[2025-07-06 19:40:50] [Rank 0] step:10000/10000 train_time:811457ms step_avg:81.15ms
+[2025-07-06 19:40:50] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:40:51] [Rank 0] PRINT: step:10000/10000 train_loss:0.8549 val_loss:0.8585 train_time:811539ms step_avg:81.15ms
+[2025-07-06 19:40:51] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:40:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:40:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:46:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:46:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:46:23] [Rank 0] Total Loss: 5.5073
+[2025-07-06 19:46:23] [Rank 0] Total FTA: 0.9915
+[2025-07-06 19:46:23] [Rank 0] Group 0 Loss: 5.6666
+[2025-07-06 19:46:23] [Rank 0] Group 1 Loss: 5.5631
+[2025-07-06 19:46:23] [Rank 0] Group 2 Loss: 5.4305
+[2025-07-06 19:46:23] [Rank 0] Group 3 Loss: 5.4499
+[2025-07-06 19:46:23] [Rank 0] Group 4 Loss: 5.4639
+[2025-07-06 19:46:23] [Rank 0] Group 5 Loss: 5.4142
+[2025-07-06 19:46:23] [Rank 0] Group 6 Loss: 5.4145
+[2025-07-06 19:46:24] [Rank 0] Group 7 Loss: 5.5559
+[2025-07-06 19:46:24] [Rank 0] Group 8 Loss: 5.4393
+[2025-07-06 19:46:24] [Rank 0] Group 9 Loss: 5.5089
+[2025-07-06 19:46:24] [Rank 0] Group 10 Loss: 5.5228
+[2025-07-06 19:46:24] [Rank 0] Group 11 Loss: 5.5021
+[2025-07-06 19:46:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:46:24] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:46:24] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:46:24] [Rank 0] Group 3 FTA: 0.9401
+[2025-07-06 19:46:24] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-06 19:46:24] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-06 19:46:24] [Rank 0] Group 6 FTA: 0.9948
+[2025-07-06 19:46:24] [Rank 0] Group 7 FTA: 0.9974
+[2025-07-06 19:46:24] [Rank 0] Group 8 FTA: 0.9818
+[2025-07-06 19:46:24] [Rank 0] Group 9 FTA: 0.9883
+[2025-07-06 19:46:24] [Rank 0] Group 10 FTA: 0.9902
+[2025-07-06 19:46:24] [Rank 0] Group 11 FTA: 0.9932
+[2025-07-06 19:46:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-06 19:46:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-06 19:46:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-06 19:46:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-06 19:46:25] [Rank 0] step:10001/10000 train_time:811559ms step_avg:81.15ms
+[2025-07-06 19:46:25] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 19:46:25 2025 ---
+[2025-07-06 19:46:25] [Rank 0] PRINT: Peak memory allocated: 9183 MiB reserved: 10636 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/config.json b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..fb40173c9f0df82c2186f6f7575a5a7f3881c567
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 46,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "ec205c92-06d2-4af8-8415-dd706ed94262",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..9ccca2914606a3dbafdaa2af48b543543a138965
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b03b48a9eff761390f686b9c1c3d31b23497b6f130e0aa249553d65a4d85f07
+size 364274
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..d8c8d33062dbc9b5e4742ea28b41dfa759735c30
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef1d1144bed62fd5cbf0055c23755a2264d0d8d358526d99ecb47e76a9b6aa9a
+size 405176
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..56279a2ec0930098ec8381acd2ae20642eda0408
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7041e70ee3842acd1c8a5776bc4e08855f684676e7f5660ce39337cb8413066
+size 110867
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..1183ea19756c7a86a2336c8ed6c68ba66eb8165e
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7509db08b11b781c08ee33361cd8d6100383ccc66858a2a8c6215d359461894
+size 115745
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/training_log_ec205c92-06d2-4af8-8415-dd706ed94262.txt b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/training_log_ec205c92-06d2-4af8-8415-dd706ed94262.txt
new file mode 100644
index 0000000000000000000000000000000000000000..2d1940aface25bf67b68056f6567703d45bbe54d
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/training_log_ec205c92-06d2-4af8-8415-dd706ed94262.txt
@@ -0,0 +1,5144 @@
+[2025-07-08 00:49:31] [Rank 0] PRINT: --- Script Start: Tue Jul 8 00:49:31 2025 ---
+[2025-07-08 00:49:31] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=46, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-08 00:49:31] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-08 00:49:31] [Rank 0] PRINT: Using fixed seed: 46
+[2025-07-08 00:49:31] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46
+[2025-07-08 00:49:31] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycling through shards makes multi-epoch training wrap around automatically
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
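+# Example launch (illustrative only; the script filename is a hypothetical
+# placeholder -- the flags are taken from the logged CLI args / config.json):
+#   torchrun --nproc_per_node=8 train_gpt_muon.py \
+#       --seed 46 --optimizer_mode 0 --model_parameterization qkvo --adam_lr 0.005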
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    val_tokens = 1966080
+    train_seq_len = 12*1024  # FlexAttention sequence length
+    val_seq_len = 4*16*1024  # FlexAttention sequence length for validation
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup: the run directory name encodes optimizer mode, parameterization, lr, and seed
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
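+# A run directory accumulates config.json (saved below), a training_log_<uuid>.txt
+# written by print0, and the four curve PNGs that the detailed evaluation refreshes
+# (per_class_loss_curves, per_class_acc_curves, total_loss_curve, total_acc_curve).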
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each entry to the log file exactly once (a second, unconditional
+        # write here is what duplicated every line in the logs above)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # use full cached set
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            print(f"  Prompt : '{sample['prompt']}'")
+            print(f"  Answer : '{sample['answer']}'")
+            print(f"  Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'")
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
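+                    # Worked example (illustrative, not from the original run): with
+                    # BLOCK_SIZE = 128, a 37-token prompt becomes 37 real tokens plus
+                    # 91 EOS pads, and actual_len = 37 marks the position whose
+                    # logits predict the first answer token.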
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f"  Prompt : '{result['prompt']}'")
+            print(f"  Expected answer: '{result['answer']}'")
+            print(f"  Prediction: {result['predicted_token']} -> '{result['pred_text']}'")
+            print(f"  Expected  : {result['expected_token']} -> '{result['exp_text']}'")
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f"  First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f"  Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's power-law distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
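+# Worked example (derived from the function above, not part of the original log):
+# generate_powerlaw_selection_counts(3) returns
+#   selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#   class_groups     = [0, 1, 2, 2, 3, 3, 3, 3]
+# i.e. group g holds 2**(g-1) classes (one class for g=0), each drawn 2**(m-g) times,
+# so "Group 0" in the logs is the most frequently seen tier and "Group 11" the rarest.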
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
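+    # Note: FTA is first-token accuracy — a sample counts as correct when the argmax
+    # over the logits at the last prompt position equals the first GPT-2 token of
+    # " <answer>" (e.g. for the answer "Paris", tokenizer.encode(" Paris")[0]);
+    # subsequent answer tokens are not checked.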
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values():
+                all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
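+# plot_curves() accepts either history schema (the is_per_class branch above):
+# a per-class history maps group -> {step -> value}, e.g. (illustrative values only)
+#   {"0": {"500": 6.1, "1000": 5.8}, "1": {"500": 6.0, "1000": 5.7}}
+# while a total-metric history is flat, e.g. {"500": 0.61, "1000": 0.74}.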
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # Create class -> group mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 00:49:31] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
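+                    # (illustrative usage) e.g. "--optimizer_mode 2" puts only the V/O
+                    # attention matrices under Muon and all other matrices under Adam;
+                    # the full dispatch over these modes sits in the optimizer setup below.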
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
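+# A quick illustration of the run-folder convention built below (values taken
+# from this run's config.json; purely illustrative):
+#   >>> f"mode_{0}_param_{'qkvo'}_lr_{0.0001}_seed_{42}"
+#   'mode_0_param_qkvo_lr_0.0001_seed_42'
+# The folder's existence also acts as a resume guard: if it already exists,
+# run_flag below is False and the whole training block is skipped.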
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # run_folder_name / run_dir_path were already built above; just materialize the directory
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[-1]  # model may return (loss, logits); keep the logits
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function for per-class and total metric histories."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close(fig)
+        return
+
+    # Per-class histories are dicts of dicts ({group_id: {step: value}});
+    # total histories are flat ({step: value}).
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(s) for s in step_data.keys()])
+            values = [step_data[str(s)] for s in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(s) for s in history.keys()])
+        values = [history[str(s)] for s in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)  # histories are keyed by training step, not epoch
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close(fig)
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
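+    Returns a dict keyed by group id (as a string) with the mean loss over the
+    sampled items in that power-law group, e.g. {"0": 1.23, "1": 1.45}
+    (values illustrative).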
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
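+        # Illustrative note (this mirrors the update loop further below; it is not
+        # new behavior): each group keeps its base LR in group["initial_lr"], and
+        # the schedule simply rescales it every step:
+        #     for group in opt.param_groups:
+        #         group["lr"] = group["initial_lr"] * get_lr(step)
+        # The small Adam epsilon: 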
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 00:49:32] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-08 00:49:32] [Rank 0] PRINT: Constructing model...
+[2025-07-08 00:49:34] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-08 00:49:34] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-08 00:49:34] [Rank 0] PRINT: Testing model forward function:
+[2025-07-08 00:49:34] [Rank 0] PRINT: Model test - Result type:
+[2025-07-08 00:49:34] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-08 00:49:34] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-08 00:49:34] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-08 00:49:34] [Rank 0] PRINT: Model returns:
+[2025-07-08 00:49:34] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-08 00:49:34] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-08 00:49:34] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-08 00:49:34] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-08 00:49:35] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-08 00:49:35] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-08 00:49:35] [Rank 0] PRINT: Model compilation complete.
+[2025-07-08 00:49:35] [Rank 0] PRINT: Starting warmup...
+[2025-07-08 00:50:46] [Rank 0] PRINT: Warmup complete.
+[2025-07-08 00:50:46] [Rank 0] PRINT: Starting training...
+[2025-07-08 00:50:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:50:55] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-08 00:50:57] [Rank 0] step:21/10000 train_time:1757ms step_avg:83.65ms
+[2025-07-08 00:50:58] [Rank 0] step:41/10000 train_time:3214ms step_avg:78.38ms
+[2025-07-08 00:51:00] [Rank 0] step:61/10000 train_time:4675ms step_avg:76.64ms
+[2025-07-08 00:51:01] [Rank 0] step:81/10000 train_time:6139ms step_avg:75.79ms
+[2025-07-08 00:51:03] [Rank 0] step:101/10000 train_time:8249ms step_avg:81.67ms
+[2025-07-08 00:51:05] [Rank 0] step:121/10000 train_time:9713ms step_avg:80.28ms
+[2025-07-08 00:51:06] [Rank 0] step:141/10000 train_time:11173ms step_avg:79.24ms
+[2025-07-08 00:51:07] [Rank 0] step:161/10000 train_time:12635ms step_avg:78.48ms
+[2025-07-08 00:51:10] [Rank 0] step:181/10000 train_time:14151ms step_avg:78.18ms
+[2025-07-08 00:51:11] [Rank 0] step:201/10000 train_time:16211ms step_avg:80.65ms
+[2025-07-08 00:51:13] [Rank 0] step:221/10000 train_time:17673ms step_avg:79.97ms
+[2025-07-08 00:51:14] [Rank 0] step:241/10000 train_time:19140ms step_avg:79.42ms
+[2025-07-08 00:51:16] [Rank 0] step:261/10000 train_time:20702ms step_avg:79.32ms
+[2025-07-08 00:51:18] [Rank 0] step:281/10000 train_time:22869ms step_avg:81.39ms
+[2025-07-08 00:51:19] [Rank 0] step:301/10000 train_time:24338ms step_avg:80.86ms
+[2025-07-08 00:51:21] [Rank 0] step:321/10000 train_time:25803ms step_avg:80.38ms
+[2025-07-08 00:51:22] [Rank 0] step:341/10000 train_time:27268ms step_avg:79.97ms
+[2025-07-08 00:51:24] [Rank 0] step:361/10000 train_time:29423ms step_avg:81.51ms
+[2025-07-08 00:51:26] [Rank 0] step:381/10000 train_time:30870ms step_avg:81.02ms
+[2025-07-08 00:51:27] [Rank 0] step:401/10000 train_time:32332ms step_avg:80.63ms
+[2025-07-08 00:51:29] [Rank 0] step:421/10000 train_time:33796ms step_avg:80.28ms
+[2025-07-08 00:51:30] [Rank 0] step:441/10000 train_time:35261ms step_avg:79.96ms
+[2025-07-08 00:51:32] [Rank 0] step:461/10000 train_time:36959ms step_avg:80.17ms
+[2025-07-08 00:51:33] [Rank 0] step:481/10000 train_time:38424ms step_avg:79.88ms
+[2025-07-08 00:51:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:51:36] [Rank 0] PRINT: step:500/10000 train_loss:2.2939 val_loss:1.3675 train_time:39887ms step_avg:79.77ms
+[2025-07-08 00:51:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:51:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 00:51:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:56:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:56:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:56:57] [Rank 0] Total Loss: 4.5173
+[2025-07-08 00:56:57] [Rank 0] Total FTA: 0.1028
+[2025-07-08 00:56:57] [Rank 0] Group 0 Loss: 4.8248
+[2025-07-08 00:56:57] [Rank 0] Group 1 Loss: 4.2660
+[2025-07-08 00:56:57] [Rank 0] Group 2 Loss: 4.3396
+[2025-07-08 00:56:57] [Rank 0] Group 3 Loss: 4.6034
+[2025-07-08 00:56:57] [Rank 0] Group 4 Loss: 4.4863
+[2025-07-08 00:56:57] [Rank 0] Group 5 Loss: 4.4430
+[2025-07-08 00:56:57] [Rank 0] Group 6 Loss: 4.4214
+[2025-07-08 00:56:57] [Rank 0] Group 7 Loss: 4.5128
+[2025-07-08 00:56:57] [Rank 0] Group 8 Loss: 4.4831
+[2025-07-08 00:56:57] [Rank 0] Group 9 Loss: 4.4895
+[2025-07-08 00:56:57] [Rank 0] Group 10 Loss: 4.5108
+[2025-07-08 00:56:57] [Rank 0] Group 11 Loss: 4.5151
+[2025-07-08 00:56:57] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-08 00:56:57] [Rank 0] Group 1 FTA: 0.1589
+[2025-07-08 00:56:58] [Rank 0] Group 2 FTA: 0.0651
+[2025-07-08 00:56:58] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-08 00:56:58] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-08 00:56:58] [Rank 0] Group 5 FTA: 0.1432
+[2025-07-08 00:56:58] [Rank 0] Group 6 FTA: 0.1198
+[2025-07-08 00:56:58] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-08 00:56:58] [Rank 0] Group 8 FTA: 0.0677
+[2025-07-08 00:56:58] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-08 00:56:58] [Rank 0] Group 10 FTA: 0.1094
+[2025-07-08 00:56:58] [Rank 0] Group 11 FTA: 0.0928
+[2025-07-08 00:56:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 00:56:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 00:56:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 00:56:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 00:56:59] [Rank 0] step:501/10000 train_time:39907ms step_avg:79.65ms
+[2025-07-08 00:57:00] [Rank 0] step:521/10000 train_time:41364ms step_avg:79.39ms
+[2025-07-08 00:57:02] [Rank 0] step:541/10000 train_time:42875ms step_avg:79.25ms
+[2025-07-08 00:57:03] [Rank 0] step:561/10000 train_time:44319ms step_avg:79.00ms
+[2025-07-08 00:57:05] [Rank 0] step:581/10000 train_time:45775ms step_avg:78.79ms
+[2025-07-08 00:57:06] [Rank 0] step:601/10000 train_time:47233ms step_avg:78.59ms
+[2025-07-08 00:57:08] [Rank 0] step:621/10000 train_time:48691ms step_avg:78.41ms
+[2025-07-08 00:57:10] [Rank 0] step:641/10000 train_time:50793ms step_avg:79.24ms
+[2025-07-08 00:57:11] [Rank 0] step:661/10000 train_time:52253ms step_avg:79.05ms
+[2025-07-08 00:57:13] [Rank 0] step:681/10000 train_time:53712ms step_avg:78.87ms
+[2025-07-08 00:57:14] [Rank 0] step:701/10000 train_time:55171ms step_avg:78.70ms
+[2025-07-08 00:57:16] [Rank 0] step:721/10000 train_time:56634ms step_avg:78.55ms
+[2025-07-08 00:57:18] [Rank 0] step:741/10000 train_time:58760ms step_avg:79.30ms
+[2025-07-08 00:57:19] [Rank 0] step:761/10000 train_time:60228ms step_avg:79.14ms
+[2025-07-08 00:57:21] [Rank 0] step:781/10000 train_time:61700ms step_avg:79.00ms
+[2025-07-08 00:57:22] [Rank 0] step:801/10000 train_time:63173ms step_avg:78.87ms
+[2025-07-08 00:57:24] [Rank 0] step:821/10000 train_time:65297ms step_avg:79.53ms
+[2025-07-08 00:57:26] [Rank 0] step:841/10000 train_time:66767ms step_avg:79.39ms
+[2025-07-08 00:57:27] [Rank 0] step:861/10000 train_time:68239ms step_avg:79.26ms
+[2025-07-08 00:57:29] [Rank 0] step:881/10000 train_time:69711ms step_avg:79.13ms
+[2025-07-08 00:57:31] [Rank 0] step:901/10000 train_time:71847ms step_avg:79.74ms
+[2025-07-08 00:57:32] [Rank 0] step:921/10000 train_time:73301ms step_avg:79.59ms
+[2025-07-08 00:57:34] [Rank 0] step:941/10000 train_time:74772ms step_avg:79.46ms
+[2025-07-08 00:57:35] [Rank 0] step:961/10000 train_time:76396ms step_avg:79.50ms
+[2025-07-08 00:57:37] [Rank 0] step:981/10000 train_time:77872ms step_avg:79.38ms
+[2025-07-08 00:57:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:57:39] [Rank 0] PRINT: step:1000/10000 train_loss:1.2804 val_loss:1.2051 train_time:79581ms step_avg:79.58ms
+[2025-07-08 00:57:39] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:57:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 00:57:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:03:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:03:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:03:02] [Rank 0] Total Loss: 4.7990
+[2025-07-08 01:03:02] [Rank 0] Total FTA: 0.3133
+[2025-07-08 01:03:02] [Rank 0] Group 0 Loss: 5.1379
+[2025-07-08 01:03:02] [Rank 0] Group 1 Loss: 4.6518
+[2025-07-08 01:03:02] [Rank 0] Group 2 Loss: 4.7248
+[2025-07-08 01:03:02] [Rank 0] Group 3 Loss: 4.7725
+[2025-07-08 01:03:02] [Rank 0] Group 4 Loss: 4.7599
+[2025-07-08 01:03:02] [Rank 0] Group 5 Loss: 4.7458
+[2025-07-08 01:03:02] [Rank 0] Group 6 Loss: 4.6868
+[2025-07-08 01:03:02] [Rank 0] Group 7 Loss: 4.7741
+[2025-07-08 01:03:02] [Rank 0] Group 8 Loss: 4.7826
+[2025-07-08 01:03:02] [Rank 0] Group 9 Loss: 4.7311
+[2025-07-08 01:03:02] [Rank 0] Group 10 Loss: 4.7904
+[2025-07-08 01:03:02] [Rank 0] Group 11 Loss: 4.7507
+[2025-07-08 01:03:02] [Rank 0] Group 0 FTA: 0.4837
+[2025-07-08 01:03:02] [Rank 0] Group 1 FTA: 0.1849
+[2025-07-08 01:03:02] [Rank 0] Group 2 FTA: 0.3255
+[2025-07-08 01:03:02] [Rank 0] Group 3 FTA: 0.3724
+[2025-07-08 01:03:02] [Rank 0] Group 4 FTA: 0.1953
+[2025-07-08 01:03:02] [Rank 0] Group 5 FTA: 0.3255
+[2025-07-08 01:03:02] [Rank 0] Group 6 FTA: 0.2760
+[2025-07-08 01:03:02] [Rank 0] Group 7 FTA: 0.3281
+[2025-07-08 01:03:02] [Rank 0] Group 8 FTA: 0.2917
+[2025-07-08 01:03:02] [Rank 0] Group 9 FTA: 0.2969
+[2025-07-08 01:03:02] [Rank 0] Group 10 FTA: 0.2949
+[2025-07-08 01:03:02] [Rank 0] Group 11 FTA: 0.2764
+[2025-07-08 01:03:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:03:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:03:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:03:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:03:04] [Rank 0] step:1001/10000 train_time:79601ms step_avg:79.52ms
+[2025-07-08 01:03:05] [Rank 0] step:1021/10000 train_time:81083ms step_avg:79.41ms
+[2025-07-08 01:03:07] [Rank 0] step:1041/10000 train_time:82553ms step_avg:79.30ms
+[2025-07-08 01:03:08] [Rank 0] step:1061/10000 train_time:84019ms step_avg:79.19ms
+[2025-07-08 01:03:10] [Rank 0] step:1081/10000 train_time:86174ms step_avg:79.72ms
+[2025-07-08 01:03:12] [Rank 0] step:1101/10000 train_time:87623ms step_avg:79.59ms
+[2025-07-08 01:03:13] [Rank 0] step:1121/10000 train_time:89090ms step_avg:79.47ms
+[2025-07-08 01:03:15] [Rank 0] step:1141/10000 train_time:90559ms step_avg:79.37ms
+[2025-07-08 01:03:16] [Rank 0] step:1161/10000 train_time:92030ms step_avg:79.27ms
+[2025-07-08 01:03:18] [Rank 0] step:1181/10000 train_time:93839ms step_avg:79.46ms
+[2025-07-08 01:03:20] [Rank 0] step:1201/10000 train_time:95308ms step_avg:79.36ms
+[2025-07-08 01:03:21] [Rank 0] step:1221/10000 train_time:96776ms step_avg:79.26ms
+[2025-07-08 01:03:22] [Rank 0] step:1241/10000 train_time:98260ms step_avg:79.18ms
+[2025-07-08 01:03:25] [Rank 0] step:1261/10000 train_time:100414ms step_avg:79.63ms
+[2025-07-08 01:03:26] [Rank 0] step:1281/10000 train_time:101864ms step_avg:79.52ms
+[2025-07-08 01:03:28] [Rank 0] step:1301/10000 train_time:103335ms step_avg:79.43ms
+[2025-07-08 01:03:29] [Rank 0] step:1321/10000 train_time:104807ms step_avg:79.34ms
+[2025-07-08 01:03:31] [Rank 0] step:1341/10000 train_time:106280ms step_avg:79.25ms
+[2025-07-08 01:03:33] [Rank 0] step:1361/10000 train_time:108421ms step_avg:79.66ms
+[2025-07-08 01:03:34] [Rank 0] step:1381/10000 train_time:109891ms step_avg:79.57ms
+[2025-07-08 01:03:36] [Rank 0] step:1401/10000 train_time:111363ms step_avg:79.49ms
+[2025-07-08 01:03:37] [Rank 0] step:1421/10000 train_time:112836ms step_avg:79.41ms
+[2025-07-08 01:03:39] [Rank 0] step:1441/10000 train_time:114358ms step_avg:79.36ms
+[2025-07-08 01:03:40] [Rank 0] step:1461/10000 train_time:116019ms step_avg:79.41ms
+[2025-07-08 01:03:42] [Rank 0] step:1481/10000 train_time:117491ms step_avg:79.33ms
+[2025-07-08 01:03:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:03:44] [Rank 0] PRINT: step:1500/10000 train_loss:1.0946 val_loss:1.0225 train_time:118964ms step_avg:79.31ms
+[2025-07-08 01:03:44] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:03:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:03:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:09:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:09:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:09:10] [Rank 0] Total Loss: 5.1472
+[2025-07-08 01:09:10] [Rank 0] Total FTA: 0.7110
+[2025-07-08 01:09:10] [Rank 0] Group 0 Loss: 5.4496
+[2025-07-08 01:09:10] [Rank 0] Group 1 Loss: 4.8758
+[2025-07-08 01:09:10] [Rank 0] Group 2 Loss: 4.9250
+[2025-07-08 01:09:10] [Rank 0] Group 3 Loss: 5.2293
+[2025-07-08 01:09:10] [Rank 0] Group 4 Loss: 5.1142
+[2025-07-08 01:09:10] [Rank 0] Group 5 Loss: 5.1239
+[2025-07-08 01:09:10] [Rank 0] Group 6 Loss: 5.0518
+[2025-07-08 01:09:10] [Rank 0] Group 7 Loss: 5.1179
+[2025-07-08 01:09:10] [Rank 0] Group 8 Loss: 5.1355
+[2025-07-08 01:09:10] [Rank 0] Group 9 Loss: 5.0953
+[2025-07-08 01:09:10] [Rank 0] Group 10 Loss: 5.1832
+[2025-07-08 01:09:10] [Rank 0] Group 11 Loss: 5.1420
+[2025-07-08 01:09:10] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-08 01:09:10] [Rank 0] Group 1 FTA: 0.8333
+[2025-07-08 01:09:10] [Rank 0] Group 2 FTA: 0.7240
+[2025-07-08 01:09:10] [Rank 0] Group 3 FTA: 0.6979
+[2025-07-08 01:09:10] [Rank 0] Group 4 FTA: 0.7552
+[2025-07-08 01:09:10] [Rank 0] Group 5 FTA: 0.6458
+[2025-07-08 01:09:10] [Rank 0] Group 6 FTA: 0.6693
+[2025-07-08 01:09:10] [Rank 0] Group 7 FTA: 0.6484
+[2025-07-08 01:09:10] [Rank 0] Group 8 FTA: 0.6510
+[2025-07-08 01:09:10] [Rank 0] Group 9 FTA: 0.6602
+[2025-07-08 01:09:10] [Rank 0] Group 10 FTA: 0.6680
+[2025-07-08 01:09:10] [Rank 0] Group 11 FTA: 0.6777
+[2025-07-08 01:09:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:09:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:09:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:09:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:09:12] [Rank 0] step:1501/10000 train_time:118984ms step_avg:79.27ms
+[2025-07-08 01:09:13] [Rank 0] step:1521/10000 train_time:120453ms step_avg:79.19ms
+[2025-07-08 01:09:15] [Rank 0] step:1541/10000 train_time:122591ms step_avg:79.55ms
+[2025-07-08 01:09:17] [Rank 0] step:1561/10000 train_time:124058ms step_avg:79.47ms
+[2025-07-08 01:09:18] [Rank 0] step:1581/10000 train_time:125526ms step_avg:79.40ms
+[2025-07-08 01:09:20] [Rank 0] step:1601/10000 train_time:126996ms step_avg:79.32ms
+[2025-07-08 01:09:22] [Rank 0] step:1621/10000 train_time:128521ms step_avg:79.28ms
+[2025-07-08 01:09:23] [Rank 0] step:1641/10000 train_time:130594ms step_avg:79.58ms
+[2025-07-08 01:09:25] [Rank 0] step:1661/10000 train_time:132064ms step_avg:79.51ms
+[2025-07-08 01:09:26] [Rank 0] step:1681/10000 train_time:133534ms step_avg:79.44ms
+[2025-07-08 01:09:28] [Rank 0] step:1701/10000 train_time:135004ms step_avg:79.37ms
+[2025-07-08 01:09:29] [Rank 0] step:1721/10000 train_time:136708ms step_avg:79.44ms
+[2025-07-08 01:09:31] [Rank 0] step:1741/10000 train_time:138181ms step_avg:79.37ms
+[2025-07-08 01:09:32] [Rank 0] step:1761/10000 train_time:139653ms step_avg:79.30ms
+[2025-07-08 01:09:34] [Rank 0] step:1781/10000 train_time:141121ms step_avg:79.24ms
+[2025-07-08 01:09:36] [Rank 0] step:1801/10000 train_time:143254ms step_avg:79.54ms
+[2025-07-08 01:09:37] [Rank 0] step:1821/10000 train_time:144704ms step_avg:79.46ms
+[2025-07-08 01:09:39] [Rank 0] step:1841/10000 train_time:146177ms step_avg:79.40ms
+[2025-07-08 01:09:40] [Rank 0] step:1861/10000 train_time:147649ms step_avg:79.34ms
+[2025-07-08 01:09:42] [Rank 0] step:1881/10000 train_time:149122ms step_avg:79.28ms
+[2025-07-08 01:09:44] [Rank 0] step:1901/10000 train_time:151263ms step_avg:79.57ms
+[2025-07-08 01:09:45] [Rank 0] step:1921/10000 train_time:152735ms step_avg:79.51ms
+[2025-07-08 01:09:47] [Rank 0] step:1941/10000 train_time:154208ms step_avg:79.45ms
+[2025-07-08 01:09:48] [Rank 0] step:1961/10000 train_time:155680ms step_avg:79.39ms
+[2025-07-08 01:09:50] [Rank 0] step:1981/10000 train_time:157202ms step_avg:79.35ms
+[2025-07-08 01:09:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:09:53] [Rank 0] PRINT: step:2000/10000 train_loss:0.9181 val_loss:0.9618 train_time:159267ms step_avg:79.63ms
+[2025-07-08 01:09:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:09:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:09:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:15:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:15:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:15:20] [Rank 0] Total Loss: 5.3072
+[2025-07-08 01:15:20] [Rank 0] Total FTA: 0.7286
+[2025-07-08 01:15:20] [Rank 0] Group 0 Loss: 5.5988
+[2025-07-08 01:15:20] [Rank 0] Group 1 Loss: 5.4097
+[2025-07-08 01:15:20] [Rank 0] Group 2 Loss: 5.1506
+[2025-07-08 01:15:20] [Rank 0] Group 3 Loss: 5.3297
+[2025-07-08 01:15:20] [Rank 0] Group 4 Loss: 5.2602
+[2025-07-08 01:15:20] [Rank 0] Group 5 Loss: 5.1588
+[2025-07-08 01:15:20] [Rank 0] Group 6 Loss: 5.2048
+[2025-07-08 01:15:20] [Rank 0] Group 7 Loss: 5.2810
+[2025-07-08 01:15:20] [Rank 0] Group 8 Loss: 5.1759
+[2025-07-08 01:15:20] [Rank 0] Group 9 Loss: 5.2553
+[2025-07-08 01:15:20] [Rank 0] Group 10 Loss: 5.2724
+[2025-07-08 01:15:20] [Rank 0] Group 11 Loss: 5.3012
+[2025-07-08 01:15:20] [Rank 0] Group 0 FTA: 0.5150
+[2025-07-08 01:15:20] [Rank 0] Group 1 FTA: 0.6615
+[2025-07-08 01:15:20] [Rank 0] Group 2 FTA: 0.8464
+[2025-07-08 01:15:20] [Rank 0] Group 3 FTA: 0.7005
+[2025-07-08 01:15:20] [Rank 0] Group 4 FTA: 0.8958
+[2025-07-08 01:15:20] [Rank 0] Group 5 FTA: 0.6693
+[2025-07-08 01:15:20] [Rank 0] Group 6 FTA: 0.7682
+[2025-07-08 01:15:20] [Rank 0] Group 7 FTA: 0.7812
+[2025-07-08 01:15:20] [Rank 0] Group 8 FTA: 0.7500
+[2025-07-08 01:15:20] [Rank 0] Group 9 FTA: 0.7734
+[2025-07-08 01:15:20] [Rank 0] Group 10 FTA: 0.7656
+[2025-07-08 01:15:20] [Rank 0] Group 11 FTA: 0.7676
+[2025-07-08 01:15:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:15:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:15:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:15:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:15:21] [Rank 0] step:2001/10000 train_time:159287ms step_avg:79.60ms
+[2025-07-08 01:15:23] [Rank 0] step:2021/10000 train_time:160756ms step_avg:79.54ms
+[2025-07-08 01:15:24] [Rank 0] step:2041/10000 train_time:162223ms step_avg:79.48ms
+[2025-07-08 01:15:26] [Rank 0] step:2061/10000 train_time:163688ms step_avg:79.42ms
+[2025-07-08 01:15:28] [Rank 0] step:2081/10000 train_time:165828ms step_avg:79.69ms
+[2025-07-08 01:15:29] [Rank 0] step:2101/10000 train_time:167297ms step_avg:79.63ms
+[2025-07-08 01:15:31] [Rank 0] step:2121/10000 train_time:168935ms step_avg:79.65ms
+[2025-07-08 01:15:32] [Rank 0] step:2141/10000 train_time:170404ms step_avg:79.59ms
+[2025-07-08 01:15:34] [Rank 0] step:2161/10000 train_time:171925ms step_avg:79.56ms
+[2025-07-08 01:15:35] [Rank 0] step:2181/10000 train_time:173578ms step_avg:79.59ms
+[2025-07-08 01:15:37] [Rank 0] step:2201/10000 train_time:175046ms step_avg:79.53ms
+[2025-07-08 01:15:38] [Rank 0] step:2221/10000 train_time:176516ms step_avg:79.48ms
+[2025-07-08 01:15:40] [Rank 0] step:2241/10000 train_time:178006ms step_avg:79.43ms
+[2025-07-08 01:15:42] [Rank 0] step:2261/10000 train_time:180146ms step_avg:79.68ms
+[2025-07-08 01:15:44] [Rank 0] step:2281/10000 train_time:181641ms step_avg:79.63ms
+[2025-07-08 01:15:45] [Rank 0] step:2301/10000 train_time:183135ms step_avg:79.59ms
+[2025-07-08 01:15:47] [Rank 0] step:2321/10000 train_time:184630ms step_avg:79.55ms
+[2025-07-08 01:15:48] [Rank 0] step:2341/10000 train_time:186385ms step_avg:79.62ms
+[2025-07-08 01:15:50] [Rank 0] step:2361/10000 train_time:187962ms step_avg:79.61ms
+[2025-07-08 01:15:51] [Rank 0] step:2381/10000 train_time:189458ms step_avg:79.57ms
+[2025-07-08 01:15:53] [Rank 0] step:2401/10000 train_time:190955ms step_avg:79.53ms
+[2025-07-08 01:15:54] [Rank 0] step:2421/10000 train_time:192452ms step_avg:79.49ms
+[2025-07-08 01:15:57] [Rank 0] step:2441/10000 train_time:194613ms step_avg:79.73ms
+[2025-07-08 01:15:58] [Rank 0] step:2461/10000 train_time:196111ms step_avg:79.69ms
+[2025-07-08 01:16:00] [Rank 0] step:2481/10000 train_time:197610ms step_avg:79.65ms
+[2025-07-08 01:16:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:16:02] [Rank 0] PRINT: step:2500/10000 train_loss:0.8933 val_loss:0.8803 train_time:199107ms step_avg:79.64ms
+[2025-07-08 01:16:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:16:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:16:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:21:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:21:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:21:29] [Rank 0] Total Loss: 5.1206
+[2025-07-08 01:21:29] [Rank 0] Total FTA: 0.8464
+[2025-07-08 01:21:29] [Rank 0] Group 0 Loss: 5.3037
+[2025-07-08 01:21:29] [Rank 0] Group 1 Loss: 4.8932
+[2025-07-08 01:21:29] [Rank 0] Group 2 Loss: 5.0541
+[2025-07-08 01:21:29] [Rank 0] Group 3 Loss: 5.1034
+[2025-07-08 01:21:29] [Rank 0] Group 4 Loss: 5.1108
+[2025-07-08 01:21:29] [Rank 0] Group 5 Loss: 5.1824
+[2025-07-08 01:21:29] [Rank 0] Group 6 Loss: 5.0236
+[2025-07-08 01:21:29] [Rank 0] Group 7 Loss: 5.1079
+[2025-07-08 01:21:29] [Rank 0] Group 8 Loss: 5.1524
+[2025-07-08 01:21:29] [Rank 0] Group 9 Loss: 5.0943
+[2025-07-08 01:21:29] [Rank 0] Group 10 Loss: 5.1458
+[2025-07-08 01:21:29] [Rank 0] Group 11 Loss: 5.1035
+[2025-07-08 01:21:29] [Rank 0] Group 0 FTA: 0.6775
+[2025-07-08 01:21:29] [Rank 0] Group 1 FTA: 0.6406
+[2025-07-08 01:21:29] [Rank 0] Group 2 FTA: 0.9193
+[2025-07-08 01:21:29] [Rank 0] Group 3 FTA: 0.9635
+[2025-07-08 01:21:29] [Rank 0] Group 4 FTA: 0.9193
+[2025-07-08 01:21:29] [Rank 0] Group 5 FTA: 0.9323
+[2025-07-08 01:21:29] [Rank 0] Group 6 FTA: 0.8333
+[2025-07-08 01:21:29] [Rank 0] Group 7 FTA: 0.8646
+[2025-07-08 01:21:29] [Rank 0] Group 8 FTA: 0.8568
+[2025-07-08 01:21:29] [Rank 0] Group 9 FTA: 0.8555
+[2025-07-08 01:21:29] [Rank 0] Group 10 FTA: 0.8887
+[2025-07-08 01:21:29] [Rank 0] Group 11 FTA: 0.8906
+[2025-07-08 01:21:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:21:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:21:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:21:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:21:30] [Rank 0] step:2501/10000 train_time:199128ms step_avg:79.62ms
+[2025-07-08 01:21:33] [Rank 0] step:2521/10000 train_time:200675ms step_avg:79.60ms
+[2025-07-08 01:21:34] [Rank 0] step:2541/10000 train_time:202757ms step_avg:79.79ms
+[2025-07-08 01:21:36] [Rank 0] step:2561/10000 train_time:204248ms step_avg:79.75ms
+[2025-07-08 01:21:37] [Rank 0] step:2581/10000 train_time:205738ms step_avg:79.71ms
+[2025-07-08 01:21:39] [Rank 0] step:2601/10000 train_time:207230ms step_avg:79.67ms
+[2025-07-08 01:21:41] [Rank 0] step:2621/10000 train_time:209368ms step_avg:79.88ms
+[2025-07-08 01:21:42] [Rank 0] step:2641/10000 train_time:210860ms step_avg:79.84ms
+[2025-07-08 01:21:44] [Rank 0] step:2661/10000 train_time:212353ms step_avg:79.80ms
+[2025-07-08 01:21:45] [Rank 0] step:2681/10000 train_time:213846ms step_avg:79.76ms
+[2025-07-08 01:21:47] [Rank 0] step:2701/10000 train_time:215392ms step_avg:79.75ms
+[2025-07-08 01:21:49] [Rank 0] step:2721/10000 train_time:217607ms step_avg:79.97ms
+[2025-07-08 01:21:50] [Rank 0] step:2741/10000 train_time:219107ms step_avg:79.94ms
+[2025-07-08 01:21:52] [Rank 0] step:2761/10000 train_time:220601ms step_avg:79.90ms
+[2025-07-08 01:21:53] [Rank 0] step:2781/10000 train_time:222097ms step_avg:79.86ms
+[2025-07-08 01:21:56] [Rank 0] step:2801/10000 train_time:224250ms step_avg:80.06ms
+[2025-07-08 01:21:57] [Rank 0] step:2821/10000 train_time:225745ms step_avg:80.02ms
+[2025-07-08 01:21:59] [Rank 0] step:2841/10000 train_time:227241ms step_avg:79.99ms
+[2025-07-08 01:22:00] [Rank 0] step:2861/10000 train_time:228736ms step_avg:79.95ms
+[2025-07-08 01:22:02] [Rank 0] step:2881/10000 train_time:230288ms step_avg:79.93ms
+[2025-07-08 01:22:04] [Rank 0] step:2901/10000 train_time:232395ms step_avg:80.11ms
+[2025-07-08 01:22:05] [Rank 0] step:2921/10000 train_time:233892ms step_avg:80.07ms
+[2025-07-08 01:22:07] [Rank 0] step:2941/10000 train_time:235392ms step_avg:80.04ms
+[2025-07-08 01:22:08] [Rank 0] step:2961/10000 train_time:236891ms step_avg:80.00ms
+[2025-07-08 01:22:10] [Rank 0] step:2981/10000 train_time:239035ms step_avg:80.19ms
+[2025-07-08 01:22:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:22:13] [Rank 0] PRINT: step:3000/10000 train_loss:0.8808 val_loss:0.8735 train_time:240531ms step_avg:80.18ms
+[2025-07-08 01:22:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:22:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:22:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:27:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:27:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:27:39] [Rank 0] Total Loss: 5.2891
+[2025-07-08 01:27:39] [Rank 0] Total FTA: 0.8848
+[2025-07-08 01:27:39] [Rank 0] Group 0 Loss: 5.5807
+[2025-07-08 01:27:39] [Rank 0] Group 1 Loss: 5.1817
+[2025-07-08 01:27:39] [Rank 0] Group 2 Loss: 5.0675
+[2025-07-08 01:27:39] [Rank 0] Group 3 Loss: 5.3357
+[2025-07-08 01:27:39] [Rank 0] Group 4 Loss: 5.2933
+[2025-07-08 01:27:39] [Rank 0] Group 5 Loss: 5.1754
+[2025-07-08 01:27:39] [Rank 0] Group 6 Loss: 5.1703
+[2025-07-08 01:27:39] [Rank 0] Group 7 Loss: 5.2723
+[2025-07-08 01:27:39] [Rank 0] Group 8 Loss: 5.2694
+[2025-07-08 01:27:39] [Rank 0] Group 9 Loss: 5.2910
+[2025-07-08 01:27:39] [Rank 0] Group 10 Loss: 5.2594
+[2025-07-08 01:27:39] [Rank 0] Group 11 Loss: 5.2897
+[2025-07-08 01:27:39] [Rank 0] Group 0 FTA: 0.8205
+[2025-07-08 01:27:39] [Rank 0] Group 1 FTA: 0.8281
+[2025-07-08 01:27:39] [Rank 0] Group 2 FTA: 0.8958
+[2025-07-08 01:27:39] [Rank 0] Group 3 FTA: 0.8776
+[2025-07-08 01:27:39] [Rank 0] Group 4 FTA: 0.8750
+[2025-07-08 01:27:39] [Rank 0] Group 5 FTA: 0.8490
+[2025-07-08 01:27:39] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-08 01:27:39] [Rank 0] Group 7 FTA: 0.9323
+[2025-07-08 01:27:39] [Rank 0] Group 8 FTA: 0.9219
+[2025-07-08 01:27:39] [Rank 0] Group 9 FTA: 0.9297
+[2025-07-08 01:27:39] [Rank 0] Group 10 FTA: 0.8867
+[2025-07-08 01:27:39] [Rank 0] Group 11 FTA: 0.9160
+[2025-07-08 01:27:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:27:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:27:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:27:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:27:40] [Rank 0] step:3001/10000 train_time:240551ms step_avg:80.16ms
+[2025-07-08 01:27:42] [Rank 0] step:3021/10000 train_time:242066ms step_avg:80.13ms
+[2025-07-08 01:27:43] [Rank 0] step:3041/10000 train_time:243557ms step_avg:80.09ms
+[2025-07-08 01:27:45] [Rank 0] step:3061/10000 train_time:245309ms step_avg:80.14ms
+[2025-07-08 01:27:47] [Rank 0] step:3081/10000 train_time:247208ms step_avg:80.24ms
+[2025-07-08 01:27:48] [Rank 0] step:3101/10000 train_time:248701ms step_avg:80.20ms
+[2025-07-08 01:27:50] [Rank 0] step:3121/10000 train_time:250193ms step_avg:80.16ms
+[2025-07-08 01:27:51] [Rank 0] step:3141/10000 train_time:251687ms step_avg:80.13ms
+[2025-07-08 01:27:54] [Rank 0] step:3161/10000 train_time:253858ms step_avg:80.31ms
+[2025-07-08 01:27:55] [Rank 0] step:3181/10000 train_time:255351ms step_avg:80.27ms
+[2025-07-08 01:27:57] [Rank 0] step:3201/10000 train_time:256846ms step_avg:80.24ms
+[2025-07-08 01:27:58] [Rank 0] step:3221/10000 train_time:258340ms step_avg:80.20ms
+[2025-07-08 01:28:00] [Rank 0] step:3241/10000 train_time:259834ms step_avg:80.17ms
+[2025-07-08 01:28:01] [Rank 0] step:3261/10000 train_time:261566ms step_avg:80.21ms
+[2025-07-08 01:28:03] [Rank 0] step:3281/10000 train_time:263062ms step_avg:80.18ms
+[2025-07-08 01:28:04] [Rank 0] step:3301/10000 train_time:264557ms step_avg:80.14ms
+[2025-07-08 01:28:06] [Rank 0] step:3321/10000 train_time:266052ms step_avg:80.11ms
+[2025-07-08 01:28:08] [Rank 0] step:3341/10000 train_time:268192ms step_avg:80.27ms
+[2025-07-08 01:28:09] [Rank 0] step:3361/10000 train_time:269775ms step_avg:80.27ms
+[2025-07-08 01:28:11] [Rank 0] step:3381/10000 train_time:271272ms step_avg:80.23ms
+[2025-07-08 01:28:12] [Rank 0] step:3401/10000 train_time:272767ms step_avg:80.20ms
+[2025-07-08 01:28:14] [Rank 0] step:3421/10000 train_time:274262ms step_avg:80.17ms
+[2025-07-08 01:28:16] [Rank 0] step:3441/10000 train_time:275998ms step_avg:80.21ms
+[2025-07-08 01:28:17] [Rank 0] step:3461/10000 train_time:277494ms step_avg:80.18ms
+[2025-07-08 01:28:19] [Rank 0] step:3481/10000 train_time:278991ms step_avg:80.15ms
+[2025-07-08 01:28:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:28:21] [Rank 0] PRINT: step:3500/10000 train_loss:0.8745 val_loss:0.8702 train_time:280488ms step_avg:80.14ms
+[2025-07-08 01:28:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:28:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:28:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:33:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:33:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:33:47] [Rank 0] Total Loss: 5.3218
+[2025-07-08 01:33:47] [Rank 0] Total FTA: 0.9091
+[2025-07-08 01:33:47] [Rank 0] Group 0 Loss: 5.6090
+[2025-07-08 01:33:47] [Rank 0] Group 1 Loss: 5.3527
+[2025-07-08 01:33:47] [Rank 0] Group 2 Loss: 5.1529
+[2025-07-08 01:33:47] [Rank 0] Group 3 Loss: 5.3051
+[2025-07-08 01:33:47] [Rank 0] Group 4 Loss: 5.3774
+[2025-07-08 01:33:47] [Rank 0] Group 5 Loss: 5.2543
+[2025-07-08 01:33:47] [Rank 0] Group 6 Loss: 5.1719
+[2025-07-08 01:33:47] [Rank 0] Group 7 Loss: 5.2774
+[2025-07-08 01:33:47] [Rank 0] Group 8 Loss: 5.3258
+[2025-07-08 01:33:47] [Rank 0] Group 9 Loss: 5.2439
+[2025-07-08 01:33:47] [Rank 0] Group 10 Loss: 5.2394
+[2025-07-08 01:33:47] [Rank 0] Group 11 Loss: 5.3009
+[2025-07-08 01:33:47] [Rank 0] Group 0 FTA: 0.8544
+[2025-07-08 01:33:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 01:33:47] [Rank 0] Group 2 FTA: 0.9089
+[2025-07-08 01:33:47] [Rank 0] Group 3 FTA: 0.8750
+[2025-07-08 01:33:47] [Rank 0] Group 4 FTA: 0.8854
+[2025-07-08 01:33:47] [Rank 0] Group 5 FTA: 0.9557
+[2025-07-08 01:33:47] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-08 01:33:47] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-08 01:33:47] [Rank 0] Group 8 FTA: 0.8984
+[2025-07-08 01:33:47] [Rank 0] Group 9 FTA: 0.9141
+[2025-07-08 01:33:47] [Rank 0] Group 10 FTA: 0.9102
+[2025-07-08 01:33:47] [Rank 0] Group 11 FTA: 0.9072
+[2025-07-08 01:33:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:33:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:33:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:33:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:33:48] [Rank 0] step:3501/10000 train_time:280508ms step_avg:80.12ms
+[2025-07-08 01:33:51] [Rank 0] step:3521/10000 train_time:282661ms step_avg:80.28ms
+[2025-07-08 01:33:52] [Rank 0] step:3541/10000 train_time:284151ms step_avg:80.25ms
+[2025-07-08 01:33:53] [Rank 0] step:3561/10000 train_time:285640ms step_avg:80.21ms
+[2025-07-08 01:33:55] [Rank 0] step:3581/10000 train_time:287131ms step_avg:80.18ms
+[2025-07-08 01:33:57] [Rank 0] step:3601/10000 train_time:288880ms step_avg:80.22ms
+[2025-07-08 01:33:59] [Rank 0] step:3621/10000 train_time:290772ms step_avg:80.30ms
+[2025-07-08 01:34:00] [Rank 0] step:3641/10000 train_time:292264ms step_avg:80.27ms
+[2025-07-08 01:34:02] [Rank 0] step:3661/10000 train_time:293757ms step_avg:80.24ms
+[2025-07-08 01:34:03] [Rank 0] step:3681/10000 train_time:295250ms step_avg:80.21ms
+[2025-07-08 01:34:05] [Rank 0] step:3701/10000 train_time:297383ms step_avg:80.35ms
+[2025-07-08 01:34:07] [Rank 0] step:3721/10000 train_time:298877ms step_avg:80.32ms
+[2025-07-08 01:34:08] [Rank 0] step:3741/10000 train_time:300371ms step_avg:80.29ms
+[2025-07-08 01:34:10] [Rank 0] step:3761/10000 train_time:301867ms step_avg:80.26ms
+[2025-07-08 01:34:12] [Rank 0] step:3781/10000 train_time:303416ms step_avg:80.25ms
+[2025-07-08 01:34:13] [Rank 0] step:3801/10000 train_time:305526ms step_avg:80.38ms
+[2025-07-08 01:34:15] [Rank 0] step:3821/10000 train_time:307020ms step_avg:80.35ms
+[2025-07-08 01:34:16] [Rank 0] step:3841/10000 train_time:308516ms step_avg:80.32ms
+[2025-07-08 01:34:18] [Rank 0] step:3861/10000 train_time:310012ms step_avg:80.29ms
+[2025-07-08 01:34:20] [Rank 0] step:3881/10000 train_time:312168ms step_avg:80.43ms
+[2025-07-08 01:34:22] [Rank 0] step:3901/10000 train_time:313662ms step_avg:80.41ms
+[2025-07-08 01:34:23] [Rank 0] step:3921/10000 train_time:315157ms step_avg:80.38ms
+[2025-07-08 01:34:24] [Rank 0] step:3941/10000 train_time:316652ms step_avg:80.35ms
+[2025-07-08 01:34:27] [Rank 0] step:3961/10000 train_time:318149ms step_avg:80.32ms
+[2025-07-08 01:34:28] [Rank 0] step:3981/10000 train_time:320451ms step_avg:80.50ms
+[2025-07-08 01:34:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:34:31] [Rank 0] PRINT: step:4000/10000 train_loss:0.8701 val_loss:0.8660 train_time:321945ms step_avg:80.49ms
+[2025-07-08 01:34:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:34:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:34:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:39:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:39:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:39:55] [Rank 0] Total Loss: 5.3384
+[2025-07-08 01:39:55] [Rank 0] Total FTA: 0.9384
+[2025-07-08 01:39:55] [Rank 0] Group 0 Loss: 5.6330
+[2025-07-08 01:39:55] [Rank 0] Group 1 Loss: 5.3518
+[2025-07-08 01:39:55] [Rank 0] Group 2 Loss: 5.0201
+[2025-07-08 01:39:55] [Rank 0] Group 3 Loss: 5.2993
+[2025-07-08 01:39:55] [Rank 0] Group 4 Loss: 5.2781
+[2025-07-08 01:39:55] [Rank 0] Group 5 Loss: 5.2838
+[2025-07-08 01:39:55] [Rank 0] Group 6 Loss: 5.2729
+[2025-07-08 01:39:55] [Rank 0] Group 7 Loss: 5.3082
+[2025-07-08 01:39:55] [Rank 0] Group 8 Loss: 5.3450
+[2025-07-08 01:39:55] [Rank 0] Group 9 Loss: 5.3514
+[2025-07-08 01:39:55] [Rank 0] Group 10 Loss: 5.2928
+[2025-07-08 01:39:55] [Rank 0] Group 11 Loss: 5.3425
+[2025-07-08 01:39:55] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 01:39:55] [Rank 0] Group 1 FTA: 0.8047
+[2025-07-08 01:39:55] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-08 01:39:55] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 01:39:55] [Rank 0] Group 4 FTA: 0.9349
+[2025-07-08 01:39:55] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-08 01:39:55] [Rank 0] Group 6 FTA: 0.9245
+[2025-07-08 01:39:55] [Rank 0] Group 7 FTA: 0.9427
+[2025-07-08 01:39:55] [Rank 0] Group 8 FTA: 0.9141
+[2025-07-08 01:39:55] [Rank 0] Group 9 FTA: 0.9180
+[2025-07-08 01:39:55] [Rank 0] Group 10 FTA: 0.9375
+[2025-07-08 01:39:55] [Rank 0] Group 11 FTA: 0.9414
+[2025-07-08 01:39:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:39:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:39:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:39:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:39:57] [Rank 0] step:4001/10000 train_time:321965ms step_avg:80.47ms
+[2025-07-08 01:39:58] [Rank 0] step:4021/10000 train_time:323472ms step_avg:80.45ms
+[2025-07-08 01:40:00] [Rank 0] step:4041/10000 train_time:324961ms step_avg:80.42ms
+[2025-07-08 01:40:02] [Rank 0] step:4061/10000 train_time:327101ms step_avg:80.55ms
+[2025-07-08 01:40:03] [Rank 0] step:4081/10000 train_time:328591ms step_avg:80.52ms
+[2025-07-08 01:40:05] [Rank 0] step:4101/10000 train_time:330081ms step_avg:80.49ms
+[2025-07-08 01:40:06] [Rank 0] step:4121/10000 train_time:331572ms step_avg:80.46ms
+[2025-07-08 01:40:08] [Rank 0] step:4141/10000 train_time:333116ms step_avg:80.44ms
+[2025-07-08 01:40:10] [Rank 0] step:4161/10000 train_time:334796ms step_avg:80.46ms
+[2025-07-08 01:40:11] [Rank 0] step:4181/10000 train_time:336289ms step_avg:80.43ms
+[2025-07-08 01:40:13] [Rank 0] step:4201/10000 train_time:337781ms step_avg:80.40ms
+[2025-07-08 01:40:14] [Rank 0] step:4221/10000 train_time:339275ms step_avg:80.38ms
+[2025-07-08 01:40:16] [Rank 0] step:4241/10000 train_time:341439ms step_avg:80.51ms
+[2025-07-08 01:40:18] [Rank 0] step:4261/10000 train_time:342931ms step_avg:80.48ms
+[2025-07-08 01:40:19] [Rank 0] step:4281/10000 train_time:344426ms step_avg:80.45ms
+[2025-07-08 01:40:21] [Rank 0] step:4301/10000 train_time:345922ms step_avg:80.43ms
+[2025-07-08 01:40:23] [Rank 0] step:4321/10000 train_time:347674ms step_avg:80.46ms
+[2025-07-08 01:40:24] [Rank 0] step:4341/10000 train_time:349583ms step_avg:80.53ms
+[2025-07-08 01:40:26] [Rank 0] step:4361/10000 train_time:351080ms step_avg:80.50ms
+[2025-07-08 01:40:27] [Rank 0] step:4381/10000 train_time:352575ms step_avg:80.48ms
+[2025-07-08 01:40:29] [Rank 0] step:4401/10000 train_time:354073ms step_avg:80.45ms
+[2025-07-08 01:40:31] [Rank 0] step:4421/10000 train_time:356226ms step_avg:80.58ms
+[2025-07-08 01:40:32] [Rank 0] step:4441/10000 train_time:357722ms step_avg:80.55ms
+[2025-07-08 01:40:34] [Rank 0] step:4461/10000 train_time:359219ms step_avg:80.52ms
+[2025-07-08 01:40:35] [Rank 0] step:4481/10000 train_time:360715ms step_avg:80.50ms
+[2025-07-08 01:40:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:40:38] [Rank 0] PRINT: step:4500/10000 train_loss:0.8668 val_loss:0.8649 train_time:362211ms step_avg:80.49ms
+[2025-07-08 01:40:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:40:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:40:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:46:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:46:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:46:05] [Rank 0] Total Loss: 5.3880
+[2025-07-08 01:46:05] [Rank 0] Total FTA: 0.9043
+[2025-07-08 01:46:05] [Rank 0] Group 0 Loss: 5.5821
+[2025-07-08 01:46:05] [Rank 0] Group 1 Loss: 5.5067
+[2025-07-08 01:46:05] [Rank 0] Group 2 Loss: 5.2166
+[2025-07-08 01:46:05] [Rank 0] Group 3 Loss: 5.2431
+[2025-07-08 01:46:05] [Rank 0] Group 4 Loss: 5.3199
+[2025-07-08 01:46:05] [Rank 0] Group 5 Loss: 5.3711
+[2025-07-08 01:46:05] [Rank 0] Group 6 Loss: 5.2454
+[2025-07-08 01:46:05] [Rank 0] Group 7 Loss: 5.4430
+[2025-07-08 01:46:05] [Rank 0] Group 8 Loss: 5.3898
+[2025-07-08 01:46:05] [Rank 0] Group 9 Loss: 5.3418
+[2025-07-08 01:46:05] [Rank 0] Group 10 Loss: 5.3690
+[2025-07-08 01:46:05] [Rank 0] Group 11 Loss: 5.4013
+[2025-07-08 01:46:05] [Rank 0] Group 0 FTA: 0.8179
+[2025-07-08 01:46:05] [Rank 0] Group 1 FTA: 0.8438
+[2025-07-08 01:46:05] [Rank 0] Group 2 FTA: 0.9323
+[2025-07-08 01:46:05] [Rank 0] Group 3 FTA: 0.8750
+[2025-07-08 01:46:05] [Rank 0] Group 4 FTA: 0.9323
+[2025-07-08 01:46:05] [Rank 0] Group 5 FTA: 0.9427
+[2025-07-08 01:46:05] [Rank 0] Group 6 FTA: 0.9167
+[2025-07-08 01:46:05] [Rank 0] Group 7 FTA: 0.9115
+[2025-07-08 01:46:05] [Rank 0] Group 8 FTA: 0.9245
+[2025-07-08 01:46:05] [Rank 0] Group 9 FTA: 0.9336
+[2025-07-08 01:46:05] [Rank 0] Group 10 FTA: 0.9434
+[2025-07-08 01:46:05] [Rank 0] Group 11 FTA: 0.9258
+[2025-07-08 01:46:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:46:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:46:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:46:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:46:08] [Rank 0] step:4501/10000 train_time:362238ms step_avg:80.48ms
+[2025-07-08 01:46:09] [Rank 0] step:4521/10000 train_time:364442ms step_avg:80.61ms
+[2025-07-08 01:46:11] [Rank 0] step:4541/10000 train_time:365932ms step_avg:80.58ms
+[2025-07-08 01:46:12] [Rank 0] step:4561/10000 train_time:367421ms step_avg:80.56ms
+[2025-07-08 01:46:14] [Rank 0] step:4581/10000 train_time:368911ms step_avg:80.53ms
+[2025-07-08 01:46:15] [Rank 0] step:4601/10000 train_time:370638ms step_avg:80.56ms
+[2025-07-08 01:46:17] [Rank 0] step:4621/10000 train_time:372129ms step_avg:80.53ms
+[2025-07-08 01:46:18] [Rank 0] step:4641/10000 train_time:373620ms step_avg:80.50ms
+[2025-07-08 01:46:20] [Rank 0] step:4661/10000 train_time:375112ms step_avg:80.48ms
+[2025-07-08 01:46:22] [Rank 0] step:4681/10000 train_time:377271ms step_avg:80.60ms
+[2025-07-08 01:46:23] [Rank 0] step:4701/10000 train_time:378747ms step_avg:80.57ms
+[2025-07-08 01:46:25] [Rank 0] step:4721/10000 train_time:380241ms step_avg:80.54ms
+[2025-07-08 01:46:26] [Rank 0] step:4741/10000 train_time:381735ms step_avg:80.52ms
+[2025-07-08 01:46:28] [Rank 0] step:4761/10000 train_time:383228ms step_avg:80.49ms
+[2025-07-08 01:46:30] [Rank 0] step:4781/10000 train_time:385064ms step_avg:80.54ms
+[2025-07-08 01:46:31] [Rank 0] step:4801/10000 train_time:386558ms step_avg:80.52ms
+[2025-07-08 01:46:33] [Rank 0] step:4821/10000 train_time:388055ms step_avg:80.49ms
+[2025-07-08 01:46:34] [Rank 0] step:4841/10000 train_time:389552ms step_avg:80.47ms
+[2025-07-08 01:46:36] [Rank 0] step:4861/10000 train_time:391100ms step_avg:80.46ms
+[2025-07-08 01:46:37] [Rank 0] step:4881/10000 train_time:392585ms step_avg:80.43ms
+[2025-07-08 01:46:39] [Rank 0] step:4901/10000 train_time:394084ms step_avg:80.41ms
+[2025-07-08 01:46:40] [Rank 0] step:4921/10000 train_time:395581ms step_avg:80.39ms
+[2025-07-08 01:46:42] [Rank 0] step:4941/10000 train_time:397078ms step_avg:80.36ms
+[2025-07-08 01:46:44] [Rank 0] step:4961/10000 train_time:399249ms step_avg:80.48ms
+[2025-07-08 01:46:45] [Rank 0] step:4981/10000 train_time:400745ms step_avg:80.45ms
+[2025-07-08 01:46:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:46:48] [Rank 0] PRINT: step:5000/10000 train_loss:0.8647 val_loss:0.8640 train_time:402243ms step_avg:80.45ms
+[2025-07-08 01:46:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:46:48] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:46:48] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 01:46:48] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 01:52:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 01:52:12] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 01:52:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 01:52:12] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 01:52:12] [Rank 0] Total Loss: 5.3913 +[2025-07-08 01:52:12] [Rank 0] Total Loss: 5.3913 +[2025-07-08 01:52:12] [Rank 0] Total FTA: 0.9130 +[2025-07-08 01:52:12] [Rank 0] Total FTA: 0.9130 +[2025-07-08 01:52:12] [Rank 0] Group 0 Loss: 5.7443 +[2025-07-08 01:52:12] [Rank 0] Group 0 Loss: 5.7443 +[2025-07-08 01:52:12] [Rank 0] Group 1 Loss: 5.3119 +[2025-07-08 01:52:12] [Rank 0] Group 1 Loss: 5.3119 +[2025-07-08 01:52:12] [Rank 0] Group 2 Loss: 5.1892 +[2025-07-08 01:52:12] [Rank 0] Group 2 Loss: 5.1892 +[2025-07-08 01:52:12] [Rank 0] Group 3 Loss: 5.3978 +[2025-07-08 01:52:12] [Rank 0] Group 3 Loss: 5.3978 +[2025-07-08 01:52:12] [Rank 0] Group 4 Loss: 5.3715 +[2025-07-08 01:52:12] [Rank 0] Group 4 Loss: 5.3715 +[2025-07-08 01:52:12] [Rank 0] Group 5 Loss: 5.3618 +[2025-07-08 01:52:12] [Rank 0] Group 5 Loss: 5.3618 +[2025-07-08 01:52:12] [Rank 0] Group 6 Loss: 5.2599 +[2025-07-08 01:52:12] [Rank 0] Group 6 Loss: 5.2599 +[2025-07-08 01:52:12] [Rank 0] Group 7 Loss: 5.3955 +[2025-07-08 01:52:12] [Rank 0] Group 7 Loss: 5.3955 +[2025-07-08 01:52:12] [Rank 0] Group 8 Loss: 5.3271 +[2025-07-08 01:52:12] [Rank 0] Group 8 Loss: 5.3271 +[2025-07-08 01:52:12] [Rank 0] Group 9 Loss: 5.2963 +[2025-07-08 01:52:12] [Rank 0] Group 9 Loss: 5.2963 +[2025-07-08 01:52:12] [Rank 0] Group 10 Loss: 5.3277 +[2025-07-08 01:52:12] [Rank 0] Group 10 Loss: 5.3277 +[2025-07-08 01:52:12] [Rank 0] Group 11 Loss: 5.3749 +[2025-07-08 01:52:12] [Rank 0] Group 11 Loss: 5.3749 +[2025-07-08 01:52:12] [Rank 0] Group 0 FTA: 0.7958 +[2025-07-08 01:52:12] [Rank 0] Group 0 FTA: 0.7958 +[2025-07-08 01:52:12] [Rank 0] Group 1 FTA: 0.7839 +[2025-07-08 01:52:12] [Rank 0] Group 1 FTA: 0.7839 +[2025-07-08 01:52:12] [Rank 0] Group 2 FTA: 0.9193 +[2025-07-08 01:52:12] [Rank 0] Group 2 FTA: 0.9193 +[2025-07-08 01:52:12] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 01:52:12] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 01:52:12] [Rank 0] Group 4 FTA: 0.9609 +[2025-07-08 01:52:12] [Rank 0] Group 4 FTA: 0.9609 +[2025-07-08 01:52:12] [Rank 0] Group 5 FTA: 0.9635 +[2025-07-08 01:52:12] [Rank 0] Group 5 FTA: 0.9635 +[2025-07-08 01:52:12] [Rank 0] Group 6 FTA: 0.9297 +[2025-07-08 01:52:12] [Rank 0] Group 6 FTA: 0.9297 +[2025-07-08 01:52:12] [Rank 0] Group 7 FTA: 0.9141 +[2025-07-08 01:52:12] [Rank 0] Group 7 FTA: 0.9141 +[2025-07-08 01:52:12] [Rank 0] Group 8 FTA: 0.9427 +[2025-07-08 01:52:12] [Rank 0] Group 8 FTA: 0.9427 +[2025-07-08 01:52:12] [Rank 0] Group 9 FTA: 0.9336 +[2025-07-08 01:52:12] [Rank 0] Group 9 FTA: 0.9336 +[2025-07-08 01:52:12] [Rank 0] Group 10 FTA: 0.9434 +[2025-07-08 01:52:12] [Rank 0] Group 10 FTA: 0.9434 +[2025-07-08 01:52:12] [Rank 0] Group 11 FTA: 0.9395 +[2025-07-08 01:52:12] [Rank 0] Group 11 FTA: 0.9395 +[2025-07-08 01:52:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 01:52:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 01:52:13] [Rank 0] [✓] Per-Class FTA curve updated 
+[2025-07-08 01:52:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:52:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:52:14] [Rank 0] step:5001/10000 train_time:402262ms step_avg:80.44ms
+[2025-07-08 01:52:15] [Rank 0] step:5021/10000 train_time:403759ms step_avg:80.41ms
+[2025-07-08 01:52:18] [Rank 0] step:5041/10000 train_time:405912ms step_avg:80.52ms
+[2025-07-08 01:52:19] [Rank 0] step:5061/10000 train_time:407383ms step_avg:80.49ms
+[2025-07-08 01:52:21] [Rank 0] step:5081/10000 train_time:408871ms step_avg:80.47ms
+[2025-07-08 01:52:22] [Rank 0] step:5101/10000 train_time:410361ms step_avg:80.45ms
+[2025-07-08 01:52:24] [Rank 0] step:5121/10000 train_time:412005ms step_avg:80.45ms
+[2025-07-08 01:52:26] [Rank 0] step:5141/10000 train_time:414156ms step_avg:80.56ms
+[2025-07-08 01:52:27] [Rank 0] step:5161/10000 train_time:415646ms step_avg:80.54ms
+[2025-07-08 01:52:29] [Rank 0] step:5181/10000 train_time:417138ms step_avg:80.51ms
+[2025-07-08 01:52:30] [Rank 0] step:5201/10000 train_time:418632ms step_avg:80.49ms
+[2025-07-08 01:52:32] [Rank 0] step:5221/10000 train_time:420792ms step_avg:80.60ms
+[2025-07-08 01:52:34] [Rank 0] step:5241/10000 train_time:422269ms step_avg:80.57ms
+[2025-07-08 01:52:35] [Rank 0] step:5261/10000 train_time:423762ms step_avg:80.55ms
+[2025-07-08 01:52:37] [Rank 0] step:5281/10000 train_time:425258ms step_avg:80.53ms
+[2025-07-08 01:52:38] [Rank 0] step:5301/10000 train_time:426754ms step_avg:80.50ms
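The step_avg column above is cumulative train_time divided by the current step index (for example, 402262 ms / 5001 ≈ 80.44 ms). A minimal sketch of that bookkeeping; the names here are illustrative, not taken from the training script:

    # Sketch of the step_avg arithmetic the log reports (names illustrative).
    def step_avg_ms(train_time_ms: float, step: int) -> float:
        """Average wall-clock milliseconds per training step so far."""
        return train_time_ms / step

    assert round(step_avg_ms(402262, 5001), 2) == 80.44  # matches step:5001 above
    assert round(step_avg_ms(646935, 8001), 2) == 80.86  # matches step:8001 below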
+[2025-07-08 01:52:41] [Rank 0] step:5321/10000 train_time:428910ms step_avg:80.61ms
+[2025-07-08 01:52:42] [Rank 0] step:5341/10000 train_time:430404ms step_avg:80.58ms
+[2025-07-08 01:52:44] [Rank 0] step:5361/10000 train_time:431899ms step_avg:80.56ms
+[2025-07-08 01:52:45] [Rank 0] step:5381/10000 train_time:433399ms step_avg:80.54ms
+[2025-07-08 01:52:47] [Rank 0] step:5401/10000 train_time:434894ms step_avg:80.52ms
+[2025-07-08 01:52:49] [Rank 0] step:5421/10000 train_time:437054ms step_avg:80.62ms
+[2025-07-08 01:52:50] [Rank 0] step:5441/10000 train_time:438550ms step_avg:80.60ms
+[2025-07-08 01:52:52] [Rank 0] step:5461/10000 train_time:440048ms step_avg:80.58ms
+[2025-07-08 01:52:53] [Rank 0] step:5481/10000 train_time:441545ms step_avg:80.56ms
+[2025-07-08 01:52:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:52:56] [Rank 0] PRINT: step:5500/10000 train_loss:0.8629 val_loss:0.8631 train_time:443277ms step_avg:80.60ms
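The recurring divisibility warning is plain integer arithmetic: 1966080 validation tokens make 7 full batches of 262144 with 131072 tokens (half a batch) left over, and that remainder is what "might be missed" on each validation pass. A minimal sketch of the check, with hypothetical names rather than the script's own:

    # Sketch of the divisibility check behind the warning (names hypothetical).
    val_tokens = 1966080
    val_batch_size = 262144

    full_batches, leftover = divmod(val_tokens, val_batch_size)  # -> (7, 131072)
    if leftover != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")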
+[2025-07-08 01:52:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:52:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:52:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 01:58:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 01:58:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 01:58:21] [Rank 0] Total Loss: 5.3737
+[2025-07-08 01:58:21] [Rank 0] Total FTA: 0.9595
+[2025-07-08 01:58:21] [Rank 0] Group 0 Loss: 5.7089
+[2025-07-08 01:58:21] [Rank 0] Group 1 Loss: 5.2997
+[2025-07-08 01:58:21] [Rank 0] Group 2 Loss: 5.2603
+[2025-07-08 01:58:21] [Rank 0] Group 3 Loss: 5.3111
+[2025-07-08 01:58:21] [Rank 0] Group 4 Loss: 5.3453
+[2025-07-08 01:58:21] [Rank 0] Group 5 Loss: 5.3159
+[2025-07-08 01:58:21] [Rank 0] Group 6 Loss: 5.2837
+[2025-07-08 01:58:21] [Rank 0] Group 7 Loss: 5.3219
+[2025-07-08 01:58:21] [Rank 0] Group 8 Loss: 5.3416
+[2025-07-08 01:58:21] [Rank 0] Group 9 Loss: 5.3106
+[2025-07-08 01:58:21] [Rank 0] Group 10 Loss: 5.3219
+[2025-07-08 01:58:21] [Rank 0] Group 11 Loss: 5.3550
+[2025-07-08 01:58:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 01:58:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 01:58:21] [Rank 0] Group 2 FTA: 0.9245
+[2025-07-08 01:58:21] [Rank 0] Group 3 FTA: 0.9688
+[2025-07-08 01:58:21] [Rank 0] Group 4 FTA: 0.9479
+[2025-07-08 01:58:21] [Rank 0] Group 5 FTA: 0.9714
+[2025-07-08 01:58:21] [Rank 0] Group 6 FTA: 0.9479
+[2025-07-08 01:58:21] [Rank 0] Group 7 FTA: 0.9453
+[2025-07-08 01:58:21] [Rank 0] Group 8 FTA: 0.9531
+[2025-07-08 01:58:21] [Rank 0] Group 9 FTA: 0.9609
+[2025-07-08 01:58:21] [Rank 0] Group 10 FTA: 0.9355
+[2025-07-08 01:58:21] [Rank 0] Group 11 FTA: 0.9473
+[2025-07-08 01:58:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 01:58:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 01:58:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 01:58:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 01:58:23] [Rank 0] step:5501/10000 train_time:443297ms step_avg:80.58ms
+[2025-07-08 01:58:24] [Rank 0] step:5521/10000 train_time:444802ms step_avg:80.57ms
+[2025-07-08 01:58:26] [Rank 0] step:5541/10000 train_time:446290ms step_avg:80.54ms
+[2025-07-08 01:58:27] [Rank 0] step:5561/10000 train_time:447781ms step_avg:80.52ms
+[2025-07-08 01:58:29] [Rank 0] step:5581/10000 train_time:449958ms step_avg:80.62ms
+[2025-07-08 01:58:31] [Rank 0] step:5601/10000 train_time:451429ms step_avg:80.60ms
+[2025-07-08 01:58:32] [Rank 0] step:5621/10000 train_time:452918ms step_avg:80.58ms
+[2025-07-08 01:58:34] [Rank 0] step:5641/10000 train_time:454410ms step_avg:80.55ms
+[2025-07-08 01:58:35] [Rank 0] step:5661/10000 train_time:455902ms step_avg:80.53ms
+[2025-07-08 01:58:37] [Rank 0] step:5681/10000 train_time:458042ms step_avg:80.63ms
+[2025-07-08 01:58:39] [Rank 0] step:5701/10000 train_time:459533ms step_avg:80.61ms
+[2025-07-08 01:58:40] [Rank 0] step:5721/10000 train_time:461025ms step_avg:80.58ms
+[2025-07-08 01:58:42] [Rank 0] step:5741/10000 train_time:462680ms step_avg:80.59ms
+[2025-07-08 01:58:44] [Rank 0] step:5761/10000 train_time:464838ms step_avg:80.69ms
+[2025-07-08 01:58:46] [Rank 0] step:5781/10000 train_time:466314ms step_avg:80.66ms
+[2025-07-08 01:58:47] [Rank 0] step:5801/10000 train_time:467807ms step_avg:80.64ms
+[2025-07-08 01:58:49] [Rank 0] step:5821/10000 train_time:469302ms step_avg:80.62ms
+[2025-07-08 01:58:50] [Rank 0] step:5841/10000 train_time:470796ms step_avg:80.60ms
+[2025-07-08 01:58:52] [Rank 0] step:5861/10000 train_time:472956ms step_avg:80.70ms
+[2025-07-08 01:58:54] [Rank 0] step:5881/10000 train_time:474448ms step_avg:80.67ms
+[2025-07-08 01:58:55] [Rank 0] step:5901/10000 train_time:475943ms step_avg:80.65ms
+[2025-07-08 01:58:57] [Rank 0] step:5921/10000 train_time:477439ms step_avg:80.63ms
+[2025-07-08 01:58:59] [Rank 0] step:5941/10000 train_time:478935ms step_avg:80.62ms
+[2025-07-08 01:59:00] [Rank 0] step:5961/10000 train_time:481098ms step_avg:80.71ms
+[2025-07-08 01:59:02] [Rank 0] step:5981/10000 train_time:482592ms step_avg:80.69ms
+[2025-07-08 01:59:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 01:59:04] [Rank 0] PRINT: step:6000/10000 train_loss:0.8617 val_loss:0.8622 train_time:484087ms step_avg:80.68ms
+[2025-07-08 01:59:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 01:59:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 01:59:04] [Rank 0] Evaluation set size after sampling: 5633
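Every evaluation pass reports the same 5633 samples against a ~5000 target, which is consistent with fixed-seed per-group quotas over the 12 groups, where rounding and group sizes push the total away from the nominal target. A minimal stratified-sampling sketch under that assumption; the data layout and names are hypothetical, and the exact rule that yields 5633 is not recoverable from the log:

    import random
    from collections import defaultdict

    def stratified_sample(examples, target=5000, seed=0):
        """examples: iterable of (group_id, sample) pairs (hypothetical layout)."""
        rng = random.Random(seed)              # fixed seed -> same set every pass
        by_group = defaultdict(list)
        for group_id, sample in examples:
            by_group[group_id].append(sample)
        quota = -(-target // len(by_group))    # ceil(target / n_groups)
        picked = []
        for members in by_group.values():
            rng.shuffle(members)
            picked.extend(members[:quota])     # small groups contribute everything
        return picked                          # total can differ from `target`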
+[2025-07-08 02:04:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:04:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:04:28] [Rank 0] Total Loss: 5.4465
+[2025-07-08 02:04:28] [Rank 0] Total FTA: 0.9462
+[2025-07-08 02:04:28] [Rank 0] Group 0 Loss: 5.5856
+[2025-07-08 02:04:28] [Rank 0] Group 1 Loss: 5.4739
+[2025-07-08 02:04:28] [Rank 0] Group 2 Loss: 5.3310
+[2025-07-08 02:04:28] [Rank 0] Group 3 Loss: 5.5116
+[2025-07-08 02:04:28] [Rank 0] Group 4 Loss: 5.4179
+[2025-07-08 02:04:28] [Rank 0] Group 5 Loss: 5.4217
+[2025-07-08 02:04:28] [Rank 0] Group 6 Loss: 5.3307
+[2025-07-08 02:04:28] [Rank 0] Group 7 Loss: 5.4466
+[2025-07-08 02:04:28] [Rank 0] Group 8 Loss: 5.4918
+[2025-07-08 02:04:28] [Rank 0] Group 9 Loss: 5.3714
+[2025-07-08 02:04:28] [Rank 0] Group 10 Loss: 5.4019
+[2025-07-08 02:04:28] [Rank 0] Group 11 Loss: 5.4380
+[2025-07-08 02:04:28] [Rank 0] Group 0 FTA: 0.8349
+[2025-07-08 02:04:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 02:04:28] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 02:04:28] [Rank 0] Group 3 FTA: 0.9688
+[2025-07-08 02:04:28] [Rank 0] Group 4 FTA: 0.9505
+[2025-07-08 02:04:28] [Rank 0] Group 5 FTA: 0.9531
+[2025-07-08 02:04:28] [Rank 0] Group 6 FTA: 0.9661
+[2025-07-08 02:04:28] [Rank 0] Group 7 FTA: 0.9635
+[2025-07-08 02:04:28] [Rank 0] Group 8 FTA: 0.9557
+[2025-07-08 02:04:28] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-08 02:04:28] [Rank 0] Group 10 FTA: 0.9531
+[2025-07-08 02:04:28] [Rank 0] Group 11 FTA: 0.9570
+[2025-07-08 02:04:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 02:04:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 02:04:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 02:04:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
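Each evaluation pass overwrites the same four PNGs in the run's log directory. A minimal matplotlib sketch of the per-class loss plot; the history structure and function name are hypothetical, not taken from the script:

    import matplotlib
    matplotlib.use("Agg")  # headless backend: write PNGs without a display
    import matplotlib.pyplot as plt

    def save_per_class_loss(history, out_path):
        """history: {group_id: [(step, loss), ...]} (hypothetical layout)."""
        fig, ax = plt.subplots()
        for group_id, points in sorted(history.items()):
            steps, losses = zip(*points)
            ax.plot(steps, losses, label=f"Group {group_id}")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval loss")
        ax.legend(ncol=2, fontsize="small")
        fig.savefig(out_path)  # same path every pass, so the curve is "updated"
        plt.close(fig)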
+[2025-07-08 02:04:30] [Rank 0] step:6001/10000 train_time:484107ms step_avg:80.67ms
+[2025-07-08 02:04:31] [Rank 0] step:6021/10000 train_time:485610ms step_avg:80.65ms
+[2025-07-08 02:04:33] [Rank 0] step:6041/10000 train_time:487746ms step_avg:80.74ms
+[2025-07-08 02:04:35] [Rank 0] step:6061/10000 train_time:489236ms step_avg:80.72ms
+[2025-07-08 02:04:36] [Rank 0] step:6081/10000 train_time:490726ms step_avg:80.70ms
+[2025-07-08 02:04:38] [Rank 0] step:6101/10000 train_time:492216ms step_avg:80.68ms
+[2025-07-08 02:04:40] [Rank 0] step:6121/10000 train_time:494384ms step_avg:80.77ms
+[2025-07-08 02:04:41] [Rank 0] step:6141/10000 train_time:495858ms step_avg:80.75ms
+[2025-07-08 02:04:43] [Rank 0] step:6161/10000 train_time:497348ms step_avg:80.73ms
+[2025-07-08 02:04:44] [Rank 0] step:6181/10000 train_time:498840ms step_avg:80.71ms
+[2025-07-08 02:04:46] [Rank 0] step:6201/10000 train_time:500332ms step_avg:80.69ms
+[2025-07-08 02:04:48] [Rank 0] step:6221/10000 train_time:502468ms step_avg:80.77ms
+[2025-07-08 02:04:49] [Rank 0] step:6241/10000 train_time:503961ms step_avg:80.75ms
+[2025-07-08 02:04:51] [Rank 0] step:6261/10000 train_time:505453ms step_avg:80.73ms
+[2025-07-08 02:04:52] [Rank 0] step:6281/10000 train_time:506948ms step_avg:80.71ms
+[2025-07-08 02:04:54] [Rank 0] step:6301/10000 train_time:508698ms step_avg:80.73ms
+[2025-07-08 02:04:56] [Rank 0] step:6321/10000 train_time:510174ms step_avg:80.71ms
+[2025-07-08 02:04:57] [Rank 0] step:6341/10000 train_time:511669ms step_avg:80.69ms
+[2025-07-08 02:04:59] [Rank 0] step:6361/10000 train_time:513164ms step_avg:80.67ms
+[2025-07-08 02:05:00] [Rank 0] step:6381/10000 train_time:514659ms step_avg:80.65ms
+[2025-07-08 02:05:02] [Rank 0] step:6401/10000 train_time:516806ms step_avg:80.74ms
+[2025-07-08 02:05:04] [Rank 0] step:6421/10000 train_time:518302ms step_avg:80.72ms
+[2025-07-08 02:05:05] [Rank 0] step:6441/10000 train_time:519798ms step_avg:80.70ms
+[2025-07-08 02:05:07] [Rank 0] step:6461/10000 train_time:521294ms step_avg:80.68ms
+[2025-07-08 02:05:09] [Rank 0] step:6481/10000 train_time:523472ms step_avg:80.77ms
+[2025-07-08 02:05:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:05:11] [Rank 0] PRINT: step:6500/10000 train_loss:0.8608 val_loss:0.8607 train_time:524947ms step_avg:80.76ms
+[2025-07-08 02:05:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:05:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 02:05:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:10:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:10:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:10:39] [Rank 0] Total Loss: 5.4131
+[2025-07-08 02:10:39] [Rank 0] Total FTA: 0.9592
+[2025-07-08 02:10:39] [Rank 0] Group 0 Loss: 5.5941
+[2025-07-08 02:10:39] [Rank 0] Group 1 Loss: 5.3877
+[2025-07-08 02:10:39] [Rank 0] Group 2 Loss: 5.1708
+[2025-07-08 02:10:39] [Rank 0] Group 3 Loss: 5.5418
+[2025-07-08 02:10:39] [Rank 0] Group 4 Loss: 5.4066
+[2025-07-08 02:10:39] [Rank 0] Group 5 Loss: 5.4281
+[2025-07-08 02:10:39] [Rank 0] Group 6 Loss: 5.2830
+[2025-07-08 02:10:39] [Rank 0] Group 7 Loss: 5.4320
+[2025-07-08 02:10:39] [Rank 0] Group 8 Loss: 5.3947
+[2025-07-08 02:10:39] [Rank 0] Group 9 Loss: 5.3131
+[2025-07-08 02:10:39] [Rank 0] Group 10 Loss: 5.3952
+[2025-07-08 02:10:39] [Rank 0] Group 11 Loss: 5.4086
+[2025-07-08 02:10:39] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 02:10:39] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 02:10:39] [Rank 0] Group 2 FTA: 0.8385
+[2025-07-08 02:10:39] [Rank 0] Group 3 FTA: 0.9297
+[2025-07-08 02:10:39] [Rank 0] Group 4 FTA: 0.9635
+[2025-07-08 02:10:39] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-08 02:10:39] [Rank 0] Group 6 FTA: 0.9635
+[2025-07-08 02:10:39] [Rank 0] Group 7 FTA: 0.9714
+[2025-07-08 02:10:39] [Rank 0] Group 8 FTA: 0.9635
+[2025-07-08 02:10:39] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-08 02:10:39] [Rank 0] Group 10 FTA: 0.9668
+[2025-07-08 02:10:39] [Rank 0] Group 11 FTA: 0.9639
+[2025-07-08 02:10:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 02:10:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 02:10:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 02:10:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 02:10:40] [Rank 0] step:6501/10000 train_time:524967ms step_avg:80.75ms
+[2025-07-08 02:10:42] [Rank 0] step:6521/10000 train_time:526475ms step_avg:80.74ms
+[2025-07-08 02:10:43] [Rank 0] step:6541/10000 train_time:527964ms step_avg:80.72ms
+[2025-07-08 02:10:45] [Rank 0] step:6561/10000 train_time:529454ms step_avg:80.70ms
+[2025-07-08 02:10:47] [Rank 0] step:6581/10000 train_time:531602ms step_avg:80.78ms
+[2025-07-08 02:10:49] [Rank 0] step:6601/10000 train_time:533092ms step_avg:80.76ms
+[2025-07-08 02:10:50] [Rank 0] step:6621/10000 train_time:534582ms step_avg:80.74ms
+[2025-07-08 02:10:52] [Rank 0] step:6641/10000 train_time:536074ms step_avg:80.72ms
+[2025-07-08 02:10:54] [Rank 0] step:6661/10000 train_time:537565ms step_avg:80.70ms
+[2025-07-08 02:10:55] [Rank 0] step:6681/10000 train_time:539711ms step_avg:80.78ms
+[2025-07-08 02:10:57] [Rank 0] step:6701/10000 train_time:541203ms step_avg:80.76ms
+[2025-07-08 02:10:58] [Rank 0] step:6721/10000 train_time:542699ms step_avg:80.75ms
+[2025-07-08 02:11:00] [Rank 0] step:6741/10000 train_time:544194ms step_avg:80.73ms
+[2025-07-08 02:11:02] [Rank 0] step:6761/10000 train_time:546356ms step_avg:80.81ms
+[2025-07-08 02:11:03] [Rank 0] step:6781/10000 train_time:547849ms step_avg:80.79ms
+[2025-07-08 02:11:05] [Rank 0] step:6801/10000 train_time:549342ms step_avg:80.77ms
+[2025-07-08 02:11:06] [Rank 0] step:6821/10000 train_time:550837ms step_avg:80.76ms
+[2025-07-08 02:11:08] [Rank 0] step:6841/10000 train_time:552333ms step_avg:80.74ms
+[2025-07-08 02:11:10] [Rank 0] step:6861/10000 train_time:554500ms step_avg:80.82ms
+[2025-07-08 02:11:11] [Rank 0] step:6881/10000 train_time:555994ms step_avg:80.80ms
+[2025-07-08 02:11:13] [Rank 0] step:6901/10000 train_time:557490ms step_avg:80.78ms
+[2025-07-08 02:11:14] [Rank 0] step:6921/10000 train_time:558984ms step_avg:80.77ms
+[2025-07-08 02:11:17] [Rank 0] step:6941/10000 train_time:561141ms step_avg:80.84ms
+[2025-07-08 02:11:18] [Rank 0] step:6961/10000 train_time:562635ms step_avg:80.83ms
+[2025-07-08 02:11:20] [Rank 0] step:6981/10000 train_time:564130ms step_avg:80.81ms
+[2025-07-08 02:11:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:11:22] [Rank 0] PRINT: step:7000/10000 train_loss:0.8595 val_loss:0.8601 train_time:565788ms step_avg:80.83ms
+[2025-07-08 02:11:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:11:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 02:11:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:16:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:16:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:16:47] [Rank 0] Total Loss: 5.4400
+[2025-07-08 02:16:47] [Rank 0] Total FTA: 0.9743
+[2025-07-08 02:16:47] [Rank 0] Group 0 Loss: 5.6537
+[2025-07-08 02:16:47] [Rank 0] Group 1 Loss: 5.3768
+[2025-07-08 02:16:47] [Rank 0] Group 2 Loss: 5.2533
+[2025-07-08 02:16:47] [Rank 0] Group 3 Loss: 5.4767
+[2025-07-08 02:16:47] [Rank 0] Group 4 Loss: 5.3744
+[2025-07-08 02:16:47] [Rank 0] Group 5 Loss: 5.4728
+[2025-07-08 02:16:47] [Rank 0] Group 6 Loss: 5.3594
+[2025-07-08 02:16:47] [Rank 0] Group 7 Loss: 5.4370
+[2025-07-08 02:16:47] [Rank 0] Group 8 Loss: 5.3768
+[2025-07-08 02:16:47] [Rank 0] Group 9 Loss: 5.4288
+[2025-07-08 02:16:47] [Rank 0] Group 10 Loss: 5.4448
+[2025-07-08 02:16:47] [Rank 0] Group 11 Loss: 5.4274
+[2025-07-08 02:16:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 02:16:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 02:16:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 02:16:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 02:16:47] [Rank 0] Group 4 FTA: 0.9557
+[2025-07-08 02:16:47] [Rank 0] Group 5 FTA: 0.9870
+[2025-07-08 02:16:47] [Rank 0] Group 6 FTA: 0.9531
+[2025-07-08 02:16:47] [Rank 0] Group 7 FTA: 0.9714
+[2025-07-08 02:16:47] [Rank 0] Group 8 FTA: 0.9453
+[2025-07-08 02:16:47] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-08 02:16:47] [Rank 0] Group 10 FTA: 0.9590
+[2025-07-08 02:16:47] [Rank 0] Group 11 FTA: 0.9570
+[2025-07-08 02:16:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 02:16:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 02:16:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 02:16:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 02:16:49] [Rank 0] step:7001/10000 train_time:565808ms step_avg:80.82ms
+[2025-07-08 02:16:51] [Rank 0] step:7021/10000 train_time:567296ms step_avg:80.80ms
+[2025-07-08 02:16:53] [Rank 0] step:7041/10000 train_time:569442ms step_avg:80.88ms
+[2025-07-08 02:16:54] [Rank 0] step:7061/10000 train_time:570930ms step_avg:80.86ms
+[2025-07-08 02:16:56] [Rank 0] step:7081/10000 train_time:572422ms step_avg:80.84ms
+[2025-07-08 02:16:57] [Rank 0] step:7101/10000 train_time:573913ms step_avg:80.82ms
+[2025-07-08 02:16:59] [Rank 0] step:7121/10000 train_time:576056ms step_avg:80.90ms
+[2025-07-08 02:17:01] [Rank 0] step:7141/10000 train_time:577548ms step_avg:80.88ms
+[2025-07-08 02:17:02] [Rank 0] step:7161/10000 train_time:579041ms step_avg:80.86ms
+[2025-07-08 02:17:04] [Rank 0] step:7181/10000 train_time:580534ms step_avg:80.84ms
+[2025-07-08 02:17:06] [Rank 0] step:7201/10000 train_time:582691ms step_avg:80.92ms
+[2025-07-08 02:17:07] [Rank 0] step:7221/10000 train_time:584167ms step_avg:80.90ms
+[2025-07-08 02:17:09] [Rank 0] step:7241/10000 train_time:585660ms step_avg:80.88ms
+[2025-07-08 02:17:10] [Rank 0] step:7261/10000 train_time:587156ms step_avg:80.86ms
+[2025-07-08 02:17:12] [Rank 0] step:7281/10000 train_time:588652ms step_avg:80.85ms
+[2025-07-08 02:17:14] [Rank 0] step:7301/10000 train_time:590807ms step_avg:80.92ms
+[2025-07-08 02:17:15] [Rank 0] step:7321/10000 train_time:592303ms step_avg:80.90ms
+[2025-07-08 02:17:17] [Rank 0] step:7341/10000 train_time:593797ms step_avg:80.89ms
+[2025-07-08 02:17:18] [Rank 0] step:7361/10000 train_time:595293ms step_avg:80.87ms
+[2025-07-08 02:17:21] [Rank 0] step:7381/10000 train_time:597474ms step_avg:80.95ms
+[2025-07-08 02:17:22] [Rank 0] step:7401/10000 train_time:598952ms step_avg:80.93ms
+[2025-07-08 02:17:24] [Rank 0] step:7421/10000 train_time:600448ms step_avg:80.91ms
+[2025-07-08 02:17:25] [Rank 0] step:7441/10000 train_time:601945ms step_avg:80.90ms
+[2025-07-08 02:17:27] [Rank 0] step:7461/10000 train_time:603439ms step_avg:80.88ms
+[2025-07-08 02:17:28] [Rank 0] step:7481/10000 train_time:605175ms step_avg:80.89ms
+[2025-07-08 02:17:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:17:31] [Rank 0] PRINT: step:7500/10000 train_loss:0.8587 val_loss:0.8597 train_time:606672ms step_avg:80.89ms
+[2025-07-08 02:17:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:17:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 02:17:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:22:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:22:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:22:57] [Rank 0] Total Loss: 5.5058
+[2025-07-08 02:22:57] [Rank 0] Total FTA: 0.9743
+[2025-07-08 02:22:57] [Rank 0] Group 0 Loss: 5.6729
+[2025-07-08 02:22:57] [Rank 0] Group 1 Loss: 5.3851
+[2025-07-08 02:22:57] [Rank 0] Group 2 Loss: 5.2542
+[2025-07-08 02:22:57] [Rank 0] Group 3 Loss: 5.7094
+[2025-07-08 02:22:57] [Rank 0] Group 4 Loss: 5.4482
+[2025-07-08 02:22:57] [Rank 0] Group 5 Loss: 5.5537
+[2025-07-08 02:22:57] [Rank 0] Group 6 Loss: 5.4522
+[2025-07-08 02:22:57] [Rank 0] Group 7 Loss: 5.4524
+[2025-07-08 02:22:57] [Rank 0] Group 8 Loss: 5.4770
+[2025-07-08 02:22:57] [Rank 0] Group 9 Loss: 5.4863
+[2025-07-08 02:22:57] [Rank 0] Group 10 Loss: 5.5075
+[2025-07-08 02:22:57] [Rank 0] Group 11 Loss: 5.5022
+[2025-07-08 02:22:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 02:22:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 02:22:57] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-08 02:22:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 02:22:57] [Rank 0] Group 4 FTA: 0.9714
+[2025-07-08 02:22:57] [Rank 0] Group 5 FTA: 0.9792
+[2025-07-08 02:22:57] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-08 02:22:57] [Rank 0] Group 7 FTA: 0.9766
+[2025-07-08 02:22:57] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-08 02:22:57] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-08 02:22:57] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-08 02:22:57] [Rank 0] Group 11 FTA: 0.9658
+[2025-07-08 02:22:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 02:22:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
+[2025-07-08 02:22:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png
+[2025-07-08 02:22:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png
+[2025-07-08 02:22:58] [Rank 0] step:7501/10000 train_time:606691ms step_avg:80.88ms
+[2025-07-08 02:23:00] [Rank 0] step:7521/10000 train_time:608200ms step_avg:80.87ms
+[2025-07-08 02:23:01] [Rank 0] step:7541/10000 train_time:609691ms step_avg:80.85ms
+[2025-07-08 02:23:04] [Rank 0] step:7561/10000 train_time:611440ms step_avg:80.87ms
+[2025-07-08 02:23:05] [Rank 0] step:7581/10000 train_time:613346ms step_avg:80.91ms
+[2025-07-08 02:23:07] [Rank 0] step:7601/10000 train_time:614836ms step_avg:80.89ms
+[2025-07-08 02:23:08] [Rank 0] step:7621/10000 train_time:616326ms step_avg:80.87ms
+[2025-07-08 02:23:10] [Rank 0] step:7641/10000 train_time:617817ms step_avg:80.86ms
+[2025-07-08 02:23:12] [Rank 0] step:7661/10000 train_time:619957ms step_avg:80.92ms
+[2025-07-08 02:23:13] [Rank 0] step:7681/10000 train_time:621448ms step_avg:80.91ms
+[2025-07-08 02:23:15] [Rank 0] step:7701/10000 train_time:622940ms step_avg:80.89ms
+[2025-07-08 02:23:16] [Rank 0] step:7721/10000 train_time:624433ms step_avg:80.87ms
+[2025-07-08 02:23:18] [Rank 0] step:7741/10000 train_time:626593ms step_avg:80.94ms
+[2025-07-08 02:23:20] [Rank 0] step:7761/10000 train_time:628069ms step_avg:80.93ms
+[2025-07-08 02:23:21] [Rank 0] step:7781/10000 train_time:629564ms step_avg:80.91ms
+[2025-07-08 02:23:23] [Rank 0] step:7801/10000 train_time:631059ms step_avg:80.89ms
+[2025-07-08 02:23:24] [Rank 0] step:7821/10000 train_time:632553ms step_avg:80.88ms
+[2025-07-08 02:23:27] [Rank 0] step:7841/10000 train_time:634709ms step_avg:80.95ms
+[2025-07-08 02:23:28] [Rank 0] step:7861/10000 train_time:636203ms step_avg:80.93ms
+[2025-07-08 02:23:30] [Rank 0] step:7881/10000 train_time:637698ms step_avg:80.92ms
+[2025-07-08 02:23:31] [Rank 0] step:7901/10000 train_time:639195ms step_avg:80.90ms
+[2025-07-08 02:23:33] [Rank 0] step:7921/10000 train_time:640843ms step_avg:80.90ms
+[2025-07-08 02:23:34] [Rank 0] step:7941/10000 train_time:642425ms step_avg:80.90ms
+[2025-07-08 02:23:36] [Rank 0] step:7961/10000 train_time:643920ms step_avg:80.88ms
+[2025-07-08 02:23:37] [Rank 0] step:7981/10000 train_time:645418ms step_avg:80.87ms
+[2025-07-08 02:23:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 02:23:40] [Rank 0] PRINT: step:8000/10000 train_loss:0.8581 val_loss:0.8590 train_time:646916ms step_avg:80.86ms
+[2025-07-08 02:23:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 02:23:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 02:23:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 02:29:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 02:29:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 02:29:06] [Rank 0] Total Loss: 5.5407
+[2025-07-08 02:29:06] [Rank 0] Total FTA: 0.9769
+[2025-07-08 02:29:06] [Rank 0] Group 0 Loss: 5.6112
+[2025-07-08 02:29:06] [Rank 0] Group 1 Loss: 5.6188
+[2025-07-08 02:29:06] [Rank 0] Group 2 Loss: 5.2934
+[2025-07-08 02:29:06] [Rank 0] Group 3 Loss: 5.6728
+[2025-07-08 02:29:06] [Rank 0] Group 4 Loss: 5.4911
+[2025-07-08 02:29:06] [Rank 0] Group 5 Loss: 5.5396
+[2025-07-08 02:29:06] [Rank 0] Group 6 Loss: 5.5019
+[2025-07-08 02:29:06] [Rank 0] Group 7 Loss: 5.5269
+[2025-07-08 02:29:06] [Rank 0] Group 8 Loss: 5.5184
+[2025-07-08 02:29:06] [Rank 0] Group 9 Loss: 5.4968
+[2025-07-08 02:29:06] [Rank 0] Group 10 Loss: 5.5478
+[2025-07-08 02:29:06] [Rank 0] Group 11 Loss: 5.5562
+[2025-07-08 02:29:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-08 02:29:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-08 02:29:06] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-08 02:29:06] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-08 02:29:06] [Rank 0] Group 4 FTA: 0.9505
+[2025-07-08 02:29:06] [Rank 0] Group 5 FTA: 0.9896
+[2025-07-08 02:29:06] [Rank 0] Group 6 FTA: 0.9583
+[2025-07-08 02:29:06] [Rank 0] Group 7 FTA: 0.9661
+[2025-07-08 02:29:06] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-08 02:29:06] [Rank 0] Group 9 FTA: 0.9727
+[2025-07-08 02:29:06] [Rank 0] Group 10 FTA: 0.9707
+[2025-07-08 02:29:06] [Rank 0] Group 11 FTA: 0.9600
+[2025-07-08 02:29:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png
+[2025-07-08 02:29:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:29:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:29:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:29:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:29:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:29:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:29:08] [Rank 0] step:8001/10000 train_time:646935ms step_avg:80.86ms +[2025-07-08 02:29:08] [Rank 0] step:8001/10000 train_time:646935ms step_avg:80.86ms +[2025-07-08 02:29:10] [Rank 0] step:8021/10000 train_time:649098ms step_avg:80.92ms +[2025-07-08 02:29:10] [Rank 0] step:8021/10000 train_time:649098ms step_avg:80.92ms +[2025-07-08 02:29:11] [Rank 0] step:8041/10000 train_time:650587ms step_avg:80.91ms +[2025-07-08 02:29:11] [Rank 0] step:8041/10000 train_time:650587ms step_avg:80.91ms +[2025-07-08 02:29:13] [Rank 0] step:8061/10000 train_time:652076ms step_avg:80.89ms +[2025-07-08 02:29:13] [Rank 0] step:8061/10000 train_time:652076ms step_avg:80.89ms +[2025-07-08 02:29:14] [Rank 0] step:8081/10000 train_time:653568ms step_avg:80.88ms +[2025-07-08 02:29:14] [Rank 0] step:8081/10000 train_time:653568ms step_avg:80.88ms +[2025-07-08 02:29:17] [Rank 0] step:8101/10000 train_time:655896ms step_avg:80.96ms +[2025-07-08 02:29:17] [Rank 0] step:8101/10000 train_time:655896ms step_avg:80.96ms +[2025-07-08 02:29:18] [Rank 0] step:8121/10000 train_time:657369ms step_avg:80.95ms +[2025-07-08 02:29:18] [Rank 0] step:8121/10000 train_time:657369ms step_avg:80.95ms +[2025-07-08 02:29:19] [Rank 0] step:8141/10000 train_time:658858ms step_avg:80.93ms +[2025-07-08 02:29:19] [Rank 0] step:8141/10000 train_time:658858ms step_avg:80.93ms +[2025-07-08 02:29:21] [Rank 0] step:8161/10000 train_time:660349ms step_avg:80.92ms +[2025-07-08 02:29:21] [Rank 0] step:8161/10000 train_time:660349ms step_avg:80.92ms +[2025-07-08 02:29:22] [Rank 0] step:8181/10000 train_time:661841ms step_avg:80.90ms +[2025-07-08 02:29:22] [Rank 0] step:8181/10000 train_time:661841ms step_avg:80.90ms +[2025-07-08 02:29:24] [Rank 0] step:8201/10000 train_time:663571ms step_avg:80.91ms +[2025-07-08 02:29:24] [Rank 0] step:8201/10000 train_time:663571ms step_avg:80.91ms +[2025-07-08 02:29:26] [Rank 0] step:8221/10000 train_time:665063ms step_avg:80.90ms +[2025-07-08 02:29:26] [Rank 0] step:8221/10000 train_time:665063ms step_avg:80.90ms +[2025-07-08 02:29:27] [Rank 0] step:8241/10000 train_time:666556ms step_avg:80.88ms +[2025-07-08 02:29:27] [Rank 0] step:8241/10000 train_time:666556ms step_avg:80.88ms +[2025-07-08 02:29:29] [Rank 0] step:8261/10000 train_time:668050ms step_avg:80.87ms +[2025-07-08 02:29:29] [Rank 0] step:8261/10000 train_time:668050ms step_avg:80.87ms +[2025-07-08 02:29:31] [Rank 0] step:8281/10000 train_time:669545ms step_avg:80.85ms +[2025-07-08 02:29:31] [Rank 0] step:8281/10000 train_time:669545ms step_avg:80.85ms +[2025-07-08 02:29:32] [Rank 0] step:8301/10000 train_time:671708ms step_avg:80.92ms +[2025-07-08 02:29:32] [Rank 0] 
step:8301/10000 train_time:671708ms step_avg:80.92ms +[2025-07-08 02:29:34] [Rank 0] step:8321/10000 train_time:673200ms step_avg:80.90ms +[2025-07-08 02:29:34] [Rank 0] step:8321/10000 train_time:673200ms step_avg:80.90ms +[2025-07-08 02:29:35] [Rank 0] step:8341/10000 train_time:674694ms step_avg:80.89ms +[2025-07-08 02:29:35] [Rank 0] step:8341/10000 train_time:674694ms step_avg:80.89ms +[2025-07-08 02:29:37] [Rank 0] step:8361/10000 train_time:676189ms step_avg:80.87ms +[2025-07-08 02:29:37] [Rank 0] step:8361/10000 train_time:676189ms step_avg:80.87ms +[2025-07-08 02:29:39] [Rank 0] step:8381/10000 train_time:678350ms step_avg:80.94ms +[2025-07-08 02:29:39] [Rank 0] step:8381/10000 train_time:678350ms step_avg:80.94ms +[2025-07-08 02:29:40] [Rank 0] step:8401/10000 train_time:679843ms step_avg:80.92ms +[2025-07-08 02:29:40] [Rank 0] step:8401/10000 train_time:679843ms step_avg:80.92ms +[2025-07-08 02:29:42] [Rank 0] step:8421/10000 train_time:681337ms step_avg:80.91ms +[2025-07-08 02:29:42] [Rank 0] step:8421/10000 train_time:681337ms step_avg:80.91ms +[2025-07-08 02:29:43] [Rank 0] step:8441/10000 train_time:682832ms step_avg:80.89ms +[2025-07-08 02:29:43] [Rank 0] step:8441/10000 train_time:682832ms step_avg:80.89ms +[2025-07-08 02:29:45] [Rank 0] step:8461/10000 train_time:684584ms step_avg:80.91ms +[2025-07-08 02:29:45] [Rank 0] step:8461/10000 train_time:684584ms step_avg:80.91ms +[2025-07-08 02:29:47] [Rank 0] step:8481/10000 train_time:686061ms step_avg:80.89ms +[2025-07-08 02:29:47] [Rank 0] step:8481/10000 train_time:686061ms step_avg:80.89ms +[2025-07-08 02:29:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:29:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:29:49] [Rank 0] PRINT: step:8500/10000 train_loss:0.8572 val_loss:0.8587 train_time:687555ms step_avg:80.89ms +[2025-07-08 02:29:49] [Rank 0] PRINT: step:8500/10000 train_loss:0.8572 val_loss:0.8587 train_time:687555ms step_avg:80.89ms +[2025-07-08 02:29:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:29:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:29:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 02:29:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
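The recurring warning about val_tokens is benign but quantifiable: 1966080 / 262144 = 7.5, so a validation loop that floors the batch count runs 7 full batches and skips the trailing half batch. A minimal sketch of that arithmetic; note the 262144 figure matching val_seq_len (65536) times a world size of 4 is an inference, not something the log states:

# Sketch: coverage of a floor-division validation batch loop.
val_tokens = 1966080
val_batch_size = 262144  # presumably world_size (4) * val_seq_len (65536)

num_batches = val_tokens // val_batch_size               # 7
tokens_missed = val_tokens - num_batches * val_batch_size  # 131072
print(f"{num_batches} batches run; {tokens_missed} tokens "
      f"({tokens_missed / val_tokens:.1%}) skipped")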
+[2025-07-08 02:29:49] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:29:49] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:35:14] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 02:35:14] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 02:35:14] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 02:35:14] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 02:35:14] [Rank 0] Total Loss: 5.4638 +[2025-07-08 02:35:14] [Rank 0] Total Loss: 5.4638 +[2025-07-08 02:35:14] [Rank 0] Total FTA: 0.9558 +[2025-07-08 02:35:14] [Rank 0] Total FTA: 0.9558 +[2025-07-08 02:35:14] [Rank 0] Group 0 Loss: 5.5046 +[2025-07-08 02:35:14] [Rank 0] Group 0 Loss: 5.5046 +[2025-07-08 02:35:14] [Rank 0] Group 1 Loss: 5.5129 +[2025-07-08 02:35:14] [Rank 0] Group 1 Loss: 5.5129 +[2025-07-08 02:35:14] [Rank 0] Group 2 Loss: 5.2567 +[2025-07-08 02:35:14] [Rank 0] Group 2 Loss: 5.2567 +[2025-07-08 02:35:14] [Rank 0] Group 3 Loss: 5.5610 +[2025-07-08 02:35:14] [Rank 0] Group 3 Loss: 5.5610 +[2025-07-08 02:35:14] [Rank 0] Group 4 Loss: 5.5108 +[2025-07-08 02:35:14] [Rank 0] Group 4 Loss: 5.5108 +[2025-07-08 02:35:14] [Rank 0] Group 5 Loss: 5.5369 +[2025-07-08 02:35:14] [Rank 0] Group 5 Loss: 5.5369 +[2025-07-08 02:35:14] [Rank 0] Group 6 Loss: 5.3371 +[2025-07-08 02:35:14] [Rank 0] Group 6 Loss: 5.3371 +[2025-07-08 02:35:14] [Rank 0] Group 7 Loss: 5.4964 +[2025-07-08 02:35:14] [Rank 0] Group 7 Loss: 5.4964 +[2025-07-08 02:35:14] [Rank 0] Group 8 Loss: 5.4622 +[2025-07-08 02:35:14] [Rank 0] Group 8 Loss: 5.4622 +[2025-07-08 02:35:14] [Rank 0] Group 9 Loss: 5.4440 +[2025-07-08 02:35:14] [Rank 0] Group 9 Loss: 5.4440 +[2025-07-08 02:35:14] [Rank 0] Group 10 Loss: 5.4018 +[2025-07-08 02:35:14] [Rank 0] Group 10 Loss: 5.4018 +[2025-07-08 02:35:14] [Rank 0] Group 11 Loss: 5.4830 +[2025-07-08 02:35:14] [Rank 0] Group 11 Loss: 5.4830 +[2025-07-08 02:35:14] [Rank 0] Group 0 FTA: 0.8427 +[2025-07-08 02:35:14] [Rank 0] Group 0 FTA: 0.8427 +[2025-07-08 02:35:14] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 02:35:14] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 02:35:14] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 02:35:14] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 02:35:14] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 02:35:14] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 02:35:14] [Rank 0] Group 4 FTA: 0.9714 +[2025-07-08 02:35:14] [Rank 0] Group 4 FTA: 0.9714 +[2025-07-08 02:35:14] [Rank 0] Group 5 FTA: 0.9557 +[2025-07-08 02:35:14] [Rank 0] Group 5 FTA: 0.9557 +[2025-07-08 02:35:14] [Rank 0] Group 6 FTA: 0.9870 +[2025-07-08 02:35:14] [Rank 0] Group 6 FTA: 0.9870 +[2025-07-08 02:35:14] [Rank 0] Group 7 FTA: 0.9896 +[2025-07-08 02:35:14] [Rank 0] Group 7 FTA: 0.9896 +[2025-07-08 02:35:14] [Rank 0] Group 8 FTA: 0.9557 +[2025-07-08 02:35:14] [Rank 0] Group 8 FTA: 0.9557 +[2025-07-08 02:35:14] [Rank 0] Group 9 FTA: 0.9766 +[2025-07-08 02:35:14] [Rank 0] Group 9 FTA: 0.9766 +[2025-07-08 02:35:14] [Rank 0] Group 10 FTA: 0.9648 +[2025-07-08 02:35:14] [Rank 0] Group 10 FTA: 0.9648 +[2025-07-08 02:35:14] [Rank 0] Group 11 FTA: 0.9512 +[2025-07-08 02:35:14] [Rank 0] Group 11 FTA: 0.9512 +[2025-07-08 02:35:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 02:35:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 02:35:15] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:35:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:35:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:35:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:35:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:35:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:35:16] [Rank 0] step:8501/10000 train_time:687575ms step_avg:80.88ms +[2025-07-08 02:35:16] [Rank 0] step:8501/10000 train_time:687575ms step_avg:80.88ms +[2025-07-08 02:35:17] [Rank 0] step:8521/10000 train_time:689059ms step_avg:80.87ms +[2025-07-08 02:35:17] [Rank 0] step:8521/10000 train_time:689059ms step_avg:80.87ms +[2025-07-08 02:35:19] [Rank 0] step:8541/10000 train_time:690548ms step_avg:80.85ms +[2025-07-08 02:35:19] [Rank 0] step:8541/10000 train_time:690548ms step_avg:80.85ms +[2025-07-08 02:35:21] [Rank 0] step:8561/10000 train_time:692704ms step_avg:80.91ms +[2025-07-08 02:35:21] [Rank 0] step:8561/10000 train_time:692704ms step_avg:80.91ms +[2025-07-08 02:35:22] [Rank 0] step:8581/10000 train_time:694192ms step_avg:80.90ms +[2025-07-08 02:35:22] [Rank 0] step:8581/10000 train_time:694192ms step_avg:80.90ms +[2025-07-08 02:35:24] [Rank 0] step:8601/10000 train_time:695682ms step_avg:80.88ms +[2025-07-08 02:35:24] [Rank 0] step:8601/10000 train_time:695682ms step_avg:80.88ms +[2025-07-08 02:35:25] [Rank 0] step:8621/10000 train_time:697173ms step_avg:80.87ms +[2025-07-08 02:35:25] [Rank 0] step:8621/10000 train_time:697173ms step_avg:80.87ms +[2025-07-08 02:35:27] [Rank 0] step:8641/10000 train_time:698715ms step_avg:80.86ms +[2025-07-08 02:35:27] [Rank 0] step:8641/10000 train_time:698715ms step_avg:80.86ms +[2025-07-08 02:35:29] [Rank 0] step:8661/10000 train_time:700813ms step_avg:80.92ms +[2025-07-08 02:35:29] [Rank 0] step:8661/10000 train_time:700813ms step_avg:80.92ms +[2025-07-08 02:35:30] [Rank 0] step:8681/10000 train_time:702306ms step_avg:80.90ms +[2025-07-08 02:35:30] [Rank 0] step:8681/10000 train_time:702306ms step_avg:80.90ms +[2025-07-08 02:35:32] [Rank 0] step:8701/10000 train_time:703798ms step_avg:80.89ms +[2025-07-08 02:35:32] [Rank 0] step:8701/10000 train_time:703798ms step_avg:80.89ms +[2025-07-08 02:35:33] [Rank 0] step:8721/10000 train_time:705298ms step_avg:80.87ms +[2025-07-08 02:35:33] [Rank 0] step:8721/10000 train_time:705298ms step_avg:80.87ms +[2025-07-08 02:35:35] [Rank 0] step:8741/10000 train_time:706916ms step_avg:80.87ms +[2025-07-08 02:35:35] [Rank 0] step:8741/10000 train_time:706916ms step_avg:80.87ms +[2025-07-08 02:35:37] [Rank 0] step:8761/10000 train_time:708411ms step_avg:80.86ms +[2025-07-08 02:35:37] [Rank 0] step:8761/10000 train_time:708411ms step_avg:80.86ms +[2025-07-08 02:35:38] [Rank 0] step:8781/10000 train_time:709903ms step_avg:80.85ms +[2025-07-08 02:35:38] [Rank 0] step:8781/10000 train_time:709903ms step_avg:80.85ms +[2025-07-08 02:35:40] [Rank 0] step:8801/10000 train_time:711400ms step_avg:80.83ms +[2025-07-08 02:35:40] [Rank 0] 
step:8801/10000 train_time:711400ms step_avg:80.83ms +[2025-07-08 02:35:42] [Rank 0] step:8821/10000 train_time:713152ms step_avg:80.85ms +[2025-07-08 02:35:42] [Rank 0] step:8821/10000 train_time:713152ms step_avg:80.85ms +[2025-07-08 02:35:43] [Rank 0] step:8841/10000 train_time:715056ms step_avg:80.88ms +[2025-07-08 02:35:43] [Rank 0] step:8841/10000 train_time:715056ms step_avg:80.88ms +[2025-07-08 02:35:45] [Rank 0] step:8861/10000 train_time:716552ms step_avg:80.87ms +[2025-07-08 02:35:45] [Rank 0] step:8861/10000 train_time:716552ms step_avg:80.87ms +[2025-07-08 02:35:46] [Rank 0] step:8881/10000 train_time:718047ms step_avg:80.85ms +[2025-07-08 02:35:46] [Rank 0] step:8881/10000 train_time:718047ms step_avg:80.85ms +[2025-07-08 02:35:48] [Rank 0] step:8901/10000 train_time:719544ms step_avg:80.84ms +[2025-07-08 02:35:48] [Rank 0] step:8901/10000 train_time:719544ms step_avg:80.84ms +[2025-07-08 02:35:50] [Rank 0] step:8921/10000 train_time:721696ms step_avg:80.90ms +[2025-07-08 02:35:50] [Rank 0] step:8921/10000 train_time:721696ms step_avg:80.90ms +[2025-07-08 02:35:51] [Rank 0] step:8941/10000 train_time:723192ms step_avg:80.88ms +[2025-07-08 02:35:51] [Rank 0] step:8941/10000 train_time:723192ms step_avg:80.88ms +[2025-07-08 02:35:53] [Rank 0] step:8961/10000 train_time:724689ms step_avg:80.87ms +[2025-07-08 02:35:53] [Rank 0] step:8961/10000 train_time:724689ms step_avg:80.87ms +[2025-07-08 02:35:54] [Rank 0] step:8981/10000 train_time:726186ms step_avg:80.86ms +[2025-07-08 02:35:54] [Rank 0] step:8981/10000 train_time:726186ms step_avg:80.86ms +[2025-07-08 02:35:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:35:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:35:57] [Rank 0] PRINT: step:9000/10000 train_loss:0.8565 val_loss:0.8586 train_time:727684ms step_avg:80.85ms +[2025-07-08 02:35:57] [Rank 0] PRINT: step:9000/10000 train_loss:0.8565 val_loss:0.8586 train_time:727684ms step_avg:80.85ms +[2025-07-08 02:35:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:35:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:35:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 02:35:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
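Each detailed-evaluation block requests ~5000 samples yet reports 5633. That overshoot comes from the stratified sampler (see run_detailed_evaluation in the script logged further down), which draws max(1, int(len(class) * ratio)) items per class: the floor of one item for every rare class pushes the total past the target. A self-contained sketch of the effect, using made-up class sizes:

import random

def stratified_sample(data_by_class, num_samples, total):
    # Per-class proportional sampling with a floor of one item per class,
    # mirroring the sampler in run_detailed_evaluation.
    ratio = num_samples / total
    picked = []
    for items in data_by_class.values():
        k = max(1, int(len(items) * ratio))
        picked.extend(random.sample(items, min(len(items), k)))
    return picked

# Hypothetical skewed layout: four big classes plus 1000 singleton classes.
sizes = [10000] * 4 + [1] * 1000
data = {i: list(range(n)) for i, n in enumerate(sizes)}
picked = stratified_sample(data, num_samples=5000, total=sum(sizes))
print(len(picked))  # 5876 -- the singleton floor overshoots the 5000 target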
+[2025-07-08 02:35:57] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:35:57] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:41:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 02:41:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 02:41:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 02:41:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 02:41:22] [Rank 0] Total Loss: 5.5101 +[2025-07-08 02:41:22] [Rank 0] Total Loss: 5.5101 +[2025-07-08 02:41:22] [Rank 0] Total FTA: 0.9382 +[2025-07-08 02:41:22] [Rank 0] Total FTA: 0.9382 +[2025-07-08 02:41:22] [Rank 0] Group 0 Loss: 5.5572 +[2025-07-08 02:41:22] [Rank 0] Group 0 Loss: 5.5572 +[2025-07-08 02:41:22] [Rank 0] Group 1 Loss: 5.5720 +[2025-07-08 02:41:22] [Rank 0] Group 1 Loss: 5.5720 +[2025-07-08 02:41:22] [Rank 0] Group 2 Loss: 5.3323 +[2025-07-08 02:41:22] [Rank 0] Group 2 Loss: 5.3323 +[2025-07-08 02:41:22] [Rank 0] Group 3 Loss: 5.5972 +[2025-07-08 02:41:22] [Rank 0] Group 3 Loss: 5.5972 +[2025-07-08 02:41:22] [Rank 0] Group 4 Loss: 5.5054 +[2025-07-08 02:41:22] [Rank 0] Group 4 Loss: 5.5054 +[2025-07-08 02:41:22] [Rank 0] Group 5 Loss: 5.5215 +[2025-07-08 02:41:22] [Rank 0] Group 5 Loss: 5.5215 +[2025-07-08 02:41:22] [Rank 0] Group 6 Loss: 5.4202 +[2025-07-08 02:41:22] [Rank 0] Group 6 Loss: 5.4202 +[2025-07-08 02:41:22] [Rank 0] Group 7 Loss: 5.5374 +[2025-07-08 02:41:22] [Rank 0] Group 7 Loss: 5.5374 +[2025-07-08 02:41:22] [Rank 0] Group 8 Loss: 5.4608 +[2025-07-08 02:41:22] [Rank 0] Group 8 Loss: 5.4608 +[2025-07-08 02:41:22] [Rank 0] Group 9 Loss: 5.4217 +[2025-07-08 02:41:22] [Rank 0] Group 9 Loss: 5.4217 +[2025-07-08 02:41:22] [Rank 0] Group 10 Loss: 5.4926 +[2025-07-08 02:41:22] [Rank 0] Group 10 Loss: 5.4926 +[2025-07-08 02:41:22] [Rank 0] Group 11 Loss: 5.5560 +[2025-07-08 02:41:22] [Rank 0] Group 11 Loss: 5.5560 +[2025-07-08 02:41:22] [Rank 0] Group 0 FTA: 0.6723 +[2025-07-08 02:41:22] [Rank 0] Group 0 FTA: 0.6723 +[2025-07-08 02:41:22] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 02:41:22] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 02:41:22] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 02:41:22] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 02:41:22] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 02:41:22] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 02:41:22] [Rank 0] Group 4 FTA: 0.9740 +[2025-07-08 02:41:22] [Rank 0] Group 4 FTA: 0.9740 +[2025-07-08 02:41:22] [Rank 0] Group 5 FTA: 0.9609 +[2025-07-08 02:41:22] [Rank 0] Group 5 FTA: 0.9609 +[2025-07-08 02:41:22] [Rank 0] Group 6 FTA: 0.9974 +[2025-07-08 02:41:22] [Rank 0] Group 6 FTA: 0.9974 +[2025-07-08 02:41:22] [Rank 0] Group 7 FTA: 0.9661 +[2025-07-08 02:41:22] [Rank 0] Group 7 FTA: 0.9661 +[2025-07-08 02:41:22] [Rank 0] Group 8 FTA: 0.9844 +[2025-07-08 02:41:22] [Rank 0] Group 8 FTA: 0.9844 +[2025-07-08 02:41:22] [Rank 0] Group 9 FTA: 0.9844 +[2025-07-08 02:41:22] [Rank 0] Group 9 FTA: 0.9844 +[2025-07-08 02:41:22] [Rank 0] Group 10 FTA: 0.9707 +[2025-07-08 02:41:22] [Rank 0] Group 10 FTA: 0.9707 +[2025-07-08 02:41:22] [Rank 0] Group 11 FTA: 0.9688 +[2025-07-08 02:41:22] [Rank 0] Group 11 FTA: 0.9688 +[2025-07-08 02:41:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 02:41:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 02:41:23] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:41:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:41:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:41:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:41:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:41:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:41:24] [Rank 0] step:9001/10000 train_time:727710ms step_avg:80.85ms +[2025-07-08 02:41:24] [Rank 0] step:9001/10000 train_time:727710ms step_avg:80.85ms +[2025-07-08 02:41:26] [Rank 0] step:9021/10000 train_time:729911ms step_avg:80.91ms +[2025-07-08 02:41:26] [Rank 0] step:9021/10000 train_time:729911ms step_avg:80.91ms +[2025-07-08 02:41:27] [Rank 0] step:9041/10000 train_time:731400ms step_avg:80.90ms +[2025-07-08 02:41:27] [Rank 0] step:9041/10000 train_time:731400ms step_avg:80.90ms +[2025-07-08 02:41:29] [Rank 0] step:9061/10000 train_time:732890ms step_avg:80.88ms +[2025-07-08 02:41:29] [Rank 0] step:9061/10000 train_time:732890ms step_avg:80.88ms +[2025-07-08 02:41:30] [Rank 0] step:9081/10000 train_time:734382ms step_avg:80.87ms +[2025-07-08 02:41:30] [Rank 0] step:9081/10000 train_time:734382ms step_avg:80.87ms +[2025-07-08 02:41:32] [Rank 0] step:9101/10000 train_time:736109ms step_avg:80.88ms +[2025-07-08 02:41:32] [Rank 0] step:9101/10000 train_time:736109ms step_avg:80.88ms +[2025-07-08 02:41:34] [Rank 0] step:9121/10000 train_time:737599ms step_avg:80.87ms +[2025-07-08 02:41:34] [Rank 0] step:9121/10000 train_time:737599ms step_avg:80.87ms +[2025-07-08 02:41:35] [Rank 0] step:9141/10000 train_time:739091ms step_avg:80.85ms +[2025-07-08 02:41:35] [Rank 0] step:9141/10000 train_time:739091ms step_avg:80.85ms +[2025-07-08 02:41:37] [Rank 0] step:9161/10000 train_time:740585ms step_avg:80.84ms +[2025-07-08 02:41:37] [Rank 0] step:9161/10000 train_time:740585ms step_avg:80.84ms +[2025-07-08 02:41:39] [Rank 0] step:9181/10000 train_time:742079ms step_avg:80.83ms +[2025-07-08 02:41:39] [Rank 0] step:9181/10000 train_time:742079ms step_avg:80.83ms +[2025-07-08 02:41:40] [Rank 0] step:9201/10000 train_time:744216ms step_avg:80.88ms +[2025-07-08 02:41:40] [Rank 0] step:9201/10000 train_time:744216ms step_avg:80.88ms +[2025-07-08 02:41:42] [Rank 0] step:9221/10000 train_time:745710ms step_avg:80.87ms +[2025-07-08 02:41:42] [Rank 0] step:9221/10000 train_time:745710ms step_avg:80.87ms +[2025-07-08 02:41:43] [Rank 0] step:9241/10000 train_time:747202ms step_avg:80.86ms +[2025-07-08 02:41:43] [Rank 0] step:9241/10000 train_time:747202ms step_avg:80.86ms +[2025-07-08 02:41:45] [Rank 0] step:9261/10000 train_time:748697ms step_avg:80.84ms +[2025-07-08 02:41:45] [Rank 0] step:9261/10000 train_time:748697ms step_avg:80.84ms +[2025-07-08 02:41:47] [Rank 0] step:9281/10000 train_time:750859ms step_avg:80.90ms +[2025-07-08 02:41:47] [Rank 0] step:9281/10000 train_time:750859ms step_avg:80.90ms +[2025-07-08 02:41:48] [Rank 0] step:9301/10000 train_time:752354ms step_avg:80.89ms +[2025-07-08 02:41:48] [Rank 0] 
step:9301/10000 train_time:752354ms step_avg:80.89ms +[2025-07-08 02:41:50] [Rank 0] step:9321/10000 train_time:753848ms step_avg:80.88ms +[2025-07-08 02:41:50] [Rank 0] step:9321/10000 train_time:753848ms step_avg:80.88ms +[2025-07-08 02:41:51] [Rank 0] step:9341/10000 train_time:755346ms step_avg:80.86ms +[2025-07-08 02:41:51] [Rank 0] step:9341/10000 train_time:755346ms step_avg:80.86ms +[2025-07-08 02:41:53] [Rank 0] step:9361/10000 train_time:757098ms step_avg:80.88ms +[2025-07-08 02:41:53] [Rank 0] step:9361/10000 train_time:757098ms step_avg:80.88ms +[2025-07-08 02:41:55] [Rank 0] step:9381/10000 train_time:758838ms step_avg:80.89ms +[2025-07-08 02:41:55] [Rank 0] step:9381/10000 train_time:758838ms step_avg:80.89ms +[2025-07-08 02:41:56] [Rank 0] step:9401/10000 train_time:760333ms step_avg:80.88ms +[2025-07-08 02:41:56] [Rank 0] step:9401/10000 train_time:760333ms step_avg:80.88ms +[2025-07-08 02:41:58] [Rank 0] step:9421/10000 train_time:761831ms step_avg:80.87ms +[2025-07-08 02:41:58] [Rank 0] step:9421/10000 train_time:761831ms step_avg:80.87ms +[2025-07-08 02:41:59] [Rank 0] step:9441/10000 train_time:763330ms step_avg:80.85ms +[2025-07-08 02:41:59] [Rank 0] step:9441/10000 train_time:763330ms step_avg:80.85ms +[2025-07-08 02:42:01] [Rank 0] step:9461/10000 train_time:765061ms step_avg:80.86ms +[2025-07-08 02:42:01] [Rank 0] step:9461/10000 train_time:765061ms step_avg:80.86ms +[2025-07-08 02:42:03] [Rank 0] step:9481/10000 train_time:766558ms step_avg:80.85ms +[2025-07-08 02:42:03] [Rank 0] step:9481/10000 train_time:766558ms step_avg:80.85ms +[2025-07-08 02:42:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:42:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:42:05] [Rank 0] PRINT: step:9500/10000 train_loss:0.8557 val_loss:0.8584 train_time:768057ms step_avg:80.85ms +[2025-07-08 02:42:05] [Rank 0] PRINT: step:9500/10000 train_loss:0.8557 val_loss:0.8584 train_time:768057ms step_avg:80.85ms +[2025-07-08 02:42:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:42:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:42:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 02:42:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
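The twelve "Group k" rows in these evaluation tables follow the power-law class layout built by generate_powerlaw_selection_counts in the script logged below: group 0 is a single head class, and every later group g contains 2^(g-1) classes with 2^(m-g) samples each, so group indices 0..11 imply m = 11 (an inference from the log, not an explicit setting). A quick check of that layout:

def generate_powerlaw_selection_counts(m):
    # Same construction as the logged script: class counts double per group
    # while per-class sample counts halve.
    selection_counts, class_groups = {}, []
    class_id = 0
    for group_id in range(m + 1):
        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
        samples_per_class = 2 ** (m - group_id)
        for _ in range(num_classes):
            selection_counts[class_id] = samples_per_class
            class_groups.append(group_id)
            class_id += 1
    return selection_counts, class_groups

counts, groups = generate_powerlaw_selection_counts(11)
print(max(groups) + 1)       # 12 groups, matching Group 0..11 above
print(len(counts))           # 2048 classes in total
print(sum(counts.values()))  # 13312 samples: 2048 in group 0, 1024 per later group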
+[2025-07-08 02:42:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:42:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:47:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 02:47:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 02:47:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 02:47:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 02:47:31] [Rank 0] Total Loss: 5.4830 +[2025-07-08 02:47:31] [Rank 0] Total Loss: 5.4830 +[2025-07-08 02:47:31] [Rank 0] Total FTA: 0.9876 +[2025-07-08 02:47:31] [Rank 0] Total FTA: 0.9876 +[2025-07-08 02:47:31] [Rank 0] Group 0 Loss: 5.4555 +[2025-07-08 02:47:31] [Rank 0] Group 0 Loss: 5.4555 +[2025-07-08 02:47:31] [Rank 0] Group 1 Loss: 5.5999 +[2025-07-08 02:47:31] [Rank 0] Group 1 Loss: 5.5999 +[2025-07-08 02:47:31] [Rank 0] Group 2 Loss: 5.3563 +[2025-07-08 02:47:31] [Rank 0] Group 2 Loss: 5.3563 +[2025-07-08 02:47:31] [Rank 0] Group 3 Loss: 5.5822 +[2025-07-08 02:47:31] [Rank 0] Group 3 Loss: 5.5822 +[2025-07-08 02:47:31] [Rank 0] Group 4 Loss: 5.4660 +[2025-07-08 02:47:31] [Rank 0] Group 4 Loss: 5.4660 +[2025-07-08 02:47:31] [Rank 0] Group 5 Loss: 5.4290 +[2025-07-08 02:47:31] [Rank 0] Group 5 Loss: 5.4290 +[2025-07-08 02:47:31] [Rank 0] Group 6 Loss: 5.3929 +[2025-07-08 02:47:31] [Rank 0] Group 6 Loss: 5.3929 +[2025-07-08 02:47:31] [Rank 0] Group 7 Loss: 5.5255 +[2025-07-08 02:47:31] [Rank 0] Group 7 Loss: 5.5255 +[2025-07-08 02:47:31] [Rank 0] Group 8 Loss: 5.4726 +[2025-07-08 02:47:31] [Rank 0] Group 8 Loss: 5.4726 +[2025-07-08 02:47:31] [Rank 0] Group 9 Loss: 5.5065 +[2025-07-08 02:47:31] [Rank 0] Group 9 Loss: 5.5065 +[2025-07-08 02:47:31] [Rank 0] Group 10 Loss: 5.4836 +[2025-07-08 02:47:31] [Rank 0] Group 10 Loss: 5.4836 +[2025-07-08 02:47:31] [Rank 0] Group 11 Loss: 5.5125 +[2025-07-08 02:47:31] [Rank 0] Group 11 Loss: 5.5125 +[2025-07-08 02:47:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-08 02:47:31] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-08 02:47:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 02:47:31] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 02:47:31] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 02:47:31] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 02:47:31] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 02:47:31] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 02:47:31] [Rank 0] Group 4 FTA: 0.9740 +[2025-07-08 02:47:31] [Rank 0] Group 4 FTA: 0.9740 +[2025-07-08 02:47:31] [Rank 0] Group 5 FTA: 0.9922 +[2025-07-08 02:47:31] [Rank 0] Group 5 FTA: 0.9922 +[2025-07-08 02:47:31] [Rank 0] Group 6 FTA: 0.9792 +[2025-07-08 02:47:31] [Rank 0] Group 6 FTA: 0.9792 +[2025-07-08 02:47:31] [Rank 0] Group 7 FTA: 0.9792 +[2025-07-08 02:47:31] [Rank 0] Group 7 FTA: 0.9792 +[2025-07-08 02:47:31] [Rank 0] Group 8 FTA: 0.9714 +[2025-07-08 02:47:31] [Rank 0] Group 8 FTA: 0.9714 +[2025-07-08 02:47:31] [Rank 0] Group 9 FTA: 0.9883 +[2025-07-08 02:47:31] [Rank 0] Group 9 FTA: 0.9883 +[2025-07-08 02:47:31] [Rank 0] Group 10 FTA: 0.9824 +[2025-07-08 02:47:31] [Rank 0] Group 10 FTA: 0.9824 +[2025-07-08 02:47:31] [Rank 0] Group 11 FTA: 0.9824 +[2025-07-08 02:47:31] [Rank 0] Group 11 FTA: 0.9824 +[2025-07-08 02:47:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 02:47:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 02:47:32] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:47:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:47:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:47:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:47:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:47:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:47:32] [Rank 0] step:9501/10000 train_time:768076ms step_avg:80.84ms +[2025-07-08 02:47:32] [Rank 0] step:9501/10000 train_time:768076ms step_avg:80.84ms +[2025-07-08 02:47:34] [Rank 0] step:9521/10000 train_time:769588ms step_avg:80.83ms +[2025-07-08 02:47:34] [Rank 0] step:9521/10000 train_time:769588ms step_avg:80.83ms +[2025-07-08 02:47:36] [Rank 0] step:9541/10000 train_time:771336ms step_avg:80.84ms +[2025-07-08 02:47:36] [Rank 0] step:9541/10000 train_time:771336ms step_avg:80.84ms +[2025-07-08 02:47:37] [Rank 0] step:9561/10000 train_time:773220ms step_avg:80.87ms +[2025-07-08 02:47:37] [Rank 0] step:9561/10000 train_time:773220ms step_avg:80.87ms +[2025-07-08 02:47:39] [Rank 0] step:9581/10000 train_time:774713ms step_avg:80.86ms +[2025-07-08 02:47:39] [Rank 0] step:9581/10000 train_time:774713ms step_avg:80.86ms +[2025-07-08 02:47:40] [Rank 0] step:9601/10000 train_time:776203ms step_avg:80.85ms +[2025-07-08 02:47:40] [Rank 0] step:9601/10000 train_time:776203ms step_avg:80.85ms +[2025-07-08 02:47:42] [Rank 0] step:9621/10000 train_time:777694ms step_avg:80.83ms +[2025-07-08 02:47:42] [Rank 0] step:9621/10000 train_time:777694ms step_avg:80.83ms +[2025-07-08 02:47:44] [Rank 0] step:9641/10000 train_time:779831ms step_avg:80.89ms +[2025-07-08 02:47:44] [Rank 0] step:9641/10000 train_time:779831ms step_avg:80.89ms +[2025-07-08 02:47:46] [Rank 0] step:9661/10000 train_time:781321ms step_avg:80.87ms +[2025-07-08 02:47:46] [Rank 0] step:9661/10000 train_time:781321ms step_avg:80.87ms +[2025-07-08 02:47:47] [Rank 0] step:9681/10000 train_time:782811ms step_avg:80.86ms +[2025-07-08 02:47:47] [Rank 0] step:9681/10000 train_time:782811ms step_avg:80.86ms +[2025-07-08 02:47:49] [Rank 0] step:9701/10000 train_time:784305ms step_avg:80.85ms +[2025-07-08 02:47:49] [Rank 0] step:9701/10000 train_time:784305ms step_avg:80.85ms +[2025-07-08 02:47:51] [Rank 0] step:9721/10000 train_time:785850ms step_avg:80.84ms +[2025-07-08 02:47:51] [Rank 0] step:9721/10000 train_time:785850ms step_avg:80.84ms +[2025-07-08 02:47:52] [Rank 0] step:9741/10000 train_time:787945ms step_avg:80.89ms +[2025-07-08 02:47:52] [Rank 0] step:9741/10000 train_time:787945ms step_avg:80.89ms +[2025-07-08 02:47:54] [Rank 0] step:9761/10000 train_time:789439ms step_avg:80.88ms +[2025-07-08 02:47:54] [Rank 0] step:9761/10000 train_time:789439ms step_avg:80.88ms +[2025-07-08 02:47:55] [Rank 0] step:9781/10000 train_time:790936ms step_avg:80.86ms +[2025-07-08 02:47:55] [Rank 0] step:9781/10000 train_time:790936ms step_avg:80.86ms +[2025-07-08 02:47:57] [Rank 0] step:9801/10000 train_time:792431ms step_avg:80.85ms +[2025-07-08 02:47:57] [Rank 0] 
step:9801/10000 train_time:792431ms step_avg:80.85ms +[2025-07-08 02:47:59] [Rank 0] step:9821/10000 train_time:794586ms step_avg:80.91ms +[2025-07-08 02:47:59] [Rank 0] step:9821/10000 train_time:794586ms step_avg:80.91ms +[2025-07-08 02:48:00] [Rank 0] step:9841/10000 train_time:796084ms step_avg:80.89ms +[2025-07-08 02:48:00] [Rank 0] step:9841/10000 train_time:796084ms step_avg:80.89ms +[2025-07-08 02:48:02] [Rank 0] step:9861/10000 train_time:797580ms step_avg:80.88ms +[2025-07-08 02:48:02] [Rank 0] step:9861/10000 train_time:797580ms step_avg:80.88ms +[2025-07-08 02:48:03] [Rank 0] step:9881/10000 train_time:799079ms step_avg:80.87ms +[2025-07-08 02:48:03] [Rank 0] step:9881/10000 train_time:799079ms step_avg:80.87ms +[2025-07-08 02:48:05] [Rank 0] step:9901/10000 train_time:800832ms step_avg:80.88ms +[2025-07-08 02:48:05] [Rank 0] step:9901/10000 train_time:800832ms step_avg:80.88ms +[2025-07-08 02:48:07] [Rank 0] step:9921/10000 train_time:802308ms step_avg:80.87ms +[2025-07-08 02:48:07] [Rank 0] step:9921/10000 train_time:802308ms step_avg:80.87ms +[2025-07-08 02:48:08] [Rank 0] step:9941/10000 train_time:803805ms step_avg:80.86ms +[2025-07-08 02:48:08] [Rank 0] step:9941/10000 train_time:803805ms step_avg:80.86ms +[2025-07-08 02:48:10] [Rank 0] step:9961/10000 train_time:805302ms step_avg:80.85ms +[2025-07-08 02:48:10] [Rank 0] step:9961/10000 train_time:805302ms step_avg:80.85ms +[2025-07-08 02:48:11] [Rank 0] step:9981/10000 train_time:806799ms step_avg:80.83ms +[2025-07-08 02:48:11] [Rank 0] step:9981/10000 train_time:806799ms step_avg:80.83ms +[2025-07-08 02:48:13] [Rank 0] step:10000/10000 train_time:808458ms step_avg:80.85ms +[2025-07-08 02:48:13] [Rank 0] step:10000/10000 train_time:808458ms step_avg:80.85ms +[2025-07-08 02:48:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:48:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 02:48:14] [Rank 0] PRINT: step:10000/10000 train_loss:0.8549 val_loss:0.8584 train_time:808579ms step_avg:80.86ms +[2025-07-08 02:48:14] [Rank 0] PRINT: step:10000/10000 train_loss:0.8549 val_loss:0.8584 train_time:808579ms step_avg:80.86ms +[2025-07-08 02:48:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:48:14] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 02:48:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 02:48:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
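"FTA" throughout this log is first-token accuracy: greedily predict the next token at the last prompt position and compare it with the first token of the reference answer, which is encoded with a leading space so it matches how an answer appears after the question during training. A minimal sketch of the metric for one example, following the same logic as compute_first_token_accuracy and the FTA branch of run_detailed_evaluation in the script logged below, and assuming a GPT-2-style tokenizer plus per-position logits of shape [seq_len, vocab_size]:

import torch

def first_token_correct(logits, tokenizer, prompt, answer):
    # logits: [seq_len, vocab_size] from a forward pass over the prompt.
    prompt_len = len(tokenizer.encode(prompt, add_special_tokens=False))
    # Leading space matters: ' Paris' tokenizes differently from 'Paris'.
    expected = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
    predicted = torch.argmax(logits[prompt_len - 1]).item()
    return predicted == expected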
+[2025-07-08 02:48:14] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:48:14] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 02:53:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 02:53:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 02:53:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 02:53:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 02:53:38] [Rank 0] Total Loss: 5.5331 +[2025-07-08 02:53:38] [Rank 0] Total Loss: 5.5331 +[2025-07-08 02:53:38] [Rank 0] Total FTA: 0.9904 +[2025-07-08 02:53:38] [Rank 0] Total FTA: 0.9904 +[2025-07-08 02:53:38] [Rank 0] Group 0 Loss: 5.5730 +[2025-07-08 02:53:38] [Rank 0] Group 0 Loss: 5.5730 +[2025-07-08 02:53:38] [Rank 0] Group 1 Loss: 5.6060 +[2025-07-08 02:53:38] [Rank 0] Group 1 Loss: 5.6060 +[2025-07-08 02:53:38] [Rank 0] Group 2 Loss: 5.3622 +[2025-07-08 02:53:38] [Rank 0] Group 2 Loss: 5.3622 +[2025-07-08 02:53:38] [Rank 0] Group 3 Loss: 5.6535 +[2025-07-08 02:53:38] [Rank 0] Group 3 Loss: 5.6535 +[2025-07-08 02:53:38] [Rank 0] Group 4 Loss: 5.5518 +[2025-07-08 02:53:38] [Rank 0] Group 4 Loss: 5.5518 +[2025-07-08 02:53:38] [Rank 0] Group 5 Loss: 5.5333 +[2025-07-08 02:53:38] [Rank 0] Group 5 Loss: 5.5333 +[2025-07-08 02:53:38] [Rank 0] Group 6 Loss: 5.4247 +[2025-07-08 02:53:38] [Rank 0] Group 6 Loss: 5.4247 +[2025-07-08 02:53:38] [Rank 0] Group 7 Loss: 5.5711 +[2025-07-08 02:53:38] [Rank 0] Group 7 Loss: 5.5711 +[2025-07-08 02:53:38] [Rank 0] Group 8 Loss: 5.4757 +[2025-07-08 02:53:38] [Rank 0] Group 8 Loss: 5.4757 +[2025-07-08 02:53:38] [Rank 0] Group 9 Loss: 5.5356 +[2025-07-08 02:53:38] [Rank 0] Group 9 Loss: 5.5356 +[2025-07-08 02:53:38] [Rank 0] Group 10 Loss: 5.5291 +[2025-07-08 02:53:38] [Rank 0] Group 10 Loss: 5.5291 +[2025-07-08 02:53:38] [Rank 0] Group 11 Loss: 5.5372 +[2025-07-08 02:53:38] [Rank 0] Group 11 Loss: 5.5372 +[2025-07-08 02:53:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-08 02:53:38] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-08 02:53:38] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 02:53:38] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-08 02:53:38] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 02:53:38] [Rank 0] Group 2 FTA: 1.0000 +[2025-07-08 02:53:38] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 02:53:38] [Rank 0] Group 3 FTA: 1.0000 +[2025-07-08 02:53:38] [Rank 0] Group 4 FTA: 0.9766 +[2025-07-08 02:53:38] [Rank 0] Group 4 FTA: 0.9766 +[2025-07-08 02:53:38] [Rank 0] Group 5 FTA: 0.9948 +[2025-07-08 02:53:38] [Rank 0] Group 5 FTA: 0.9948 +[2025-07-08 02:53:38] [Rank 0] Group 6 FTA: 0.9974 +[2025-07-08 02:53:38] [Rank 0] Group 6 FTA: 0.9974 +[2025-07-08 02:53:38] [Rank 0] Group 7 FTA: 0.9870 +[2025-07-08 02:53:38] [Rank 0] Group 7 FTA: 0.9870 +[2025-07-08 02:53:38] [Rank 0] Group 8 FTA: 0.9844 +[2025-07-08 02:53:38] [Rank 0] Group 8 FTA: 0.9844 +[2025-07-08 02:53:38] [Rank 0] Group 9 FTA: 0.9844 +[2025-07-08 02:53:38] [Rank 0] Group 9 FTA: 0.9844 +[2025-07-08 02:53:38] [Rank 0] Group 10 FTA: 0.9824 +[2025-07-08 02:53:38] [Rank 0] Group 10 FTA: 0.9824 +[2025-07-08 02:53:38] [Rank 0] Group 11 FTA: 0.9824 +[2025-07-08 02:53:38] [Rank 0] Group 11 FTA: 0.9824 +[2025-07-08 02:53:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 02:53:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_loss_curves.png +[2025-07-08 02:53:39] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:53:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/per_class_acc_curves.png +[2025-07-08 02:53:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:53:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_loss_curve.png +[2025-07-08 02:53:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:53:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_46/total_acc_curve.png +[2025-07-08 02:53:40] [Rank 0] step:10001/10000 train_time:808599ms step_avg:80.85ms +[2025-07-08 02:53:40] [Rank 0] step:10001/10000 train_time:808599ms step_avg:80.85ms +[2025-07-08 02:53:40] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 02:53:40 2025 --- +[2025-07-08 02:53:40] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 02:53:40 2025 --- +[2025-07-08 02:53:40] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 10036 MiB +[2025-07-08 02:53:40] [Rank 0] PRINT: Peak memory allocated: 8783 MiB reserved: 10036 MiB diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/config.json b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..1d0f9f1a6bd7c5cd9d73435522ce251101f14ee7 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 0, + "model_parameterization": "qkvo", + "adam_lr": 0.005 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e26a3c6b-1b76-47b9-8196-a866a57b09a3", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..d92a755a22da369c655af517878e462f18fef635 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:43bba465316a65529afd31f5d692a7e375e53c1669ce75fec97c6055443e341a +size 344696 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..c64685f42d49503d9f943298db8712dca50c278d --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce70ff23f94e4faeac1e44bfbe9394da8a08bf7a77c8c7da217fd6e14a756393 +size 374138 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png 
b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..31f56456812ca90e7035222a4ab5bbd800aee91c --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0237f5e7f6dc05dbd7ff7357e1ded199da61089da7754c5c0a6d7497206c17a4 +size 110282 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..a3fa0dcbd14fa08eea6446aef04023958215ee25 --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee09adedf697bf02edbbbf0cc9c326778d092755902b2055438ae49386110823 +size 110703 diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/training_log_e26a3c6b-1b76-47b9-8196-a866a57b09a3.txt b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/training_log_e26a3c6b-1b76-47b9-8196-a866a57b09a3.txt new file mode 100644 index 0000000000000000000000000000000000000000..9f34df77f4645ff7381bf236f20072a9950d465b --- /dev/null +++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/training_log_e26a3c6b-1b76-47b9-8196-a866a57b09a3.txt @@ -0,0 +1,5132 @@ +[2025-07-06 17:33:16] [Rank 0] PRINT: --- Script Start: Sun Jul 6 17:33:16 2025 --- +[2025-07-06 17:33:16] [Rank 0] PRINT: --- Script Start: Sun Jul 6 17:33:16 2025 --- +[2025-07-06 17:33:16] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-06 17:33:16] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005) +[2025-07-06 17:33:16] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-06 17:33:16] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-06 17:33:16] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-06 17:33:16] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-06 17:33:16] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48 +[2025-07-06 17:33:16] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48 +[2025-07-06 17:33:16] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as 
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
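+# Illustrative sketch only (hypothetical helper, never called below): one way an
+# optimizer_mode switch like the one documented in the --optimizer_mode help text
+# could route hidden 2-D matrices between Muon and Adam. The q_w/k_w/v_w names
+# come from the qkvo parameterization mentioned above; 'o_w' and the exact name
+# matching are assumptions, and the script's real partition is built later,
+# where the optimizers are constructed.
+def _route_param_sketch(param_name: str, mode: int) -> str:
+    is_qk = any(tag in param_name for tag in ("q_w", "k_w"))
+    is_vo = any(tag in param_name for tag in ("v_w", "o_w"))
+    is_mlp = "mlp" in param_name
+    if mode == 0:  # Muon on all hidden attn + MLP matrices (the default)
+        return "muon" if (is_qk or is_vo or is_mlp) else "adam"
+    if mode == 1:  # Muon(QK attn) / Adam(VO attn, MLP)
+        return "muon" if is_qk else "adam"
+    if mode == 2:  # Muon(VO attn) / Adam(QK attn, MLP)
+        return "muon" if is_vo else "adam"
+    if mode == 3:  # Muon(all attn) / Adam(MLP)
+        return "muon" if (is_qk or is_vo) else "adam"
+    if mode == 4:  # Muon(MLP) / Adam(all attn)
+        return "muon" if is_mlp else "adam"
+    return "adam"  # mode 5 (all Adam); modes 6-8 omitted from this sketch
+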
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file exactly once; a duplicated write
+        # here is what doubles every line in the training logs
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (only once per item) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # The target_seq=None forward test elsewhere in this script shows the model
+            # returning a (loss, logits) tuple; keep the logits (the last element),
+            # consistent with compute_first_token_accuracy.
+            if isinstance(logits, tuple): logits = logits[-1]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if 0 < prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
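+# A brief sanity check of the stable-then-decay schedule defined in get_lr above
+# (an illustrative sketch, not part of the original run; assumes the defaults
+# num_iterations=10000 and cooldown_frac=0.8, under which the multiplier stays at
+# 1.0 up to step 2000 and then decays linearly to 0.1 by the final step):
+if master_process:
+    for _demo_step in (0, 2000, 6000, 10000):
+        print0(f"get_lr({_demo_step}) = {get_lr(_demo_step):.3f}", console=False)
+        # expected: 1.000, 1.000, 0.550, 0.100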
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 17:33:16] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
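+# For example (an illustrative sketch of the run-folder naming scheme, not part of
+# the original run): with the argparse defaults above (optimizer_mode=0,
+# model_parameterization="whole", adam_lr=0.001, seed=42), run_dir_path_str ends in
+# "mode_0_param_whole_lr_0.001_seed_42".
+if master_process:
+    print(f"Run folder name: {Path(run_dir_path_str).name}", flush=True)
+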
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (only once per item) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # The target_seq=None forward test elsewhere in this script shows the model
+            # returning a (loss, logits) tuple; keep the logits (the last element),
+            # consistent with compute_first_token_accuracy.
+            if isinstance(logits, tuple): logits = logits[-1]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if 0 < prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    # Load the raw QA data.
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    # Optionally subsample, stratified by class so every class keeps at least one item.
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # Create the class-id -> group-id mapping.
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
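+
+# Both evaluation helpers above share the same sequence-preparation scheme:
+# inputs are right-padded to the next multiple of the 128-token attention
+# block (capped at 4096 tokens), and target positions past the real text are
+# filled with -100 so the cross-entropy ignores them. A minimal sketch of the
+# alignment rule (`pad_to_block` is an illustrative name, not a helper that
+# exists in this script):
+#
+#     def pad_to_block(tokens, pad_id, block=128, cap=4096):
+#         padded_len = min(((len(tokens) + block - 1) // block) * block, cap)
+#         seq = tokens[:padded_len] + [pad_id] * (padded_len - len(tokens))
+#         return seq, padded_len // block  # padded ids, sliding-window block count
+#
+#     # e.g. 200 tokens -> 256 ids / 2 blocks; 5000 tokens -> 4096 ids / 32 blocks.
+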
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot per-class loss curves from aggregated history data."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+########################################
+#    Construct model and optimizer     #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)  # target_seq=None
+        model.train()
+
+        if isinstance(result,
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
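+# A quick numeric sanity check of the stable-then-decay schedule defined in
+# get_lr above (a hedged sketch, not part of the original script; the values
+# num_iterations=10000 and cooldown_frac=0.8 come from this run's config):
+#
+#     for s in (0, 2000, 6000, 10000):
+#         x = s / 10000
+#         w = (1 - x) / 0.8
+#         print(s, 1.0 if x < 0.2 else w * 1.0 + (1 - w) * 0.1)
+#     # -> 0: 1.0 (stable), 2000: 1.0 (cooldown begins), 6000: 0.55, 10000: 0.1
+#
+# The multiplier scales each param group's "initial_lr", so e.g. the Muon
+# matrices decay from 0.01 toward 0.001 over the final 80% of training.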
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 17:33:17] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 17:33:17] [Rank 0] PRINT: Constructing model...
+[2025-07-06 17:33:19] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-06 17:33:19] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-06 17:33:19] [Rank 0] PRINT: Testing model forward function:
+[2025-07-06 17:33:20] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-06 17:33:20] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-06 17:33:20] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-06 17:33:20] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-06 17:33:20] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-06 17:33:20] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-06 17:33:20] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0
+[2025-07-06 17:33:20] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.
+[2025-07-06 17:33:20] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2
+[2025-07-06 17:33:20] [Rank 0] PRINT: Muon optimizer is active with 68 parameters.
+[2025-07-06 17:33:20] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-06 17:33:20] [Rank 0] PRINT: Model compilation complete.
+[2025-07-06 17:33:20] [Rank 0] PRINT: Starting warmup...
+[2025-07-06 17:42:54] [Rank 0] PRINT: Warmup complete.
+[2025-07-06 17:42:54] [Rank 0] PRINT: Starting training...
+[2025-07-06 17:42:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:47:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-06 17:47:11] [Rank 0] step:21/10000 train_time:1756ms step_avg:83.63ms
+[2025-07-06 17:47:12] [Rank 0] step:41/10000 train_time:3449ms step_avg:84.13ms
+[2025-07-06 17:47:14] [Rank 0] step:61/10000 train_time:4917ms step_avg:80.60ms
+[2025-07-06 17:47:16] [Rank 0] step:81/10000 train_time:6463ms step_avg:79.79ms
+[2025-07-06 17:47:18] [Rank 0] step:101/10000 train_time:8603ms step_avg:85.18ms
+[2025-07-06 17:47:19] [Rank 0] step:121/10000 train_time:10067ms step_avg:83.20ms
+[2025-07-06 17:47:21] [Rank 0] step:141/10000 train_time:11531ms step_avg:81.78ms
+[2025-07-06 17:47:22] [Rank 0] step:161/10000 train_time:13001ms step_avg:80.75ms
+[2025-07-06 17:47:24] [Rank 0] step:181/10000 train_time:14465ms step_avg:79.92ms
+[2025-07-06 17:47:25] [Rank 0] step:201/10000 train_time:16170ms step_avg:80.45ms
+[2025-07-06 17:47:27] [Rank 0] step:221/10000 train_time:17635ms step_avg:79.80ms
+[2025-07-06 17:47:28] [Rank 0] step:241/10000 train_time:19103ms step_avg:79.27ms
+[2025-07-06 17:47:30] [Rank 0] step:261/10000 train_time:20573ms step_avg:78.82ms
+[2025-07-06 17:47:32] [Rank 0] step:281/10000 train_time:22711ms step_avg:80.82ms
+[2025-07-06 17:47:33] [Rank 0] step:301/10000 train_time:24176ms step_avg:80.32ms
+[2025-07-06 17:47:35] [Rank 0] step:321/10000 train_time:25643ms step_avg:79.88ms
+[2025-07-06 17:47:36] [Rank 0] step:341/10000 train_time:27112ms step_avg:79.51ms
+[2025-07-06 17:47:38] [Rank 0] step:361/10000 train_time:28576ms step_avg:79.16ms
+[2025-07-06 17:47:40] [Rank 0] step:381/10000 train_time:30709ms step_avg:80.60ms
+[2025-07-06 17:47:41] [Rank 0] step:401/10000 train_time:32174ms step_avg:80.24ms
+[2025-07-06 17:47:43] [Rank 0] step:421/10000 train_time:33643ms step_avg:79.91ms
+[2025-07-06 17:47:44] [Rank 0] step:441/10000 train_time:35107ms step_avg:79.61ms
+[2025-07-06 17:47:46] [Rank 0] step:461/10000 train_time:37237ms step_avg:80.77ms
+[2025-07-06 17:47:48] [Rank 0] step:481/10000 train_time:38700ms step_avg:80.46ms
+[2025-07-06 17:47:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:47:50] [Rank 0] PRINT: step:500/10000 train_loss:2.2941 val_loss:1.3669 train_time:40166ms step_avg:80.33ms
+[2025-07-06 17:47:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:47:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 17:47:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:53:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:53:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:53:16] [Rank 0] Total Loss: 4.2280
+[2025-07-06 17:53:16] [Rank 0] Total FTA: 0.0850
+[2025-07-06 17:53:16] [Rank 0] Group 0 Loss: 4.3073
+[2025-07-06 17:53:16] [Rank 0] Group 1 Loss: 4.0517
+[2025-07-06 17:53:16] [Rank 0] Group 2 Loss: 4.0859
+[2025-07-06 17:53:16] [Rank 0] Group 3 Loss: 4.2890
+[2025-07-06 17:53:16] [Rank 0] Group 4 Loss: 4.2340
+[2025-07-06 17:53:16] [Rank 0] Group 5 Loss: 4.2079
+[2025-07-06 17:53:16] [Rank 0] Group 6 Loss: 4.1618
+[2025-07-06 17:53:16] [Rank 0] Group 7 Loss: 4.2566
+[2025-07-06 17:53:16] [Rank 0] Group 8 Loss: 4.2406
+[2025-07-06 17:53:16] [Rank 0] Group 9 Loss: 4.2609
+[2025-07-06 17:53:16] [Rank 0] Group 10 Loss: 4.2482
+[2025-07-06 17:53:16] [Rank 0] Group 11 Loss: 4.2615
+[2025-07-06 17:53:16] [Rank 0] Group 0 FTA: 0.1404
+[2025-07-06 17:53:16] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 17:53:16] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-06 17:53:16] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-06 17:53:16] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-06 17:53:16] [Rank 0] Group 5 FTA: 0.1250
+[2025-07-06 17:53:16] [Rank 0] Group 6 FTA: 0.0729
+[2025-07-06 17:53:16] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-06 17:53:16] [Rank 0] Group 8 FTA: 0.1198
+[2025-07-06 17:53:16] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-06 17:53:16] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-06 17:53:16] [Rank 0] Group 11 FTA: 0.0830
+[2025-07-06 17:53:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 17:53:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 17:53:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 17:53:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 17:53:17] [Rank 0] step:501/10000 train_time:40188ms step_avg:80.22ms
+[2025-07-06 17:53:19] [Rank 0] step:521/10000 train_time:41654ms step_avg:79.95ms
+[2025-07-06 17:53:21] [Rank 0] step:541/10000 train_time:43778ms step_avg:80.92ms
+[2025-07-06 17:53:22] [Rank 0] step:561/10000 train_time:45216ms step_avg:80.60ms
+[2025-07-06 17:53:24] [Rank 0] step:581/10000 train_time:46672ms step_avg:80.33ms
+[2025-07-06 17:53:25] [Rank 0] step:601/10000 train_time:48130ms step_avg:80.08ms
+[2025-07-06 17:53:27] [Rank 0] step:621/10000 train_time:49593ms step_avg:79.86ms
+[2025-07-06 17:53:29] [Rank 0] step:641/10000 train_time:51717ms step_avg:80.68ms
+[2025-07-06 17:53:30] [Rank 0] step:661/10000 train_time:53176ms step_avg:80.45ms
+[2025-07-06 17:53:32] [Rank 0] step:681/10000 train_time:54913ms step_avg:80.64ms
+[2025-07-06 17:53:34] [Rank 0] step:701/10000 train_time:56372ms step_avg:80.42ms
+[2025-07-06 17:53:36] [Rank 0] step:721/10000 train_time:57905ms step_avg:80.31ms
+[2025-07-06 17:53:37] [Rank 0] step:741/10000 train_time:60035ms step_avg:81.02ms
+[2025-07-06 17:53:39] [Rank 0] step:761/10000 train_time:61508ms step_avg:80.82ms
+[2025-07-06 17:53:40] [Rank 0] step:781/10000 train_time:62985ms step_avg:80.65ms
+[2025-07-06 17:53:42] [Rank 0] step:801/10000 train_time:64458ms step_avg:80.47ms
+[2025-07-06 17:53:43] [Rank 0] step:821/10000 train_time:66174ms step_avg:80.60ms
+[2025-07-06 17:53:45] [Rank 0] step:841/10000 train_time:67647ms step_avg:80.44ms
+[2025-07-06 17:53:46] [Rank 0] step:861/10000 train_time:69122ms step_avg:80.28ms
+[2025-07-06 17:53:48] [Rank 0] step:881/10000 train_time:70600ms step_avg:80.14ms
+[2025-07-06 17:53:50] [Rank 0] step:901/10000 train_time:72074ms step_avg:79.99ms
+[2025-07-06 17:53:51] [Rank 0] step:921/10000 train_time:74196ms step_avg:80.56ms
+[2025-07-06 17:53:53] [Rank 0] step:941/10000 train_time:75670ms step_avg:80.41ms
+[2025-07-06 17:53:54] [Rank 0] step:961/10000 train_time:77146ms step_avg:80.28ms
+[2025-07-06 17:53:56] [Rank 0] step:981/10000 train_time:78623ms step_avg:80.15ms
+[2025-07-06 17:53:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:53:59] [Rank 0] PRINT: step:1000/10000 train_loss:1.2785 val_loss:1.1971 train_time:80747ms step_avg:80.75ms
+[2025-07-06 17:53:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:53:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 17:53:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:59:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:59:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:59:24] [Rank 0] Total Loss: 4.8470
+[2025-07-06 17:59:24] [Rank 0] Total FTA: 0.3235
+[2025-07-06 17:59:24] [Rank 0] Group 0 Loss: 5.1480
+[2025-07-06 17:59:24] [Rank 0] Group 1 Loss: 4.7160
+[2025-07-06 17:59:24] [Rank 0] Group 2 Loss: 4.7547
+[2025-07-06 17:59:24] [Rank 0] Group 3 Loss: 4.8170
+[2025-07-06 17:59:24] [Rank 0] Group 4 Loss: 4.8308
+[2025-07-06 17:59:24] [Rank 0] Group 5 Loss: 4.7626
+[2025-07-06 17:59:24] [Rank 0] Group 6 Loss: 4.7680
+[2025-07-06 17:59:24] [Rank 0] Group 7 Loss: 4.8132
+[2025-07-06 17:59:24] [Rank 0] Group 8 Loss: 4.7882
+[2025-07-06 17:59:24] [Rank 0] Group 9 Loss: 4.8371
+[2025-07-06 17:59:24] [Rank 0] Group 10 Loss: 4.8302
+[2025-07-06 17:59:24] [Rank 0] Group 11 Loss: 4.8289
+[2025-07-06 17:59:24] [Rank 0] Group 0 FTA: 0.3303
+[2025-07-06 17:59:24] [Rank 0] Group 1 FTA: 0.4714
+[2025-07-06 17:59:24] [Rank 0] Group 2 FTA: 0.3333
+[2025-07-06 17:59:24] [Rank 0] Group 3 FTA: 0.2214
+[2025-07-06 17:59:24] [Rank 0] Group 4 FTA: 0.3281
+[2025-07-06 17:59:24] [Rank 0] Group 5 FTA: 0.3906
+[2025-07-06 17:59:24] [Rank 0] Group 6 FTA: 0.2969
+[2025-07-06 17:59:24] [Rank 0] Group 7 FTA: 0.3021
+[2025-07-06 17:59:24] [Rank 0] Group 8 FTA: 0.2917
+[2025-07-06 17:59:24] [Rank 0] Group 9 FTA: 0.3398
+[2025-07-06 17:59:24] [Rank 0] Group 10 FTA: 0.3027
+[2025-07-06 17:59:24] [Rank 0] Group 11 FTA: 0.3066
+[2025-07-06 17:59:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 17:59:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 17:59:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 17:59:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 17:59:25] [Rank 0] step:1001/10000 train_time:80768ms step_avg:80.69ms
+[2025-07-06 17:59:27] [Rank 0] step:1021/10000 train_time:82234ms step_avg:80.54ms
+[2025-07-06 17:59:28] [Rank 0] step:1041/10000 train_time:83702ms step_avg:80.41ms
+[2025-07-06 17:59:30] [Rank 0] step:1061/10000 train_time:85172ms step_avg:80.28ms
+[2025-07-06 17:59:32] [Rank 0] step:1081/10000 train_time:86640ms step_avg:80.15ms
+[2025-07-06 17:59:33] [Rank 0] step:1101/10000 train_time:88771ms step_avg:80.63ms
+[2025-07-06 17:59:35] [Rank 0] step:1121/10000 train_time:90241ms step_avg:80.50ms
+[2025-07-06 17:59:36] [Rank 0] step:1141/10000 train_time:91712ms step_avg:80.38ms
+[2025-07-06 17:59:38] [Rank 0] step:1161/10000 train_time:93180ms step_avg:80.26ms
+[2025-07-06 17:59:40] [Rank 0] step:1181/10000 train_time:94895ms step_avg:80.35ms
+[2025-07-06 17:59:41] [Rank 0] step:1201/10000 train_time:96365ms step_avg:80.24ms
+[2025-07-06 17:59:43] [Rank 0] step:1221/10000 train_time:97836ms step_avg:80.13ms
+[2025-07-06 17:59:44] [Rank 0] step:1241/10000 train_time:99310ms step_avg:80.02ms
+[2025-07-06 17:59:46] [Rank 0] step:1261/10000 train_time:100778ms step_avg:79.92ms
+[2025-07-06 17:59:48] [Rank 0] step:1281/10000 train_time:102915ms step_avg:80.34ms
+[2025-07-06 17:59:49] [Rank 0] step:1301/10000 train_time:104389ms step_avg:80.24ms
+[2025-07-06 17:59:51] [Rank 0] step:1321/10000 train_time:106093ms step_avg:80.31ms
+[2025-07-06 17:59:52] [Rank 0] step:1341/10000 train_time:107569ms step_avg:80.22ms
+[2025-07-06 17:59:54] [Rank 0] step:1361/10000 train_time:109393ms step_avg:80.38ms
+[2025-07-06 17:59:56] [Rank 0] step:1381/10000 train_time:110869ms step_avg:80.28ms
+[2025-07-06 17:59:57] [Rank 0] step:1401/10000 train_time:112343ms step_avg:80.19ms
+[2025-07-06 17:59:59] [Rank 0] step:1421/10000 train_time:113820ms step_avg:80.10ms
+[2025-07-06 18:00:01] [Rank 0] step:1441/10000 train_time:115962ms step_avg:80.47ms
+[2025-07-06 18:00:02] [Rank 0] step:1461/10000 train_time:117420ms step_avg:80.37ms
+[2025-07-06 18:00:04] [Rank 0] step:1481/10000 train_time:118896ms step_avg:80.28ms
+[2025-07-06 18:00:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:00:06] [Rank 0] PRINT: step:1500/10000 train_loss:1.0810 val_loss:1.0185 train_time:120371ms step_avg:80.25ms
+[2025-07-06 18:00:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:00:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:00:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:05:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:05:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:05:28] [Rank 0] Total Loss: 5.1305
+[2025-07-06 18:05:28] [Rank 0] Total FTA: 0.6496
+[2025-07-06 18:05:28] [Rank 0] Group 0 Loss: 5.3738
+[2025-07-06 18:05:28] [Rank 0] Group 1 Loss: 4.9186
+[2025-07-06 18:05:28] [Rank 0] Group 2 Loss: 4.9585
+[2025-07-06 18:05:28] [Rank 0] Group 3 Loss: 5.2457
+[2025-07-06 18:05:28] [Rank 0] Group 4 Loss: 5.1603
+[2025-07-06 18:05:28] [Rank 0] Group 5 Loss: 5.0726
+[2025-07-06 18:05:28] [Rank 0] Group 6 Loss: 5.0412
+[2025-07-06 18:05:28] [Rank 0] Group 7 Loss: 5.1567
+[2025-07-06 18:05:28] [Rank 0] Group 8 Loss: 5.1189
+[2025-07-06 18:05:28] [Rank 0] Group 9 Loss: 5.0856
+[2025-07-06 18:05:28] [Rank 0] Group 10 Loss: 5.0888
+[2025-07-06 18:05:28] [Rank 0] Group 11 Loss: 5.1193
+[2025-07-06 18:05:28] [Rank 0] Group 0 FTA: 0.3329
+[2025-07-06 18:05:28] [Rank 0] Group 1 FTA: 0.8229
+[2025-07-06 18:05:28] [Rank 0] Group 2 FTA: 0.7422
+[2025-07-06 18:05:28] [Rank 0] Group 3 FTA: 0.7240
+[2025-07-06 18:05:28] [Rank 0] Group 4 FTA: 0.7839
+[2025-07-06 18:05:28] [Rank 0] Group 5 FTA: 0.7057
+[2025-07-06 18:05:28] [Rank 0] Group 6 FTA: 0.6589
+[2025-07-06 18:05:28] [Rank 0] Group 7 FTA: 0.6510
+[2025-07-06 18:05:28] [Rank 0] Group 8 FTA: 0.6510
+[2025-07-06 18:05:28] [Rank 0] Group 9 FTA: 0.6641
+[2025-07-06 18:05:28] [Rank 0] Group 10 FTA: 0.6309
+[2025-07-06 18:05:28] [Rank 0] Group 11 FTA: 0.6895
+[2025-07-06 18:05:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:05:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:05:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:05:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:05:30] [Rank 0] step:1501/10000 train_time:120393ms step_avg:80.21ms
+[2025-07-06 18:05:31] [Rank 0] step:1521/10000 train_time:121868ms step_avg:80.12ms
+[2025-07-06 18:05:33] [Rank 0] step:1541/10000 train_time:123985ms step_avg:80.46ms
+[2025-07-06 18:05:35] [Rank 0] step:1561/10000 train_time:125451ms step_avg:80.37ms
+[2025-07-06 18:05:36] [Rank 0] step:1581/10000 train_time:126920ms step_avg:80.28ms
+[2025-07-06 18:05:38] [Rank 0] step:1601/10000 train_time:128391ms step_avg:80.19ms
+[2025-07-06 18:05:39] [Rank 0] step:1621/10000 train_time:129859ms step_avg:80.11ms
+[2025-07-06 18:05:41] [Rank 0] step:1641/10000 train_time:131567ms step_avg:80.17ms
+[2025-07-06 18:05:42] [Rank 0] step:1661/10000 train_time:133035ms step_avg:80.09ms
+[2025-07-06 18:05:44] [Rank 0] step:1681/10000 train_time:134505ms step_avg:80.01ms
+[2025-07-06 18:05:45] [Rank 0] step:1701/10000 train_time:135977ms step_avg:79.94ms
+[2025-07-06 18:05:47] [Rank 0] step:1721/10000 train_time:138098ms step_avg:80.24ms
+[2025-07-06 18:05:49] [Rank 0] step:1741/10000 train_time:139565ms step_avg:80.16ms
+[2025-07-06 18:05:50] [Rank 0] step:1761/10000 train_time:141035ms step_avg:80.09ms
+[2025-07-06 18:05:52] [Rank 0] step:1781/10000 train_time:142507ms step_avg:80.01ms
+[2025-07-06 18:05:54] [Rank 0] step:1801/10000 train_time:144649ms step_avg:80.32ms
+[2025-07-06 18:05:55] [Rank 0] step:1821/10000 train_time:146098ms step_avg:80.23ms
+[2025-07-06 18:05:57] [Rank 0] step:1841/10000 train_time:147570ms step_avg:80.16ms
+[2025-07-06 18:05:58] [Rank 0] step:1861/10000 train_time:149042ms step_avg:80.09ms
+[2025-07-06 18:06:00] [Rank 0] step:1881/10000 train_time:150513ms step_avg:80.02ms
+[2025-07-06 18:06:02] [Rank 0] step:1901/10000 train_time:152229ms step_avg:80.08ms
+[2025-07-06 18:06:03] [Rank 0] step:1921/10000 train_time:153698ms step_avg:80.01ms
+[2025-07-06 18:06:04] [Rank 0] step:1941/10000 train_time:155171ms step_avg:79.94ms
+[2025-07-06 18:06:06] [Rank 0] step:1961/10000 train_time:156647ms step_avg:79.88ms
+[2025-07-06 18:06:08] [Rank 0] step:1981/10000 train_time:158119ms step_avg:79.82ms
+[2025-07-06 18:06:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:06:10] [Rank 0] PRINT: step:2000/10000 train_loss:0.9140 val_loss:0.9624 train_time:160247ms step_avg:80.12ms
+[2025-07-06 18:06:10] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:06:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:06:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:11:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:11:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:11:35] [Rank 0] Total Loss: 5.2795
+[2025-07-06 18:11:35] [Rank 0] Total FTA: 0.8335
+[2025-07-06 18:11:35] [Rank 0] Group 0 Loss: 5.6167
+[2025-07-06 18:11:35] [Rank 0] Group 1 Loss: 5.1337
+[2025-07-06 18:11:35] [Rank 0] Group 2 Loss: 5.2546
+[2025-07-06 18:11:35] [Rank 0] Group 3 Loss: 5.3716
+[2025-07-06 18:11:35] [Rank 0] Group 4 Loss: 5.1695
+[2025-07-06 18:11:35] [Rank 0] Group 5 Loss: 5.1339
+[2025-07-06 18:11:35] [Rank 0] Group 6 Loss: 5.1818
+[2025-07-06 18:11:35] [Rank 0] Group 7 Loss: 5.3064
+[2025-07-06 18:11:35] [Rank 0] Group 8 Loss: 5.2196
+[2025-07-06 18:11:35] [Rank 0] Group 9 Loss: 5.1614
+[2025-07-06 18:11:35] [Rank 0] Group 10 Loss: 5.2397
+[2025-07-06 18:11:35] [Rank 0] Group 11 Loss: 5.2498
+[2025-07-06 18:11:35] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:11:35] [Rank 0] Group 1 FTA: 0.8776
+[2025-07-06 18:11:35] [Rank 0] Group 2 FTA: 0.8438
+[2025-07-06 18:11:35] [Rank 0] Group 3 FTA: 0.7943
+[2025-07-06 18:11:35] [Rank 0] Group 4 FTA: 0.7448
+[2025-07-06 18:11:35] [Rank 0] Group 5 FTA: 0.8776
+[2025-07-06 18:11:35] [Rank 0] Group 6 FTA: 0.7865
+[2025-07-06 18:11:35] [Rank 0] Group 7 FTA: 0.7995
+[2025-07-06 18:11:35] [Rank 0] Group 8 FTA: 0.8047
+[2025-07-06 18:11:35] [Rank 0] Group 9 FTA: 0.7773
+[2025-07-06 18:11:35] [Rank 0] Group 10 FTA: 0.7969
+[2025-07-06 18:11:35] [Rank 0] Group 11 FTA: 0.7930
+[2025-07-06 18:11:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:11:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:11:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:11:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:11:36] [Rank 0] step:2001/10000 train_time:160268ms step_avg:80.09ms
+[2025-07-06 18:11:38] [Rank 0] step:2021/10000 train_time:161746ms step_avg:80.03ms
+[2025-07-06 18:11:39] [Rank 0] step:2041/10000 train_time:163214ms step_avg:79.97ms
+[2025-07-06 18:11:41] [Rank 0] step:2061/10000 train_time:164681ms step_avg:79.90ms
+[2025-07-06 18:11:43] [Rank 0] step:2081/10000 train_time:166806ms step_avg:80.16ms
+[2025-07-06 18:11:44] [Rank 0] step:2101/10000 train_time:168276ms step_avg:80.09ms
+[2025-07-06 18:11:46] [Rank 0] step:2121/10000 train_time:169747ms step_avg:80.03ms
+[2025-07-06 18:11:47] [Rank 0] step:2141/10000 train_time:171213ms step_avg:79.97ms
+[2025-07-06 18:11:49] [Rank 0] step:2161/10000 train_time:172738ms step_avg:79.93ms
+[2025-07-06 18:11:50] [Rank 0] step:2181/10000 train_time:174389ms step_avg:79.96ms
+[2025-07-06 18:11:52] [Rank 0] step:2201/10000 train_time:175862ms step_avg:79.90ms
+[2025-07-06 18:11:53] [Rank 0] step:2221/10000 train_time:177330ms step_avg:79.84ms
+[2025-07-06 18:11:55] [Rank 0] step:2241/10000 train_time:178823ms step_avg:79.80ms
+[2025-07-06 18:11:57] [Rank 0] step:2261/10000 train_time:180966ms step_avg:80.04ms
+[2025-07-06 18:11:59] [Rank 0] step:2281/10000 train_time:182461ms step_avg:79.99ms
+[2025-07-06 18:12:00] [Rank 0] step:2301/10000 train_time:183960ms step_avg:79.95ms
+[2025-07-06 18:12:02] [Rank 0] step:2321/10000 train_time:185457ms step_avg:79.90ms
+[2025-07-06 18:12:04] [Rank 0] step:2341/10000 train_time:186953ms step_avg:79.86ms
+[2025-07-06 18:12:05] [Rank 0] step:2361/10000 train_time:189108ms step_avg:80.10ms
+[2025-07-06 18:12:07] [Rank 0] step:2381/10000 train_time:190605ms step_avg:80.05ms
+[2025-07-06 18:12:08] [Rank 0] step:2401/10000 train_time:192101ms step_avg:80.01ms
+[2025-07-06 18:12:10] [Rank 0] step:2421/10000 train_time:193599ms step_avg:79.97ms
+[2025-07-06 18:12:12] [Rank 0] step:2441/10000 train_time:195750ms step_avg:80.19ms
+[2025-07-06 18:12:13] [Rank 0] step:2461/10000 train_time:197245ms step_avg:80.15ms
+[2025-07-06 18:12:15] [Rank 0] step:2481/10000 train_time:198744ms step_avg:80.11ms
+[2025-07-06 18:12:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:12:17] [Rank 0] PRINT: step:2500/10000 train_loss:0.8928 val_loss:0.8803 train_time:200242ms step_avg:80.10ms
+[2025-07-06 18:12:17] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:12:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:12:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:17:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:17:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:17:44] [Rank 0] Total Loss: 5.2834
+[2025-07-06 18:17:44] [Rank 0] Total FTA: 0.9460
+[2025-07-06 18:17:44] [Rank 0] Group 0 Loss: 5.5168
+[2025-07-06 18:17:44] [Rank 0] Group 1 Loss: 5.1780
+[2025-07-06 18:17:44] [Rank 0] Group 2 Loss: 5.1343
+[2025-07-06 18:17:44] [Rank 0] Group 3 Loss: 5.4242
+[2025-07-06 18:17:44] [Rank 0] Group 4 Loss: 5.2120
+[2025-07-06 18:17:44] [Rank 0] Group 5 Loss: 5.2003
+[2025-07-06 18:17:44] [Rank 0] Group 6 Loss: 5.1480
+[2025-07-06 18:17:44] [Rank 0] Group 7 Loss: 5.2450
+[2025-07-06 18:17:44] [Rank 0] Group 8 Loss: 5.2579
+[2025-07-06 18:17:44] [Rank 0] Group 9 Loss: 5.2447
+[2025-07-06 18:17:44] [Rank 0] Group 10 Loss: 5.3080
+[2025-07-06 18:17:44] [Rank 0] Group 11 Loss: 5.2811
+[2025-07-06 18:17:44] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:17:44] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:17:44] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:17:44] [Rank 0] Group 3 FTA: 0.8906
+[2025-07-06 18:17:44] [Rank 0] Group 4 FTA: 0.8958
+[2025-07-06 18:17:44] [Rank 0] Group 5 FTA: 0.9844
+[2025-07-06 18:17:44] [Rank 0] Group 6 FTA: 0.9557
+[2025-07-06 18:17:44] [Rank 0] Group 7 FTA: 0.9167
+[2025-07-06 18:17:44] [Rank 0] Group 8 FTA: 0.9036
+[2025-07-06 18:17:44] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-06 18:17:44] [Rank 0] Group 10 FTA: 0.9219
+[2025-07-06 18:17:44] [Rank 0] Group 11 FTA: 0.9268
+[2025-07-06 18:17:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:17:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:17:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:17:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:17:46] [Rank 0] step:2501/10000 train_time:200263ms step_avg:80.07ms
+[2025-07-06 18:17:48] [Rank 0] step:2521/10000 train_time:201818ms step_avg:80.05ms
+[2025-07-06 18:17:50] [Rank 0] step:2541/10000 train_time:203920ms step_avg:80.25ms
+[2025-07-06 18:17:51] [Rank 0] step:2561/10000 train_time:205413ms step_avg:80.21ms
+[2025-07-06 18:17:52] [Rank 0] step:2581/10000 train_time:206906ms step_avg:80.17ms
+[2025-07-06 18:17:54] [Rank 0] step:2601/10000 train_time:208397ms step_avg:80.12ms
+[2025-07-06 18:17:56] [Rank 0] step:2621/10000 train_time:210125ms step_avg:80.17ms
+[2025-07-06 18:17:57] [Rank 0] step:2641/10000 train_time:211721ms step_avg:80.17ms
+[2025-07-06 18:17:59] [Rank 0] step:2661/10000 train_time:213214ms step_avg:80.13ms
+[2025-07-06 18:18:00] [Rank 0] step:2681/10000 train_time:214710ms step_avg:80.09ms
+[2025-07-06 18:18:02] [Rank 0] step:2701/10000 train_time:216463ms step_avg:80.14ms
+[2025-07-06 18:18:04] [Rank 0] step:2721/10000 train_time:218357ms step_avg:80.25ms
+[2025-07-06 18:18:05] [Rank 0] step:2741/10000 train_time:219853ms step_avg:80.21ms
+[2025-07-06 18:18:07] [Rank 0] step:2761/10000 train_time:221348ms step_avg:80.17ms
+[2025-07-06 18:18:08] [Rank 0] step:2781/10000 train_time:222845ms step_avg:80.13ms
+[2025-07-06 18:18:11] [Rank 0] step:2801/10000 train_time:224998ms step_avg:80.33ms
+[2025-07-06 18:18:12] [Rank 0] step:2821/10000 train_time:226492ms step_avg:80.29ms
+[2025-07-06 18:18:14] [Rank 0] step:2841/10000 train_time:227989ms step_avg:80.25ms
+[2025-07-06 18:18:15] [Rank 0] step:2861/10000 train_time:229489ms step_avg:80.21ms
+[2025-07-06 18:18:17] [Rank 0] step:2881/10000 train_time:230988ms step_avg:80.18ms
+[2025-07-06 18:18:18] [Rank 0] step:2901/10000 train_time:232727ms step_avg:80.22ms
+[2025-07-06 18:18:20] [Rank 0] step:2921/10000 train_time:234224ms step_avg:80.19ms
+[2025-07-06 18:18:21] [Rank 0] step:2941/10000 train_time:235721ms step_avg:80.15ms
+[2025-07-06 18:18:23] [Rank 0] step:2961/10000 train_time:237219ms step_avg:80.11ms
+[2025-07-06 18:18:25] [Rank 0] step:2981/10000 train_time:239367ms step_avg:80.30ms
+[2025-07-06 18:18:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:18:27] [Rank 0] PRINT: step:3000/10000 train_loss:0.8804 val_loss:0.8721 train_time:240862ms step_avg:80.29ms
+[2025-07-06 18:18:27] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:18:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:18:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:23:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:23:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:23:53] [Rank 0] Total Loss: 5.2693
+[2025-07-06 18:23:53] [Rank 0] Total FTA: 0.9164
+[2025-07-06 18:23:53] [Rank 0] Group 0 Loss: 5.5271
+[2025-07-06 18:23:53] [Rank 0] Group 1 Loss: 5.1641
+[2025-07-06 18:23:53] [Rank 0] Group 2 Loss: 5.0584
+[2025-07-06 18:23:53] [Rank 0] Group 3 Loss: 5.3372
+[2025-07-06 18:23:53] [Rank 0] Group 4 Loss: 5.1638
+[2025-07-06 18:23:53] [Rank 0] Group 5 Loss: 5.2337
+[2025-07-06 18:23:53] [Rank 0] Group 6 Loss: 5.1538
+[2025-07-06 18:23:53] [Rank 0] Group 7 Loss: 5.2844
+[2025-07-06 18:23:53] [Rank 0] Group 8 Loss: 5.2173
+[2025-07-06 18:23:53] [Rank 0] Group 9 Loss: 5.2788
+[2025-07-06 18:23:53] [Rank 0] Group 10 Loss: 5.2418
+[2025-07-06 18:23:53] [Rank 0] Group 11 Loss: 5.2901
+[2025-07-06 18:23:53] [Rank 0] Group 0 FTA: 0.8322
+[2025-07-06 18:23:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:23:53] [Rank 0] Group 2 FTA: 0.9036
+[2025-07-06 18:23:53] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 18:23:53] [Rank 0] Group 4 FTA: 0.9583
+[2025-07-06 18:23:53] [Rank 0] Group 5 FTA: 0.9375
+[2025-07-06 18:23:53] [Rank 0] Group 6 FTA: 0.8958
+[2025-07-06 18:23:53] [Rank 0] Group 7 FTA: 0.9062
+[2025-07-06 18:23:53] [Rank 0] Group 8 FTA: 0.9271
+[2025-07-06 18:23:53] [Rank 0] Group 9 FTA: 0.8789
+[2025-07-06 18:23:53] [Rank 0] Group 10 FTA: 0.9062
+[2025-07-06 18:23:53] [Rank 0] Group 11 FTA: 0.9199
+[2025-07-06 18:23:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:23:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:23:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:23:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:23:54] [Rank 0] step:3001/10000 train_time:240883ms step_avg:80.27ms
+[2025-07-06 18:23:56] [Rank 0] step:3021/10000 train_time:242376ms step_avg:80.23ms
+[2025-07-06 18:23:57] [Rank 0] step:3041/10000 train_time:243868ms step_avg:80.19ms
+[2025-07-06 18:23:59] [Rank 0] step:3061/10000 train_time:245361ms step_avg:80.16ms
+[2025-07-06 18:24:01] [Rank 0] step:3081/10000 train_time:247509ms step_avg:80.33ms
+[2025-07-06 18:24:02] [Rank 0] step:3101/10000 train_time:249001ms step_avg:80.30ms
+[2025-07-06 18:24:04] [Rank 0] step:3121/10000 train_time:250495ms step_avg:80.26ms
+[2025-07-06 18:24:06] [Rank 0] step:3141/10000 train_time:252222ms step_avg:80.30ms
+[2025-07-06 18:24:08] [Rank 0] step:3161/10000 train_time:254468ms step_avg:80.50ms
+[2025-07-06 18:24:09] [Rank 0] step:3181/10000 train_time:255963ms step_avg:80.47ms
+[2025-07-06 18:24:11] [Rank 0] step:3201/10000 train_time:257458ms step_avg:80.43ms
+[2025-07-06 18:24:12] [Rank 0] step:3221/10000 train_time:258955ms step_avg:80.40ms
+[2025-07-06 18:24:14] [Rank 0] step:3241/10000 train_time:260708ms step_avg:80.44ms
+[2025-07-06 18:24:16] [Rank 0] step:3261/10000 train_time:262184ms step_avg:80.40ms
+[2025-07-06 18:24:17] [Rank 0] step:3281/10000 train_time:263680ms step_avg:80.37ms
+[2025-07-06 18:24:19] [Rank 0] step:3301/10000 train_time:265177ms step_avg:80.33ms
+[2025-07-06 18:24:20] [Rank 0] step:3321/10000 train_time:266673ms step_avg:80.30ms
+[2025-07-06 18:24:22] [Rank 0] step:3341/10000 train_time:268409ms step_avg:80.34ms
+[2025-07-06 18:24:23] [Rank 0] step:3361/10000 train_time:269906ms step_avg:80.31ms
+[2025-07-06 18:24:25] [Rank 0] step:3381/10000 train_time:271405ms step_avg:80.27ms
+[2025-07-06 18:24:26] [Rank 0] step:3401/10000 train_time:272904ms step_avg:80.24ms
+[2025-07-06 18:24:28] [Rank 0] step:3421/10000 train_time:274404ms step_avg:80.21ms
+[2025-07-06 18:24:30] [Rank 0] step:3441/10000 train_time:276573ms step_avg:80.38ms
+[2025-07-06 18:24:31] [Rank 0] step:3461/10000 train_time:278074ms step_avg:80.35ms
+[2025-07-06 18:24:33] [Rank 0] step:3481/10000 train_time:279574ms step_avg:80.31ms
+[2025-07-06 18:24:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:24:35] [Rank 0] PRINT: step:3500/10000 train_loss:0.8741 val_loss:0.8683 train_time:281082ms step_avg:80.31ms
+[2025-07-06 18:24:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:24:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:24:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:30:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:30:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:30:03] [Rank 0] Total Loss: 5.3254
+[2025-07-06 18:30:03] [Rank 0] Total FTA: 0.9162
+[2025-07-06 18:30:03] [Rank 0] Group 0 Loss: 5.5445
+[2025-07-06 18:30:03] [Rank 0] Group 1 Loss: 5.2123
+[2025-07-06 18:30:03] [Rank 0] Group 2 Loss: 5.3086
+[2025-07-06 18:30:03] [Rank 0] Group 3 Loss: 5.2699
+[2025-07-06 18:30:03] [Rank 0] Group 4 Loss: 5.2418
+[2025-07-06 18:30:03] [Rank 0] Group 5 Loss: 5.3647
+[2025-07-06 18:30:03] [Rank 0] Group 6 Loss: 5.2083
+[2025-07-06 18:30:03] [Rank 0] Group 7 Loss: 5.3046
+[2025-07-06 18:30:03] [Rank 0] Group 8 Loss: 5.3347
+[2025-07-06 18:30:03] [Rank 0] Group 9 Loss: 5.2715
+[2025-07-06 18:30:03] [Rank 0] Group 10 Loss: 5.3278
+[2025-07-06 18:30:03] [Rank 0] Group 11 Loss: 5.3074
+[2025-07-06 18:30:03] [Rank 0] Group 0 FTA: 0.8296
+[2025-07-06 18:30:03] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:30:03] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:30:03] [Rank 0] Group 3 FTA: 0.8151
+[2025-07-06 18:30:03] [Rank 0] Group 4 FTA: 0.9505
+[2025-07-06 18:30:03] [Rank 0] Group 5 FTA: 0.9219
+[2025-07-06 18:30:03] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-06 18:30:03] [Rank 0] Group 7 FTA: 0.9193
+[2025-07-06 18:30:03] [Rank 0] Group 8 FTA: 0.9349
+[2025-07-06 18:30:03] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-06 18:30:03] [Rank 0] Group 10 FTA: 0.9355
+[2025-07-06 18:30:03] [Rank 0] Group 11 FTA: 0.9189
+[2025-07-06 18:30:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:30:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:30:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:30:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:30:05] [Rank 0] step:3501/10000 train_time:281103ms step_avg:80.29ms
+[2025-07-06 18:30:07] [Rank 0] step:3521/10000 train_time:283272ms step_avg:80.45ms
+[2025-07-06 18:30:08] [Rank 0] step:3541/10000 train_time:284761ms step_avg:80.42ms
+[2025-07-06 18:30:10] [Rank 0] step:3561/10000 train_time:286253ms step_avg:80.39ms
+[2025-07-06 18:30:11] [Rank 0] step:3581/10000 train_time:287745ms step_avg:80.35ms
+[2025-07-06 18:30:13] [Rank 0] step:3601/10000 train_time:289291ms step_avg:80.34ms
+[2025-07-06 18:30:15] [Rank 0] step:3621/10000 train_time:290967ms step_avg:80.36ms
+[2025-07-06 18:30:16] [Rank 0] step:3641/10000 train_time:292460ms step_avg:80.32ms
+[2025-07-06 18:30:17] [Rank 0] step:3661/10000 train_time:293954ms step_avg:80.29ms
+[2025-07-06 18:30:19] [Rank 0] step:3681/10000 train_time:295447ms step_avg:80.26ms
+[2025-07-06 18:30:21] [Rank 0] step:3701/10000 train_time:297607ms step_avg:80.41ms
+[2025-07-06 18:30:23] [Rank 0] step:3721/10000 train_time:299101ms step_avg:80.38ms
+[2025-07-06 18:30:24] [Rank 0] step:3741/10000 train_time:300830ms step_avg:80.41ms
+[2025-07-06 18:30:26] [Rank 0] step:3761/10000 train_time:302347ms step_avg:80.39ms
+[2025-07-06 18:30:28] [Rank 0] step:3781/10000 train_time:304596ms step_avg:80.56ms
+[2025-07-06 18:30:30] [Rank 0] step:3801/10000 train_time:306073ms step_avg:80.52ms
+[2025-07-06 18:30:31] [Rank 0] step:3821/10000 train_time:307570ms step_avg:80.49ms
+[2025-07-06 18:30:33] [Rank 0] step:3841/10000 train_time:309069ms step_avg:80.47ms
+[2025-07-06 18:30:34] [Rank 0] step:3861/10000 train_time:310566ms step_avg:80.44ms
+[2025-07-06 18:30:36] [Rank 0] step:3881/10000 train_time:312714ms step_avg:80.58ms
+[2025-07-06 18:30:38] [Rank 0] step:3901/10000 train_time:314210ms step_avg:80.55ms
+[2025-07-06 18:30:39] [Rank 0] step:3921/10000 train_time:315709ms step_avg:80.52ms
+[2025-07-06 18:30:41] [Rank 0] step:3941/10000 train_time:317209ms step_avg:80.49ms
+[2025-07-06 18:30:43] [Rank 0] step:3961/10000 train_time:318708ms step_avg:80.46ms
+[2025-07-06 18:30:44] [Rank 0] step:3981/10000 train_time:320858ms step_avg:80.60ms
+[2025-07-06 18:30:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:30:47] [Rank 0] PRINT: step:4000/10000 train_loss:0.8700 val_loss:0.8667 train_time:322360ms step_avg:80.59ms
+[2025-07-06 18:30:47] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:30:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:30:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:36:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:36:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:36:11] [Rank 0] Total Loss: 5.4018
+[2025-07-06 18:36:11] [Rank 0] Total FTA: 0.9286
+[2025-07-06 18:36:12] [Rank 0] Group 0 Loss: 5.6804
+[2025-07-06 18:36:12] [Rank 0] Group 1 Loss: 5.2152
+[2025-07-06 18:36:12] [Rank 0] Group 2 Loss: 5.4517
+[2025-07-06 18:36:12] [Rank 0] Group 3 Loss: 5.5060
+[2025-07-06 18:36:12] [Rank 0] Group 4 Loss: 5.3447
+[2025-07-06 18:36:12] [Rank 0] Group 5 Loss: 5.3380
+[2025-07-06 18:36:12] [Rank 0] Group 6 Loss: 5.2515
+[2025-07-06 18:36:12] [Rank 0] Group 7 Loss: 5.4200
+[2025-07-06 18:36:12] [Rank 0] Group 8 Loss: 5.3143
+[2025-07-06 18:36:12] [Rank 0] Group 9 Loss: 5.3669
+[2025-07-06 18:36:12] [Rank 0] Group 10 Loss: 5.3703
+[2025-07-06 18:36:12] [Rank 0] Group 11 Loss: 5.3570
+[2025-07-06 18:36:12] [Rank 0] Group 0 FTA: 0.8492
+[2025-07-06 18:36:12] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:36:12] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:36:12] [Rank 0] Group 3 FTA: 0.8203
+[2025-07-06 18:36:12] [Rank 0] Group 4 FTA: 0.9401
+[2025-07-06 18:36:12] [Rank 0] Group 5 FTA: 0.9818
+[2025-07-06 18:36:12] [Rank 0] Group 6 FTA: 0.9271
+[2025-07-06 18:36:12] [Rank 0] Group 7 FTA: 0.9531
+[2025-07-06 18:36:12] [Rank 0] Group 8 FTA: 0.9349
+[2025-07-06 18:36:12] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-06 18:36:12] [Rank 0] Group 10 FTA: 0.9199
+[2025-07-06 18:36:12] [Rank 0] Group 11 FTA: 0.9414
+[2025-07-06 18:36:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:36:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:36:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:36:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:36:13] [Rank 0] step:4001/10000 train_time:322381ms step_avg:80.58ms
+[2025-07-06 18:36:15] [Rank 0] step:4021/10000 train_time:323887ms step_avg:80.55ms
+[2025-07-06 18:36:16] [Rank 0] step:4041/10000 train_time:325378ms step_avg:80.52ms
+[2025-07-06 18:36:18] [Rank 0] step:4061/10000 train_time:327515ms step_avg:80.65ms
+[2025-07-06 18:36:20] [Rank 0] step:4081/10000 train_time:329006ms step_avg:80.62ms
+[2025-07-06 18:36:21] [Rank 0] step:4101/10000 train_time:330497ms step_avg:80.59ms
+[2025-07-06 18:36:23] [Rank 0] step:4121/10000 train_time:331994ms step_avg:80.56ms
+[2025-07-06 18:36:25] [Rank 0] step:4141/10000 train_time:333742ms step_avg:80.59ms
+[2025-07-06 18:36:26] [Rank 0] step:4161/10000 train_time:335625ms step_avg:80.66ms
+[2025-07-06 18:36:28] [Rank 0] step:4181/10000 train_time:337119ms step_avg:80.63ms
+[2025-07-06 18:36:29] [Rank 0] step:4201/10000 train_time:338612ms step_avg:80.60ms
+[2025-07-06 18:36:31] [Rank 0] step:4221/10000 train_time:340107ms step_avg:80.57ms
+[2025-07-06 18:36:33] [Rank 0] step:4241/10000 train_time:342269ms step_avg:80.70ms
+[2025-07-06 18:36:35] [Rank 0] step:4261/10000 train_time:343763ms step_avg:80.68ms
+[2025-07-06 18:36:36] [Rank 0] step:4281/10000 train_time:345258ms step_avg:80.65ms
+[2025-07-06 18:36:38] [Rank 0] step:4301/10000 train_time:346759ms step_avg:80.62ms
+[2025-07-06 18:36:40] [Rank 0] step:4321/10000 train_time:348255ms step_avg:80.60ms
+[2025-07-06 18:36:41] [Rank 0] step:4341/10000 train_time:350414ms step_avg:80.72ms
+[2025-07-06 18:36:43] [Rank 0] step:4361/10000 train_time:351912ms step_avg:80.70ms
+[2025-07-06 18:36:44] [Rank 0] step:4381/10000 train_time:353623ms step_avg:80.72ms
+[2025-07-06 18:36:46] [Rank 0] step:4401/10000 train_time:355277ms step_avg:80.73ms
+[2025-07-06 18:36:48] [Rank 0] step:4421/10000 train_time:357438ms step_avg:80.85ms
+[2025-07-06 18:36:50] [Rank 0] step:4441/10000 train_time:358930ms step_avg:80.82ms
+[2025-07-06 18:36:51] [Rank 0] step:4461/10000 train_time:360425ms step_avg:80.79ms
+[2025-07-06 18:36:53] [Rank 0] step:4481/10000 train_time:361922ms step_avg:80.77ms
+[2025-07-06 18:36:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:36:55] [Rank 0] PRINT: step:4500/10000 train_loss:0.8670 val_loss:0.8641 train_time:363419ms step_avg:80.76ms
+[2025-07-06 18:36:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:36:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:36:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:42:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:42:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:42:22] [Rank 0] Total Loss: 5.3967
+[2025-07-06 18:42:22] [Rank 0] Total FTA: 0.9533
+[2025-07-06 18:42:22] [Rank 0] Group 0 Loss: 5.5986
+[2025-07-06 18:42:22] [Rank 0] Group 1 Loss: 5.1789
+[2025-07-06 18:42:22] [Rank 0] Group 2 Loss: 5.1886
+[2025-07-06 18:42:22] [Rank 0] Group 3 Loss: 5.5642
+[2025-07-06 18:42:22] [Rank 0] Group 4 Loss: 5.3745
+[2025-07-06 18:42:22] [Rank 0] Group 5 Loss: 5.3657
+[2025-07-06 18:42:22] [Rank 0] Group 6 Loss: 5.3606
+[2025-07-06 18:42:22] [Rank 0] Group 7 Loss: 5.3915
+[2025-07-06 18:42:22] [Rank 0] Group 8 Loss: 5.3666
+[2025-07-06 18:42:22] [Rank 0] Group 9 Loss: 5.3928
+[2025-07-06 18:42:22] [Rank 0] Group 10 Loss: 5.3907
+[2025-07-06 18:42:22] [Rank 0] Group 11 Loss: 5.3929
+[2025-07-06 18:42:22] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:42:22] [Rank 0] Group 1 FTA: 0.8229
+[2025-07-06 18:42:22] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:42:22] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 18:42:22] [Rank 0] Group 4 FTA: 0.9062
+[2025-07-06 18:42:22] [Rank 0] Group 5 FTA: 0.9766
+[2025-07-06 18:42:22] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-06 18:42:22] [Rank 0] Group 7 FTA: 0.9453
+[2025-07-06 18:42:22] [Rank 0] Group 8 FTA: 0.9427
+[2025-07-06 18:42:22] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-06 18:42:22] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-06 18:42:22] [Rank 0] Group 11 FTA: 0.9385
+[2025-07-06 18:42:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:42:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:42:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:42:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:42:24] [Rank 0] step:4501/10000 train_time:363447ms step_avg:80.75ms
+[2025-07-06 18:42:26] [Rank 0] step:4521/10000 train_time:365323ms step_avg:80.81ms
+[2025-07-06 18:42:27] [Rank 0] step:4541/10000 train_time:366813ms step_avg:80.78ms
+[2025-07-06 18:42:29] [Rank 0] step:4561/10000 train_time:368304ms step_avg:80.75ms
+[2025-07-06 18:42:30] [Rank 0] step:4581/10000 train_time:369798ms step_avg:80.72ms
+[2025-07-06 18:42:32] [Rank 0] step:4601/10000 train_time:371944ms step_avg:80.84ms
+[2025-07-06 18:42:34] [Rank 0] step:4621/10000 train_time:373435ms step_avg:80.81ms
+[2025-07-06 18:42:35] [Rank 0] step:4641/10000 train_time:374929ms step_avg:80.79ms
+[2025-07-06 18:42:37] [Rank 0] step:4661/10000 train_time:376423ms step_avg:80.76ms
+[2025-07-06 18:42:39] [Rank 0] step:4681/10000 train_time:377973ms step_avg:80.75ms
+[2025-07-06 18:42:40] [Rank 0] step:4701/10000 train_time:379658ms step_avg:80.76ms
+[2025-07-06 18:42:42] [Rank 0] step:4721/10000 train_time:381151ms step_avg:80.74ms
+[2025-07-06 18:42:43] [Rank 0] step:4741/10000 train_time:382647ms step_avg:80.71ms
+[2025-07-06 18:42:45] [Rank 0] step:4761/10000 train_time:384142ms step_avg:80.69ms
+[2025-07-06 18:42:47] [Rank 0] step:4781/10000 train_time:386281ms step_avg:80.79ms
+[2025-07-06 18:42:48] [Rank 0] step:4801/10000 train_time:387777ms step_avg:80.77ms
+[2025-07-06 18:42:50] [Rank 0] step:4821/10000 train_time:389272ms step_avg:80.75ms
+[2025-07-06 18:42:51] [Rank 0] step:4841/10000 train_time:390768ms step_avg:80.72ms
+[2025-07-06 18:42:53] [Rank 0] step:4861/10000 train_time:392947ms step_avg:80.84ms
+[2025-07-06 18:42:55] [Rank 0] step:4881/10000 train_time:394427ms step_avg:80.81ms
+[2025-07-06 18:42:56] [Rank 0] step:4901/10000 train_time:395920ms step_avg:80.78ms
+[2025-07-06 18:42:58] [Rank 0] step:4921/10000 train_time:397417ms step_avg:80.76ms
+[2025-07-06 18:42:59] [Rank 0] step:4941/10000 train_time:398913ms step_avg:80.74ms
+[2025-07-06 18:43:01] [Rank 0] step:4961/10000 train_time:400645ms step_avg:80.76ms
+[2025-07-06 18:43:03] [Rank 0] step:4981/10000 train_time:402237ms step_avg:80.75ms
+[2025-07-06 18:43:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:43:05] [Rank 0] PRINT: step:5000/10000 train_loss:0.8644 val_loss:0.8642 train_time:403810ms step_avg:80.76ms
+[2025-07-06 18:43:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:43:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
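
The warning repeated above is a simple coverage check: with val_tokens = 1966080 and val_batch_size = 262144, only 7 full validation batches fit and the trailing 131072 tokens are skipped each pass. A minimal sketch of that check, reusing the names printed in the log; the function itself is illustrative, not the script's actual code:

    def check_val_coverage(val_tokens: int, val_batch_size: int) -> int:
        # 1966080 = 7 * 262144 + 131072, so 7 full batches are evaluated
        # and the remaining 131072 tokens are dropped every validation pass.
        num_batches, remainder = divmod(val_tokens, val_batch_size)
        if remainder != 0:
            print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
                  f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
        return num_batches
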
+[2025-07-06 18:43:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:48:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:48:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:48:31] [Rank 0] Total Loss: 5.3290
+[2025-07-06 18:48:31] [Rank 0] Total FTA: 0.9650
+[2025-07-06 18:48:31] [Rank 0] Group 0 Loss: 5.5051
+[2025-07-06 18:48:31] [Rank 0] Group 1 Loss: 5.2526
+[2025-07-06 18:48:31] [Rank 0] Group 2 Loss: 5.1240
+[2025-07-06 18:48:31] [Rank 0] Group 3 Loss: 5.3568
+[2025-07-06 18:48:31] [Rank 0] Group 4 Loss: 5.2971
+[2025-07-06 18:48:31] [Rank 0] Group 5 Loss: 5.3096
+[2025-07-06 18:48:31] [Rank 0] Group 6 Loss: 5.2455
+[2025-07-06 18:48:31] [Rank 0] Group 7 Loss: 5.3439
+[2025-07-06 18:48:31] [Rank 0] Group 8 Loss: 5.2806
+[2025-07-06 18:48:31] [Rank 0] Group 9 Loss: 5.3091
+[2025-07-06 18:48:31] [Rank 0] Group 10 Loss: 5.3867
+[2025-07-06 18:48:31] [Rank 0] Group 11 Loss: 5.3310
+[2025-07-06 18:48:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:48:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:48:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:48:31] [Rank 0] Group 3 FTA: 0.9688
+[2025-07-06 18:48:31] [Rank 0] Group 4 FTA: 0.9141
+[2025-07-06 18:48:31] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-06 18:48:31] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-06 18:48:31] [Rank 0] Group 7 FTA: 0.9453
+[2025-07-06 18:48:31] [Rank 0] Group 8 FTA: 0.9271
+[2025-07-06 18:48:31] [Rank 0] Group 9 FTA: 0.9648
+[2025-07-06 18:48:31] [Rank 0] Group 10 FTA: 0.9727
+[2025-07-06 18:48:31] [Rank 0] Group 11 FTA: 0.9541
+[2025-07-06 18:48:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:48:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:48:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:48:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:48:32] [Rank 0] step:5001/10000 train_time:403831ms step_avg:80.75ms
+[2025-07-06 18:48:34] [Rank 0] step:5021/10000 train_time:405328ms step_avg:80.73ms
+[2025-07-06 18:48:36] [Rank 0] step:5041/10000 train_time:406871ms step_avg:80.71ms
+[2025-07-06 18:48:37] [Rank 0] step:5061/10000 train_time:408961ms step_avg:80.81ms
+[2025-07-06 18:48:39] [Rank 0] step:5081/10000 train_time:410452ms step_avg:80.78ms
+[2025-07-06 18:48:40] [Rank 0] step:5101/10000 train_time:411943ms step_avg:80.76ms
+[2025-07-06 18:48:42] [Rank 0] step:5121/10000 train_time:413436ms step_avg:80.73ms
+[2025-07-06 18:48:44] [Rank 0] step:5141/10000 train_time:415169ms step_avg:80.76ms
+[2025-07-06 18:48:45] [Rank 0] step:5161/10000 train_time:416660ms step_avg:80.73ms
+[2025-07-06 18:48:47] [Rank 0] step:5181/10000 train_time:418155ms step_avg:80.71ms
+[2025-07-06 18:48:48] [Rank 0] step:5201/10000 train_time:419649ms step_avg:80.69ms
+[2025-07-06 18:48:50] [Rank 0] step:5221/10000 train_time:421152ms step_avg:80.67ms
+[2025-07-06 18:48:51] [Rank 0] step:5241/10000 train_time:422887ms step_avg:80.69ms
+[2025-07-06 18:48:53] [Rank 0] step:5261/10000 train_time:424381ms step_avg:80.67ms
+[2025-07-06 18:48:54] [Rank 0] step:5281/10000 train_time:425878ms step_avg:80.64ms
+[2025-07-06 18:48:56] [Rank 0] step:5301/10000 train_time:427375ms step_avg:80.62ms
+[2025-07-06 18:48:58] [Rank 0] step:5321/10000 train_time:429528ms step_avg:80.72ms
+[2025-07-06 18:49:00] [Rank 0] step:5341/10000 train_time:431023ms step_avg:80.70ms
+[2025-07-06 18:49:01] [Rank 0] step:5361/10000 train_time:432521ms step_avg:80.68ms
+[2025-07-06 18:49:03] [Rank 0] step:5381/10000 train_time:434018ms step_avg:80.66ms
+[2025-07-06 18:49:05] [Rank 0] step:5401/10000 train_time:435515ms step_avg:80.64ms
+[2025-07-06 18:49:06] [Rank 0] step:5421/10000 train_time:437672ms step_avg:80.74ms
+[2025-07-06 18:49:08] [Rank 0] step:5441/10000 train_time:439170ms step_avg:80.71ms
+[2025-07-06 18:49:09] [Rank 0] step:5461/10000 train_time:440670ms step_avg:80.69ms
+[2025-07-06 18:49:11] [Rank 0] step:5481/10000 train_time:442167ms step_avg:80.67ms
+[2025-07-06 18:49:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:49:13] [Rank 0] PRINT: step:5500/10000 train_loss:0.8625 val_loss:0.8648 train_time:443903ms step_avg:80.71ms
+[2025-07-06 18:49:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:49:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
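
The step_avg field is consistent with cumulative train_time divided by the current step index (e.g. 443903 ms / 5500 ≈ 80.71 ms at step 5500). A hedged sketch of that bookkeeping, inferred from the numbers rather than taken from the script:

    def step_avg_ms(train_time_ms: int, step: int) -> float:
        # Cumulative wall-clock training time divided by steps completed.
        return train_time_ms / step

    assert round(step_avg_ms(443903, 5500), 2) == 80.71  # "step:5500 ... step_avg:80.71ms"
    assert round(step_avg_ms(442167, 5481), 2) == 80.67  # "step:5481 ... step_avg:80.67ms"
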
+[2025-07-06 18:49:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:54:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:54:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:54:37] [Rank 0] Total Loss: 5.4621
+[2025-07-06 18:54:37] [Rank 0] Total FTA: 0.9727
+[2025-07-06 18:54:37] [Rank 0] Group 0 Loss: 5.6959
+[2025-07-06 18:54:37] [Rank 0] Group 1 Loss: 5.3285
+[2025-07-06 18:54:37] [Rank 0] Group 2 Loss: 5.2955
+[2025-07-06 18:54:37] [Rank 0] Group 3 Loss: 5.4269
+[2025-07-06 18:54:37] [Rank 0] Group 4 Loss: 5.3355
+[2025-07-06 18:54:37] [Rank 0] Group 5 Loss: 5.4243
+[2025-07-06 18:54:37] [Rank 0] Group 6 Loss: 5.3319
+[2025-07-06 18:54:37] [Rank 0] Group 7 Loss: 5.4913
+[2025-07-06 18:54:37] [Rank 0] Group 8 Loss: 5.5193
+[2025-07-06 18:54:37] [Rank 0] Group 9 Loss: 5.3970
+[2025-07-06 18:54:37] [Rank 0] Group 10 Loss: 5.4686
+[2025-07-06 18:54:37] [Rank 0] Group 11 Loss: 5.5032
+[2025-07-06 18:54:37] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 18:54:37] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 18:54:37] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 18:54:37] [Rank 0] Group 3 FTA: 0.9740
+[2025-07-06 18:54:37] [Rank 0] Group 4 FTA: 0.9349
+[2025-07-06 18:54:37] [Rank 0] Group 5 FTA: 0.9818
+[2025-07-06 18:54:37] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-06 18:54:37] [Rank 0] Group 7 FTA: 0.9531
+[2025-07-06 18:54:37] [Rank 0] Group 8 FTA: 0.9740
+[2025-07-06 18:54:37] [Rank 0] Group 9 FTA: 0.9609
+[2025-07-06 18:54:38] [Rank 0] Group 10 FTA: 0.9551
+[2025-07-06 18:54:38] [Rank 0] Group 11 FTA: 0.9600
+[2025-07-06 18:54:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 18:54:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 18:54:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 18:54:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 18:54:39] [Rank 0] step:5501/10000 train_time:443924ms step_avg:80.70ms
+[2025-07-06 18:54:41] [Rank 0] step:5521/10000 train_time:445500ms step_avg:80.69ms
+[2025-07-06 18:54:42] [Rank 0] step:5541/10000 train_time:446988ms step_avg:80.67ms
+[2025-07-06 18:54:44] [Rank 0] step:5561/10000 train_time:448480ms step_avg:80.65ms
+[2025-07-06 18:54:46] [Rank 0] step:5581/10000 train_time:450025ms step_avg:80.64ms
+[2025-07-06 18:54:47] [Rank 0] step:5601/10000 train_time:452127ms step_avg:80.72ms
+[2025-07-06 18:54:49] [Rank 0] step:5621/10000 train_time:453620ms step_avg:80.70ms
+[2025-07-06 18:54:50] [Rank 0] step:5641/10000 train_time:455113ms step_avg:80.68ms
+[2025-07-06 18:54:52] [Rank 0] step:5661/10000 train_time:456608ms step_avg:80.66ms
+[2025-07-06 18:54:53] [Rank 0] step:5681/10000 train_time:458343ms step_avg:80.68ms
+[2025-07-06 18:54:55] [Rank 0] step:5701/10000 train_time:459834ms step_avg:80.66ms
+[2025-07-06 18:54:56] [Rank 0] step:5721/10000 train_time:461327ms step_avg:80.64ms
+[2025-07-06 18:54:58] [Rank 0] step:5741/10000 train_time:462822ms step_avg:80.62ms
+[2025-07-06 18:55:00] [Rank 0] step:5761/10000 train_time:464318ms step_avg:80.60ms
+[2025-07-06 18:55:01] [Rank 0] step:5781/10000 train_time:466050ms step_avg:80.62ms
+[2025-07-06 18:55:03] [Rank 0] step:5801/10000 train_time:467546ms step_avg:80.60ms
+[2025-07-06 18:55:04] [Rank 0] step:5821/10000 train_time:469045ms step_avg:80.58ms
+[2025-07-06 18:55:06] [Rank 0] step:5841/10000 train_time:470547ms step_avg:80.56ms
+[2025-07-06 18:55:08] [Rank 0] step:5861/10000 train_time:472691ms step_avg:80.65ms
+[2025-07-06 18:55:09] [Rank 0] step:5881/10000 train_time:474189ms step_avg:80.63ms
+[2025-07-06 18:55:11] [Rank 0] step:5901/10000 train_time:475690ms step_avg:80.61ms
+[2025-07-06 18:55:12] [Rank 0] step:5921/10000 train_time:477188ms step_avg:80.59ms
+[2025-07-06 18:55:14] [Rank 0] step:5941/10000 train_time:478689ms step_avg:80.57ms
+[2025-07-06 18:55:16] [Rank 0] step:5961/10000 train_time:480837ms step_avg:80.66ms
+[2025-07-06 18:55:17] [Rank 0] step:5981/10000 train_time:482337ms step_avg:80.64ms
+[2025-07-06 18:55:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:55:20] [Rank 0] PRINT: step:6000/10000 train_loss:0.8614 val_loss:0.8617 train_time:483836ms step_avg:80.64ms
+[2025-07-06 18:55:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:55:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
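
Each detailed evaluation requests "~5000 samples" via stratified sampling but lands on 5633, which is the kind of overshoot per-group quotas with rounding produce. One plausible scheme, purely illustrative; the data layout and names are assumptions, not the script's code:

    import random
    from collections import defaultdict

    def stratified_sample(examples, target=5000, seed=0):
        # Group examples by class, then draw a quota from each group.
        # Rounding the quota up per group (and any per-group minimums)
        # can push the final count past the target, as the 5633-vs-~5000
        # gap in the log suggests.
        rng = random.Random(seed)
        by_group = defaultdict(list)
        for ex in examples:
            by_group[ex["group"]].append(ex)
        quota = -(-target // len(by_group))  # ceil(target / num_groups)
        sampled = []
        for group_examples in by_group.values():
            k = min(quota, len(group_examples))
            sampled.extend(rng.sample(group_examples, k))
        return sampled
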
+[2025-07-06 18:55:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:00:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:00:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:00:47] [Rank 0] Total Loss: 5.4472
+[2025-07-06 19:00:47] [Rank 0] Total FTA: 0.9657
+[2025-07-06 19:00:47] [Rank 0] Group 0 Loss: 5.5652
+[2025-07-06 19:00:47] [Rank 0] Group 1 Loss: 5.2861
+[2025-07-06 19:00:47] [Rank 0] Group 2 Loss: 5.3029
+[2025-07-06 19:00:47] [Rank 0] Group 3 Loss: 5.4239
+[2025-07-06 19:00:47] [Rank 0] Group 4 Loss: 5.3977
+[2025-07-06 19:00:47] [Rank 0] Group 5 Loss: 5.4659
+[2025-07-06 19:00:47] [Rank 0] Group 6 Loss: 5.3636
+[2025-07-06 19:00:47] [Rank 0] Group 7 Loss: 5.4479
+[2025-07-06 19:00:47] [Rank 0] Group 8 Loss: 5.4507
+[2025-07-06 19:00:47] [Rank 0] Group 9 Loss: 5.4429
+[2025-07-06 19:00:47] [Rank 0] Group 10 Loss: 5.5329
+[2025-07-06 19:00:47] [Rank 0] Group 11 Loss: 5.4814
+[2025-07-06 19:00:47] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:00:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:00:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:00:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:00:48] [Rank 0] Group 4 FTA: 0.9271
+[2025-07-06 19:00:48] [Rank 0] Group 5 FTA: 0.9349
+[2025-07-06 19:00:48] [Rank 0] Group 6 FTA: 0.9167
+[2025-07-06 19:00:48] [Rank 0] Group 7 FTA: 0.9635
+[2025-07-06 19:00:48] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-06 19:00:48] [Rank 0] Group 9 FTA: 0.9727
+[2025-07-06 19:00:48] [Rank 0] Group 10 FTA: 0.9492
+[2025-07-06 19:00:48] [Rank 0] Group 11 FTA: 0.9561
+[2025-07-06 19:00:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:00:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:00:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:00:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:00:49] [Rank 0] step:6001/10000 train_time:483857ms step_avg:80.63ms
+[2025-07-06 19:00:51] [Rank 0] step:6021/10000 train_time:485364ms step_avg:80.61ms
+[2025-07-06 19:00:53] [Rank 0] step:6041/10000 train_time:487498ms step_avg:80.70ms
+[2025-07-06 19:00:54] [Rank 0] step:6061/10000 train_time:488987ms step_avg:80.68ms
+[2025-07-06 19:00:56] [Rank 0] step:6081/10000 train_time:490480ms step_avg:80.66ms
+[2025-07-06 19:00:57] [Rank 0] step:6101/10000 train_time:492040ms step_avg:80.65ms
+[2025-07-06 19:00:59] [Rank 0] step:6121/10000 train_time:493584ms step_avg:80.64ms
+[2025-07-06 19:01:01] [Rank 0] step:6141/10000 train_time:495383ms step_avg:80.67ms
+[2025-07-06 19:01:02] [Rank 0] step:6161/10000 train_time:496876ms step_avg:80.65ms
+[2025-07-06 19:01:04] [Rank 0] step:6181/10000 train_time:498368ms step_avg:80.63ms
+[2025-07-06 19:01:05] [Rank 0] step:6201/10000 train_time:499864ms step_avg:80.61ms
+[2025-07-06 19:01:07] [Rank 0] step:6221/10000 train_time:501593ms step_avg:80.63ms
+[2025-07-06 19:01:08] [Rank 0] step:6241/10000 train_time:503089ms step_avg:80.61ms
+[2025-07-06 19:01:10] [Rank 0] step:6261/10000 train_time:504584ms step_avg:80.59ms
+[2025-07-06 19:01:11] [Rank 0] step:6281/10000 train_time:506079ms step_avg:80.57ms
+[2025-07-06 19:01:13] [Rank 0] step:6301/10000 train_time:507833ms step_avg:80.60ms
+[2025-07-06 19:01:15] [Rank 0] step:6321/10000 train_time:509721ms step_avg:80.64ms
+[2025-07-06 19:01:16] [Rank 0] step:6341/10000 train_time:511217ms step_avg:80.62ms
+[2025-07-06 19:01:18] [Rank 0] step:6361/10000 train_time:512715ms step_avg:80.60ms
+[2025-07-06 19:01:19] [Rank 0] step:6381/10000 train_time:514214ms step_avg:80.59ms
+[2025-07-06 19:01:22] [Rank 0] step:6401/10000 train_time:516366ms step_avg:80.67ms
+[2025-07-06 19:01:23] [Rank 0] step:6421/10000 train_time:517862ms step_avg:80.65ms
+[2025-07-06 19:01:25] [Rank 0] step:6441/10000 train_time:519360ms step_avg:80.63ms
+[2025-07-06 19:01:26] [Rank 0] step:6461/10000 train_time:520857ms step_avg:80.62ms
+[2025-07-06 19:01:28] [Rank 0] step:6481/10000 train_time:522358ms step_avg:80.60ms
+[2025-07-06 19:01:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:01:31] [Rank 0] PRINT: step:6500/10000 train_loss:0.8608 val_loss:0.8605 train_time:524510ms step_avg:80.69ms
+[2025-07-06 19:01:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:01:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
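
The log never expands "FTA". Reading it as first-token accuracy (top-1 match on the first answer token) fits values pinned at exactly 1.0000 for whole groups, but it is an inference, not confirmed by the script. A sketch under that assumption:

    import torch

    def first_token_accuracy(logits: torch.Tensor,
                             first_answer_pos: torch.Tensor,
                             targets: torch.Tensor) -> float:
        # logits: (batch, seq, vocab); first_answer_pos, targets: (batch,)
        idx = torch.arange(logits.size(0))
        preds = logits[idx, first_answer_pos].argmax(dim=-1)
        return (preds == targets).float().mean().item()
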
+[2025-07-06 19:01:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:06:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:06:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:06:57] [Rank 0] Total Loss: 5.4638
+[2025-07-06 19:06:57] [Rank 0] Total FTA: 0.9721
+[2025-07-06 19:06:57] [Rank 0] Group 0 Loss: 5.5657
+[2025-07-06 19:06:57] [Rank 0] Group 1 Loss: 5.2984
+[2025-07-06 19:06:57] [Rank 0] Group 2 Loss: 5.3500
+[2025-07-06 19:06:57] [Rank 0] Group 3 Loss: 5.5355
+[2025-07-06 19:06:57] [Rank 0] Group 4 Loss: 5.4408
+[2025-07-06 19:06:57] [Rank 0] Group 5 Loss: 5.4487
+[2025-07-06 19:06:57] [Rank 0] Group 6 Loss: 5.4251
+[2025-07-06 19:06:57] [Rank 0] Group 7 Loss: 5.4615
+[2025-07-06 19:06:57] [Rank 0] Group 8 Loss: 5.4392
+[2025-07-06 19:06:57] [Rank 0] Group 9 Loss: 5.4596
+[2025-07-06 19:06:57] [Rank 0] Group 10 Loss: 5.5348
+[2025-07-06 19:06:57] [Rank 0] Group 11 Loss: 5.4695
+[2025-07-06 19:06:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:06:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:06:57] [Rank 0] Group 2 FTA: 0.9115
+[2025-07-06 19:06:57] [Rank 0] Group 3 FTA: 0.9323
+[2025-07-06 19:06:57] [Rank 0] Group 4 FTA: 0.9635
+[2025-07-06 19:06:57] [Rank 0] Group 5 FTA: 1.0000
+[2025-07-06 19:06:57] [Rank 0] Group 6 FTA: 0.9661
+[2025-07-06 19:06:57] [Rank 0] Group 7 FTA: 0.9714
+[2025-07-06 19:06:57] [Rank 0] Group 8 FTA: 0.9505
+[2025-07-06 19:06:57] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 19:06:57] [Rank 0] Group 10 FTA: 0.9785
+[2025-07-06 19:06:57] [Rank 0] Group 11 FTA: 0.9775
+[2025-07-06 19:06:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:06:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:06:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:06:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:06:59] [Rank 0] step:6501/10000 train_time:524531ms step_avg:80.68ms
+[2025-07-06 19:07:00] [Rank 0] step:6521/10000 train_time:526041ms step_avg:80.67ms
+[2025-07-06 19:07:02] [Rank 0] step:6541/10000 train_time:527532ms step_avg:80.65ms
+[2025-07-06 19:07:03] [Rank 0] step:6561/10000 train_time:529024ms step_avg:80.63ms
+[2025-07-06 19:07:05] [Rank 0] step:6581/10000 train_time:531185ms step_avg:80.71ms
+[2025-07-06 19:07:07] [Rank 0] step:6601/10000 train_time:532675ms step_avg:80.70ms
+[2025-07-06 19:07:08] [Rank 0] step:6621/10000 train_time:534170ms step_avg:80.68ms
+[2025-07-06 19:07:10] [Rank 0] step:6641/10000 train_time:535662ms step_avg:80.66ms
+[2025-07-06 19:07:11] [Rank 0] step:6661/10000 train_time:537211ms step_avg:80.65ms
+[2025-07-06 19:07:13] [Rank 0] step:6681/10000 train_time:538889ms step_avg:80.66ms
+[2025-07-06 19:07:14] [Rank 0] step:6701/10000 train_time:540385ms step_avg:80.64ms
+[2025-07-06 19:07:16] [Rank 0] step:6721/10000 train_time:541881ms step_avg:80.63ms
+[2025-07-06 19:07:18] [Rank 0] step:6741/10000 train_time:543625ms step_avg:80.64ms
+[2025-07-06 19:07:20] [Rank 0] step:6761/10000 train_time:545780ms step_avg:80.72ms
+[2025-07-06 19:07:21] [Rank 0] step:6781/10000 train_time:547273ms step_avg:80.71ms
+[2025-07-06 19:07:23] [Rank 0] step:6801/10000 train_time:548768ms step_avg:80.69ms
+[2025-07-06 19:07:24] [Rank 0] step:6821/10000 train_time:550265ms step_avg:80.67ms
+[2025-07-06 19:07:26] [Rank 0] step:6841/10000 train_time:551761ms step_avg:80.66ms
+[2025-07-06 19:07:28] [Rank 0] step:6861/10000 train_time:553495ms step_avg:80.67ms
+[2025-07-06 19:07:29] [Rank 0] step:6881/10000 train_time:554991ms step_avg:80.66ms
+[2025-07-06 19:07:31] [Rank 0] step:6901/10000 train_time:556488ms step_avg:80.64ms
+[2025-07-06 19:07:32] [Rank 0] step:6921/10000 train_time:557986ms step_avg:80.62ms
+[2025-07-06 19:07:34] [Rank 0] step:6941/10000 train_time:560145ms step_avg:80.70ms
+[2025-07-06 19:07:36] [Rank 0] step:6961/10000 train_time:561643ms step_avg:80.68ms
+[2025-07-06 19:07:37] [Rank 0] step:6981/10000 train_time:563138ms step_avg:80.67ms
+[2025-07-06 19:07:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:07:40] [Rank 0] PRINT: step:7000/10000 train_loss:0.8596 val_loss:0.8602 train_time:564635ms step_avg:80.66ms
+[2025-07-06 19:07:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:07:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
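
Each "[✓] ... saved to" line overwrites one PNG with the full metric history so the curves stay current during the run. A matplotlib sketch of that refresh; the history structure and function name are assumptions:

    import matplotlib.pyplot as plt

    def save_per_class_curves(steps, per_group_loss, out_path):
        # per_group_loss: {group_id: [loss at each logged eval step]}
        fig, ax = plt.subplots(figsize=(10, 6))
        for group_id, losses in sorted(per_group_loss.items()):
            ax.plot(steps, losses, label=f"Group {group_id}")
        ax.set_xlabel("training step")
        ax.set_ylabel("detailed eval loss")
        ax.legend(ncol=3, fontsize=8)
        fig.savefig(out_path, dpi=120)
        plt.close(fig)  # avoid leaking figures when redrawing every eval
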
+[2025-07-06 19:07:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:13:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:13:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:13:07] [Rank 0] Total Loss: 5.4143
+[2025-07-06 19:13:07] [Rank 0] Total FTA: 0.9592
+[2025-07-06 19:13:07] [Rank 0] Group 0 Loss: 5.5129
+[2025-07-06 19:13:07] [Rank 0] Group 1 Loss: 5.1897
+[2025-07-06 19:13:07] [Rank 0] Group 2 Loss: 5.3049
+[2025-07-06 19:13:07] [Rank 0] Group 3 Loss: 5.4363
+[2025-07-06 19:13:07] [Rank 0] Group 4 Loss: 5.4041
+[2025-07-06 19:13:07] [Rank 0] Group 5 Loss: 5.4008
+[2025-07-06 19:13:07] [Rank 0] Group 6 Loss: 5.4481
+[2025-07-06 19:13:07] [Rank 0] Group 7 Loss: 5.4191
+[2025-07-06 19:13:07] [Rank 0] Group 8 Loss: 5.3500
+[2025-07-06 19:13:07] [Rank 0] Group 9 Loss: 5.4372
+[2025-07-06 19:13:07] [Rank 0] Group 10 Loss: 5.4505
+[2025-07-06 19:13:07] [Rank 0] Group 11 Loss: 5.4521
+[2025-07-06 19:13:07] [Rank 0] Group 0 FTA: 0.8440
+[2025-07-06 19:13:07] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:13:07] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:13:07] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:13:07] [Rank 0] Group 4 FTA: 0.9870
+[2025-07-06 19:13:07] [Rank 0] Group 5 FTA: 0.9635
+[2025-07-06 19:13:07] [Rank 0] Group 6 FTA: 0.9844
+[2025-07-06 19:13:07] [Rank 0] Group 7 FTA: 0.9531
+[2025-07-06 19:13:07] [Rank 0] Group 8 FTA: 0.9479
+[2025-07-06 19:13:07] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 19:13:07] [Rank 0] Group 10 FTA: 0.9629
+[2025-07-06 19:13:07] [Rank 0] Group 11 FTA: 0.9785
+[2025-07-06 19:13:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:13:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:13:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:13:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:13:09] [Rank 0] step:7001/10000 train_time:564656ms step_avg:80.65ms
+[2025-07-06 19:13:11] [Rank 0] step:7021/10000 train_time:566402ms step_avg:80.67ms
+[2025-07-06 19:13:12] [Rank 0] step:7041/10000 train_time:568281ms step_avg:80.71ms
+[2025-07-06 19:13:14] [Rank 0] step:7061/10000 train_time:569771ms step_avg:80.69ms
+[2025-07-06 19:13:15] [Rank 0] step:7081/10000 train_time:571262ms step_avg:80.68ms
+[2025-07-06 19:13:17] [Rank 0] step:7101/10000 train_time:572756ms step_avg:80.66ms
+[2025-07-06 19:13:19] [Rank 0] step:7121/10000 train_time:574894ms step_avg:80.73ms
+[2025-07-06 19:13:20] [Rank 0] step:7141/10000 train_time:576389ms step_avg:80.72ms
+[2025-07-06 19:13:22] [Rank 0] step:7161/10000 train_time:577881ms step_avg:80.70ms
+[2025-07-06 19:13:23] [Rank 0] step:7181/10000 train_time:579376ms step_avg:80.68ms
+[2025-07-06 19:13:25] [Rank 0] step:7201/10000 train_time:580871ms step_avg:80.67ms
+[2025-07-06 19:13:27] [Rank 0] step:7221/10000 train_time:583012ms step_avg:80.74ms
+[2025-07-06 19:13:28] [Rank 0] step:7241/10000 train_time:584507ms step_avg:80.72ms
+[2025-07-06 19:13:30] [Rank 0] step:7261/10000 train_time:586003ms step_avg:80.71ms
+[2025-07-06 19:13:31] [Rank 0] step:7281/10000 train_time:587499ms step_avg:80.69ms
+[2025-07-06 19:13:33] [Rank 0] step:7301/10000 train_time:589235ms step_avg:80.71ms
+[2025-07-06 19:13:35] [Rank 0] step:7321/10000 train_time:590732ms step_avg:80.69ms
+[2025-07-06 19:13:36] [Rank 0] step:7341/10000 train_time:592353ms step_avg:80.69ms
+[2025-07-06 19:13:38] [Rank 0] step:7361/10000 train_time:593893ms step_avg:80.68ms
+[2025-07-06 19:13:39] [Rank 0] step:7381/10000 train_time:595513ms step_avg:80.68ms
+[2025-07-06 19:13:41] [Rank 0] step:7401/10000 train_time:596992ms step_avg:80.66ms
+[2025-07-06 19:13:42] [Rank 0] step:7421/10000 train_time:598488ms step_avg:80.65ms
+[2025-07-06 19:13:44] [Rank 0] step:7441/10000 train_time:599985ms step_avg:80.63ms
+[2025-07-06 19:13:45] [Rank 0] step:7461/10000 train_time:601482ms step_avg:80.62ms
+[2025-07-06 19:13:48] [Rank 0] step:7481/10000 train_time:603634ms step_avg:80.69ms
+[2025-07-06 19:13:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:13:50] [Rank 0] PRINT: step:7500/10000 train_loss:0.8588 val_loss:0.8594 train_time:605129ms step_avg:80.68ms
+[2025-07-06 19:13:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:13:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
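
The whole pipeline fires on a fixed 500-step cadence in this section (steps 4500, 5000, ..., 7500): validation loss, then the detailed per-group evaluation, then the curve refresh. A minimal sketch of that trigger; the function bodies are placeholders, not the script's logic:

    def run_validation(step): ...           # "PRINT: step:... val_loss:..."
    def run_detailed_evaluation(step): ...  # the per-group Loss/FTA block
    def refresh_curves(step): ...           # the four "[✓] ... saved" PNGs

    VAL_EVERY = 500  # matches the spacing of the PRINT lines above

    def maybe_evaluate(step: int) -> None:
        if step % VAL_EVERY == 0:
            run_validation(step)
            run_detailed_evaluation(step)
            refresh_curves(step)
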
+[2025-07-06 19:13:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:19:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:19:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:19:17] [Rank 0] Total Loss: 5.4275
+[2025-07-06 19:19:17] [Rank 0] Total FTA: 0.9805
+[2025-07-06 19:19:17] [Rank 0] Group 0 Loss: 5.5692
+[2025-07-06 19:19:17] [Rank 0] Group 1 Loss: 5.2510
+[2025-07-06 19:19:17] [Rank 0] Group 2 Loss: 5.4498
+[2025-07-06 19:19:17] [Rank 0] Group 3 Loss: 5.3601
+[2025-07-06 19:19:17] [Rank 0] Group 4 Loss: 5.4305
+[2025-07-06 19:19:17] [Rank 0] Group 5 Loss: 5.3984
+[2025-07-06 19:19:17] [Rank 0] Group 6 Loss: 5.3046
+[2025-07-06 19:19:17] [Rank 0] Group 7 Loss: 5.4240
+[2025-07-06 19:19:17] [Rank 0] Group 8 Loss: 5.4404
+[2025-07-06 19:19:17] [Rank 0] Group 9 Loss: 5.4224
+[2025-07-06 19:19:17] [Rank 0] Group 10 Loss: 5.4726
+[2025-07-06 19:19:17] [Rank 0] Group 11 Loss: 5.4354
+[2025-07-06 19:19:17] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:19:17] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:19:17] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:19:17] [Rank 0] Group 3 FTA: 0.9479
+[2025-07-06 19:19:17] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-06 19:19:17] [Rank 0] Group 5 FTA: 0.9974
+[2025-07-06 19:19:17] [Rank 0] Group 6 FTA: 0.9844
+[2025-07-06 19:19:17] [Rank 0] Group 7 FTA: 0.9609
+[2025-07-06 19:19:17] [Rank 0] Group 8 FTA: 0.9792
+[2025-07-06 19:19:17] [Rank 0] Group 9 FTA: 0.9805
+[2025-07-06 19:19:17] [Rank 0] Group 10 FTA: 0.9688
+[2025-07-06 19:19:17] [Rank 0] Group 11 FTA: 0.9707
+[2025-07-06 19:19:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:19:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:19:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:19:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:19:18] [Rank 0] step:7501/10000 train_time:605151ms step_avg:80.68ms
+[2025-07-06 19:19:20] [Rank 0] step:7521/10000 train_time:606642ms step_avg:80.66ms
+[2025-07-06 19:19:21] [Rank 0] step:7541/10000 train_time:608133ms step_avg:80.64ms
+[2025-07-06 19:19:23] [Rank 0] step:7561/10000 train_time:609883ms step_avg:80.66ms
+[2025-07-06 19:19:25] [Rank 0] step:7581/10000 train_time:611780ms step_avg:80.70ms
+[2025-07-06 19:19:26] [Rank 0] step:7601/10000 train_time:613270ms step_avg:80.68ms
+[2025-07-06 19:19:28] [Rank 0] step:7621/10000 train_time:614764ms step_avg:80.67ms
+[2025-07-06 19:19:29] [Rank 0] step:7641/10000 train_time:616259ms step_avg:80.65ms
+[2025-07-06 19:19:32] [Rank 0] step:7661/10000 train_time:618422ms step_avg:80.72ms
+[2025-07-06 19:19:33] [Rank 0] step:7681/10000 train_time:619916ms step_avg:80.71ms
+[2025-07-06 19:19:35] [Rank 0] step:7701/10000 train_time:621410ms step_avg:80.69ms
+[2025-07-06 19:19:36] [Rank 0] step:7721/10000 train_time:622905ms step_avg:80.68ms
+[2025-07-06 19:19:38] [Rank 0] step:7741/10000 train_time:624399ms step_avg:80.66ms
+[2025-07-06 19:19:40] [Rank 0] step:7761/10000 train_time:626537ms step_avg:80.73ms
+[2025-07-06 19:19:41] [Rank 0] step:7781/10000 train_time:628032ms step_avg:80.71ms
+[2025-07-06 19:19:43] [Rank 0] step:7801/10000 train_time:629529ms step_avg:80.70ms
+[2025-07-06 19:19:44] [Rank 0] step:7821/10000 train_time:631027ms step_avg:80.68ms
+[2025-07-06 19:19:46] [Rank 0] step:7841/10000 train_time:633176ms step_avg:80.75ms
+[2025-07-06 19:19:48] [Rank 0] step:7861/10000 train_time:634674ms step_avg:80.74ms
+[2025-07-06 19:19:49] [Rank 0] step:7881/10000 train_time:636172ms step_avg:80.72ms
+[2025-07-06 19:19:51] [Rank 0] step:7901/10000 train_time:637669ms step_avg:80.71ms
+[2025-07-06 19:19:53] [Rank 0] step:7921/10000 train_time:639168ms step_avg:80.69ms
+[2025-07-06 19:19:54] [Rank 0] step:7941/10000 train_time:641321ms step_avg:80.76ms
+[2025-07-06 19:19:56] [Rank 0] step:7961/10000 train_time:643046ms step_avg:80.77ms
+[2025-07-06 19:19:58] [Rank 0] step:7981/10000 train_time:644700ms step_avg:80.78ms
+[2025-07-06 19:19:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:20:00] [Rank 0] PRINT: step:8000/10000 train_loss:0.8579 val_loss:0.8591 train_time:646197ms step_avg:80.77ms
+[2025-07-06 19:20:00] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:20:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:20:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:25:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:25:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:25:27] [Rank 0] Total Loss: 5.4597
+[2025-07-06 19:25:27] [Rank 0] Total FTA: 0.9796
+[2025-07-06 19:25:27] [Rank 0] Group 0 Loss: 5.5932
+[2025-07-06 19:25:27] [Rank 0] Group 1 Loss: 5.2914
+[2025-07-06 19:25:27] [Rank 0] Group 2 Loss: 5.4640
+[2025-07-06 19:25:27] [Rank 0] Group 3 Loss: 5.5321
+[2025-07-06 19:25:27] [Rank 0] Group 4 Loss: 5.4181
+[2025-07-06 19:25:27] [Rank 0] Group 5 Loss: 5.4138
+[2025-07-06 19:25:27] [Rank 0] Group 6 Loss: 5.3366
+[2025-07-06 19:25:27] [Rank 0] Group 7 Loss: 5.4537
+[2025-07-06 19:25:27] [Rank 0] Group 8 Loss: 5.5019
+[2025-07-06 19:25:27] [Rank 0] Group 9 Loss: 5.5273
+[2025-07-06 19:25:27] [Rank 0] Group 10 Loss: 5.4330
+[2025-07-06 19:25:27] [Rank 0] Group 11 Loss: 5.4558
+[2025-07-06 19:25:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:25:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:25:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:25:27] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:25:27] [Rank 0] Group 4 FTA: 0.9635
+[2025-07-06 19:25:27] [Rank 0] Group 5 FTA: 0.9922
+[2025-07-06 19:25:27] [Rank 0] Group 6 FTA: 0.9688
+[2025-07-06 19:25:27] [Rank 0] Group 7 FTA: 0.9661
+[2025-07-06 19:25:27] [Rank 0] Group 8 FTA: 0.9714
+[2025-07-06 19:25:27] [Rank 0] Group 9 FTA: 0.9727
+[2025-07-06 19:25:27] [Rank 0] Group 10 FTA: 0.9609
+[2025-07-06 19:25:27] [Rank 0] Group 11 FTA: 0.9658
+[2025-07-06 19:25:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:25:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:25:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:25:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:25:29] [Rank 0] step:8001/10000 train_time:646218ms step_avg:80.77ms
+[2025-07-06 19:25:31] [Rank 0] step:8021/10000 train_time:648357ms step_avg:80.83ms
+[2025-07-06 19:25:32] [Rank 0] step:8041/10000 train_time:649845ms step_avg:80.82ms
+[2025-07-06 19:25:34] [Rank 0] step:8061/10000 train_time:651335ms step_avg:80.80ms
+[2025-07-06 19:25:35] [Rank 0] step:8081/10000 train_time:652827ms step_avg:80.79ms
+[2025-07-06 19:25:37] [Rank 0] step:8101/10000 train_time:654375ms step_avg:80.78ms
+[2025-07-06 19:25:39] [Rank 0] step:8121/10000 train_time:656054ms step_avg:80.78ms
+[2025-07-06 19:25:40] [Rank 0] step:8141/10000 train_time:657547ms step_avg:80.77ms
+[2025-07-06 19:25:41] [Rank 0] step:8161/10000 train_time:659040ms step_avg:80.75ms
+[2025-07-06 19:25:43] [Rank 0] step:8181/10000 train_time:660534ms step_avg:80.74ms
+[2025-07-06 19:25:45] [Rank 0] step:8201/10000 train_time:662696ms step_avg:80.81ms
+[2025-07-06 19:25:47] [Rank 0] step:8221/10000 train_time:664190ms step_avg:80.79ms
+[2025-07-06 19:25:48] [Rank 0] step:8241/10000 train_time:665685ms step_avg:80.78ms
+[2025-07-06 19:25:50] [Rank 0] step:8261/10000 train_time:667182ms step_avg:80.76ms
+[2025-07-06 19:25:51] [Rank 0] step:8281/10000 train_time:668678ms step_avg:80.75ms
+[2025-07-06 19:25:53] [Rank 0] step:8301/10000 train_time:670411ms step_avg:80.76ms
+[2025-07-06 19:25:54] [Rank 0] step:8321/10000 train_time:671906ms step_avg:80.75ms
+[2025-07-06 19:25:56] [Rank 0] step:8341/10000 train_time:673403ms step_avg:80.73ms
+[2025-07-06 19:25:57] [Rank 0] step:8361/10000 train_time:674901ms step_avg:80.72ms
+[2025-07-06 19:26:00] [Rank 0] step:8381/10000 train_time:677065ms step_avg:80.79ms
+[2025-07-06 19:26:01] [Rank 0] step:8401/10000 train_time:678562ms step_avg:80.77ms
+[2025-07-06 19:26:03] [Rank 0] step:8421/10000 train_time:680060ms step_avg:80.76ms
+[2025-07-06 19:26:04] [Rank 0] step:8441/10000 train_time:681559ms step_avg:80.74ms
+[2025-07-06 19:26:06] [Rank 0] step:8461/10000 train_time:683059ms step_avg:80.73ms
+[2025-07-06 19:26:07] [Rank 0] step:8481/10000 train_time:684797ms step_avg:80.74ms
+[2025-07-06 19:26:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:26:10] [Rank 0] PRINT: step:8500/10000 train_loss:0.8572 val_loss:0.8588 train_time:686297ms step_avg:80.74ms
+[2025-07-06 19:26:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:26:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:26:10] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:31:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:31:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:31:38] [Rank 0] Total Loss: 5.5038
+[2025-07-06 19:31:38] [Rank 0] Total FTA: 0.9862
+[2025-07-06 19:31:38] [Rank 0] Group 0 Loss: 5.6428
+[2025-07-06 19:31:38] [Rank 0] Group 1 Loss: 5.3365
+[2025-07-06 19:31:38] [Rank 0] Group 2 Loss: 5.4985
+[2025-07-06 19:31:38] [Rank 0] Group 3 Loss: 5.4907
+[2025-07-06 19:31:38] [Rank 0] Group 4 Loss: 5.5107
+[2025-07-06 19:31:38] [Rank 0] Group 5 Loss: 5.4929
+[2025-07-06 19:31:38] [Rank 0] Group 6 Loss: 5.4550
+[2025-07-06 19:31:38] [Rank 0] Group 7 Loss: 5.5146
+[2025-07-06 19:31:38] [Rank 0] Group 8 Loss: 5.5237
+[2025-07-06 19:31:38] [Rank 0] Group 9 Loss: 5.5803
+[2025-07-06 19:31:38] [Rank 0] Group 10 Loss: 5.5049
+[2025-07-06 19:31:38] [Rank 0] Group 11 Loss: 5.4579
+[2025-07-06 19:31:38] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-06 19:31:38] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:31:38] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:31:38] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:31:38] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-06 19:31:38] [Rank 0] Group 5 FTA: 0.9896
+[2025-07-06 19:31:38] [Rank 0] Group 6 FTA: 0.9792
+[2025-07-06 19:31:38] [Rank 0] Group 7 FTA: 0.9609
+[2025-07-06 19:31:38] [Rank 0] Group 8 FTA: 0.9635
+[2025-07-06 19:31:38] [Rank 0] Group 9 FTA: 0.9844
+[2025-07-06 19:31:38] [Rank 0] Group 10 FTA: 0.9824
+[2025-07-06 19:31:38] [Rank 0] Group 11 FTA: 0.9854
+[2025-07-06 19:31:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:31:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:31:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:31:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:31:40] [Rank 0] step:8501/10000 train_time:686318ms step_avg:80.73ms
+[2025-07-06 19:31:41] [Rank 0] step:8521/10000 train_time:687817ms step_avg:80.72ms
+[2025-07-06 19:31:43] [Rank 0] step:8541/10000 train_time:689306ms step_avg:80.71ms
+[2025-07-06 19:31:45] [Rank 0] step:8561/10000 train_time:691458ms step_avg:80.77ms
+[2025-07-06 19:31:46] [Rank 0] step:8581/10000 train_time:692948ms step_avg:80.75ms
+[2025-07-06 19:31:48] [Rank 0] step:8601/10000 train_time:694445ms step_avg:80.74ms
+[2025-07-06 19:31:49] [Rank 0] step:8621/10000 train_time:695940ms step_avg:80.73ms
+[2025-07-06 19:31:51] [Rank 0] step:8641/10000 train_time:697487ms step_avg:80.72ms
+[2025-07-06 19:31:52] [Rank 0] step:8661/10000 train_time:699059ms step_avg:80.71ms
+[2025-07-06 19:31:54] [Rank 0] step:8681/10000 train_time:700554ms step_avg:80.70ms
+[2025-07-06 19:31:55] [Rank 0] step:8701/10000 train_time:702045ms step_avg:80.69ms
+[2025-07-06 19:31:57] [Rank 0] step:8721/10000 train_time:703541ms step_avg:80.67ms
+[2025-07-06 19:31:59] [Rank 0] step:8741/10000 train_time:705699ms step_avg:80.73ms
+[2025-07-06 19:32:00] [Rank 0] step:8761/10000 train_time:707193ms step_avg:80.72ms
+[2025-07-06 19:32:02] [Rank 0] step:8781/10000 train_time:708690ms step_avg:80.71ms
+[2025-07-06 19:32:03] [Rank 0] step:8801/10000 train_time:710188ms step_avg:80.69ms
+[2025-07-06 19:32:06] [Rank 0] step:8821/10000 train_time:711685ms step_avg:80.68ms
+[2025-07-06 19:32:07] [Rank 0] step:8841/10000 train_time:713841ms step_avg:80.74ms
+[2025-07-06 19:32:09] [Rank 0] step:8861/10000 train_time:715339ms step_avg:80.73ms
+[2025-07-06 19:32:10] [Rank 0] step:8881/10000 train_time:716838ms step_avg:80.72ms
+[2025-07-06 19:32:12] [Rank 0] step:8901/10000 train_time:718338ms step_avg:80.70ms
+[2025-07-06 19:32:14] [Rank 0] step:8921/10000 train_time:720484ms step_avg:80.76ms
+[2025-07-06 19:32:15] [Rank 0] step:8941/10000 train_time:721980ms step_avg:80.75ms
+[2025-07-06 19:32:17] [Rank 0] step:8961/10000 train_time:723479ms step_avg:80.74ms
+[2025-07-06 19:32:18] [Rank 0] step:8981/10000 train_time:724978ms step_avg:80.72ms
+[2025-07-06 19:32:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:32:21] [Rank 0] PRINT: step:9000/10000 train_loss:0.8564 val_loss:0.8587 train_time:726477ms step_avg:80.72ms
+[2025-07-06 19:32:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:32:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:32:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:37:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:37:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:37:47] [Rank 0] Total Loss: 5.5172
+[2025-07-06 19:37:47] [Rank 0] Total FTA: 0.9645
+[2025-07-06 19:37:47] [Rank 0] Group 0 Loss: 5.7021
+[2025-07-06 19:37:47] [Rank 0] Group 1 Loss: 5.3084
+[2025-07-06 19:37:47] [Rank 0] Group 2 Loss: 5.5157
+[2025-07-06 19:37:47] [Rank 0] Group 3 Loss: 5.5259
+[2025-07-06 19:37:47] [Rank 0] Group 4 Loss: 5.5677
+[2025-07-06 19:37:47] [Rank 0] Group 5 Loss: 5.4910
+[2025-07-06 19:37:47] [Rank 0] Group 6 Loss: 5.4096
+[2025-07-06 19:37:47] [Rank 0] Group 7 Loss: 5.5189
+[2025-07-06 19:37:47] [Rank 0] Group 8 Loss: 5.5258
+[2025-07-06 19:37:47] [Rank 0] Group 9 Loss: 5.5010
+[2025-07-06 19:37:47] [Rank 0] Group 10 Loss: 5.5160
+[2025-07-06 19:37:47] [Rank 0] Group 11 Loss: 5.4861
+[2025-07-06 19:37:47] [Rank 0] Group 0 FTA: 0.8401
+[2025-07-06 19:37:47] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:37:47] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:37:47] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:37:47] [Rank 0] Group 4 FTA: 0.9792
+[2025-07-06 19:37:47] [Rank 0] Group 5 FTA: 0.9896
+[2025-07-06 19:37:47] [Rank 0] Group 6 FTA: 0.9844
+[2025-07-06 19:37:47] [Rank 0] Group 7 FTA: 0.9766
+[2025-07-06 19:37:47] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-06 19:37:47] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-06 19:37:47] [Rank 0] Group 10 FTA: 0.9688
+[2025-07-06 19:37:47] [Rank 0] Group 11 FTA: 0.9893
+[2025-07-06 19:37:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:37:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:37:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:37:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:37:49] [Rank 0] step:9001/10000 train_time:726505ms step_avg:80.71ms
+[2025-07-06 19:37:51] [Rank 0] step:9021/10000 train_time:728955ms step_avg:80.81ms
+[2025-07-06 19:37:53] [Rank 0] step:9041/10000 train_time:730585ms step_avg:80.81ms
+[2025-07-06 19:37:54] [Rank 0] step:9061/10000 train_time:732083ms step_avg:80.79ms
+[2025-07-06 19:37:56] [Rank 0] step:9081/10000 train_time:733582ms step_avg:80.78ms
+[2025-07-06 19:37:58] [Rank 0] step:9101/10000 train_time:735745ms step_avg:80.84ms
+[2025-07-06 19:37:59] [Rank 0] step:9121/10000 train_time:737240ms step_avg:80.83ms
+[2025-07-06 19:38:01] [Rank 0] step:9141/10000 train_time:738737ms step_avg:80.82ms
+[2025-07-06 19:38:02] [Rank 0] step:9161/10000 train_time:740233ms step_avg:80.80ms
+[2025-07-06 19:38:04] [Rank 0] step:9181/10000 train_time:741781ms step_avg:80.80ms
+[2025-07-06 19:38:06] [Rank 0] step:9201/10000 train_time:743884ms step_avg:80.85ms
+[2025-07-06 19:38:07] [Rank 0] step:9221/10000 train_time:745376ms step_avg:80.83ms
+[2025-07-06 19:38:09] [Rank 0] step:9241/10000 train_time:746871ms step_avg:80.82ms
+[2025-07-06 19:38:10] [Rank 0] step:9261/10000 train_time:748368ms step_avg:80.81ms
+[2025-07-06 19:38:13] [Rank 0] step:9281/10000 train_time:750527ms step_avg:80.87ms
+[2025-07-06 19:38:14] [Rank 0] step:9301/10000 train_time:752020ms step_avg:80.85ms
+[2025-07-06 19:38:16] [Rank 0] step:9321/10000 train_time:753517ms step_avg:80.84ms
+[2025-07-06 19:38:17] [Rank 0] step:9341/10000 train_time:755009ms step_avg:80.83ms
+[2025-07-06 19:38:19] [Rank 0] step:9361/10000 train_time:756560ms step_avg:80.82ms
+[2025-07-06 19:38:20] [Rank 0] step:9381/10000 train_time:758134ms step_avg:80.82ms
+[2025-07-06 19:38:22] [Rank 0] step:9401/10000 train_time:759630ms step_avg:80.80ms
+[2025-07-06 19:38:23] [Rank 0] step:9421/10000 train_time:761126ms step_avg:80.79ms
+[2025-07-06 19:38:25] [Rank 0] step:9441/10000 train_time:762621ms step_avg:80.78ms
+[2025-07-06 19:38:27] [Rank 0] step:9461/10000 train_time:764768ms step_avg:80.83ms
+[2025-07-06 19:38:28] [Rank 0] step:9481/10000 train_time:766263ms step_avg:80.82ms
+[2025-07-06 19:38:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:38:31] [Rank 0] PRINT: step:9500/10000 train_loss:0.8557 val_loss:0.8585 train_time:767759ms step_avg:80.82ms
+[2025-07-06 19:38:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:38:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:38:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:43:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:43:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:43:54] [Rank 0] Total Loss: 5.5267
+[2025-07-06 19:43:54] [Rank 0] Total FTA: 0.9421
+[2025-07-06 19:43:54] [Rank 0] Group 0 Loss: 5.6646
+[2025-07-06 19:43:54] [Rank 0] Group 1 Loss: 5.3496
+[2025-07-06 19:43:54] [Rank 0] Group 2 Loss: 5.4740
+[2025-07-06 19:43:54] [Rank 0] Group 3 Loss: 5.5416
+[2025-07-06 19:43:54] [Rank 0] Group 4 Loss: 5.4731
+[2025-07-06 19:43:54] [Rank 0] Group 5 Loss: 5.5036
+[2025-07-06 19:43:54] [Rank 0] Group 6 Loss: 5.4324
+[2025-07-06 19:43:54] [Rank 0] Group 7 Loss: 5.5602
+[2025-07-06 19:43:54] [Rank 0] Group 8 Loss: 5.5351
+[2025-07-06 19:43:54] [Rank 0] Group 9 Loss: 5.5195
+[2025-07-06 19:43:54] [Rank 0] Group 10 Loss: 5.5519
+[2025-07-06 19:43:54] [Rank 0] Group 11 Loss: 5.5413
+[2025-07-06 19:43:54] [Rank 0] Group 0 FTA: 0.6528
+[2025-07-06 19:43:54] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:43:54] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:43:54] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:43:54] [Rank 0] Group 4 FTA: 0.9714
+[2025-07-06 19:43:54] [Rank 0] Group 5 FTA: 0.9792
+[2025-07-06 19:43:54] [Rank 0] Group 6 FTA: 0.9844
+[2025-07-06 19:43:54] [Rank 0] Group 7 FTA: 0.9922
+[2025-07-06 19:43:54] [Rank 0] Group 8 FTA: 0.9740
+[2025-07-06 19:43:54] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-06 19:43:54] [Rank 0] Group 10 FTA: 0.9883
+[2025-07-06 19:43:54] [Rank 0] Group 11 FTA: 0.9912
+[2025-07-06 19:43:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:43:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:43:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:43:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:43:55] [Rank 0] step:9501/10000 train_time:767780ms step_avg:80.81ms
+[2025-07-06 19:43:57] [Rank 0] step:9521/10000 train_time:769285ms step_avg:80.80ms
+[2025-07-06 19:43:59] [Rank 0] step:9541/10000 train_time:770776ms step_avg:80.79ms
+[2025-07-06 19:44:00] [Rank 0] step:9561/10000 train_time:772929ms step_avg:80.84ms
+[2025-07-06 19:44:02] [Rank 0] step:9581/10000 train_time:774421ms step_avg:80.83ms
+[2025-07-06 19:44:03] [Rank 0] step:9601/10000 train_time:775912ms step_avg:80.82ms
+[2025-07-06 19:44:05] [Rank 0] step:9621/10000 train_time:777405ms step_avg:80.80ms
+[2025-07-06 19:44:07] [Rank 0] step:9641/10000 train_time:779135ms step_avg:80.81ms
+[2025-07-06 19:44:08] [Rank 0] step:9661/10000 train_time:780629ms step_avg:80.80ms
+[2025-07-06 19:44:10] [Rank 0] step:9681/10000 train_time:782187ms step_avg:80.80ms
+[2025-07-06 19:44:11] [Rank 0] step:9701/10000 train_time:783751ms step_avg:80.79ms
+[2025-07-06 19:44:13] [Rank 0] step:9721/10000 train_time:785354ms step_avg:80.79ms
+[2025-07-06 19:44:15] [Rank 0] step:9741/10000 train_time:787458ms step_avg:80.84ms
+[2025-07-06 19:44:16] [Rank 0] step:9761/10000 train_time:788955ms step_avg:80.83ms
+[2025-07-06 19:44:18] [Rank 0] step:9781/10000 train_time:790452ms step_avg:80.82ms
+[2025-07-06 19:44:19] [Rank 0] step:9801/10000 train_time:791946ms step_avg:80.80ms
+[2025-07-06 19:44:22] [Rank 0] step:9821/10000 train_time:794100ms step_avg:80.86ms
+[2025-07-06 19:44:23] [Rank 0] step:9841/10000 train_time:795594ms step_avg:80.84ms
+[2025-07-06 19:44:25] [Rank 0] step:9861/10000 train_time:797089ms step_avg:80.83ms
+[2025-07-06 19:44:26] [Rank 0] step:9881/10000 train_time:798583ms step_avg:80.82ms
+[2025-07-06 19:44:28] [Rank 0] step:9901/10000 train_time:800335ms step_avg:80.83ms
+[2025-07-06 19:44:30] [Rank 0] step:9921/10000 train_time:802252ms step_avg:80.86ms
+[2025-07-06 19:44:31] [Rank 0] step:9941/10000 train_time:803750ms step_avg:80.85ms
+[2025-07-06 19:44:33] [Rank 0] step:9961/10000 train_time:805252ms step_avg:80.84ms
+[2025-07-06 19:44:34] [Rank 0] step:9981/10000 train_time:806755ms step_avg:80.83ms
+[2025-07-06 19:44:36] [Rank 0] step:10000/10000 train_time:808842ms step_avg:80.88ms
+[2025-07-06 19:44:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 19:44:37] [Rank 0] PRINT: step:10000/10000 train_loss:0.8549 val_loss:0.8585 train_time:808926ms step_avg:80.89ms
+[2025-07-06 19:44:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 19:44:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 19:44:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:50:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:50:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:50:00] [Rank 0] Total Loss: 5.5405
+[2025-07-06 19:50:00] [Rank 0] Total FTA: 0.9640
+[2025-07-06 19:50:00] [Rank 0] Group 0 Loss: 5.7034
+[2025-07-06 19:50:00] [Rank 0] Group 1 Loss: 5.3180
+[2025-07-06 19:50:00] [Rank 0] Group 2 Loss: 5.5339
+[2025-07-06 19:50:00] [Rank 0] Group 3 Loss: 5.5845
+[2025-07-06 19:50:00] [Rank 0] Group 4 Loss: 5.6255
+[2025-07-06 19:50:00] [Rank 0] Group 5 Loss: 5.4650
+[2025-07-06 19:50:00] [Rank 0] Group 6 Loss: 5.4598
+[2025-07-06 19:50:00] [Rank 0] Group 7 Loss: 5.4733
+[2025-07-06 19:50:00] [Rank 0] Group 8 Loss: 5.5320
+[2025-07-06 19:50:00] [Rank 0] Group 9 Loss: 5.5269
+[2025-07-06 19:50:00] [Rank 0] Group 10 Loss: 5.5253
+[2025-07-06 19:50:00] [Rank 0] Group 11 Loss: 5.5538
+[2025-07-06 19:50:00] [Rank 0] Group 0 FTA: 0.8075
+[2025-07-06 19:50:00] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:50:00] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-06 19:50:00] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-06 19:50:00] [Rank 0] Group 4 FTA: 0.9922
+[2025-07-06 19:50:00] [Rank 0] Group 5 FTA: 0.9818
+[2025-07-06 19:50:00] [Rank 0] Group 6 FTA: 0.9870
+[2025-07-06 19:50:00] [Rank 0] Group 7 FTA: 0.9844
+[2025-07-06 19:50:00] [Rank 0] Group 8 FTA: 0.9818
+[2025-07-06 19:50:00] [Rank 0] Group 9 FTA: 0.9844
+[2025-07-06 19:50:00] [Rank 0] Group 10 FTA: 0.9824
+[2025-07-06 19:50:00] [Rank 0] Group 11 FTA: 0.9863
+[2025-07-06 19:50:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-06 19:50:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-06 19:50:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-06 19:50:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-06 19:50:02] [Rank 0] step:10001/10000 train_time:808948ms step_avg:80.89ms
+[2025-07-06 19:50:02] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 19:50:02 2025 ---
+[2025-07-06 19:50:02] [Rank 0] PRINT: Peak memory allocated: 9183 MiB reserved: 10636 MiB
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/config.json b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..87b3a0b7ecd5f1cc5ff50ba9514d08ef612cd6fe
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 49,
+        "optimizer_mode": 0,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "753521f9-8fd0-4b5d-8e86-afb9ef604a6e",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..a683cea1f4c0f0d507f7298bb3b08e64ae32d6a0
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:759acc8d30f567a47aeb18609c4b1d5d3debd2ec984535f53e988a33f633441b
+size 352227
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..e963fc7a28df36d6c983f367b5c2326bb20334b1
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:853529e5433aaa9927d5e0ba73001e55431ee202184391dcf2a9ca9a8ee9bbc5
+size 362236
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..72af2ef7877da8a6a3a4f62515fbf8f4f135be5e
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a1503991fe05b40a12aae763d2f67e6e9b63fa595d149cd36417b508e613c79a
+size 109672
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..aa8dade850d073af5f3db335155cb091a8c549f8
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9af0ffdd7f3042b9ac7d232df0feb3b25b126053d8d23b3614f388dc130a3f8d
+size 109408
diff --git a/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/training_log_753521f9-8fd0-4b5d-8e86-afb9ef604a6e.txt b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/training_log_753521f9-8fd0-4b5d-8e86-afb9ef604a6e.txt
new file mode 100644
index 0000000000000000000000000000000000000000..c8a2c226d3d320875252a2a3dff2d89170d91156
--- /dev/null
+++ b/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/training_log_753521f9-8fd0-4b5d-8e86-afb9ef604a6e.txt
@@ -0,0 +1,5144 @@
+[2025-07-09 04:23:08] [Rank 0] PRINT: --- Script Start: Wed Jul 9 04:23:08 2025 ---
+[2025-07-09 04:23:08] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=49, optimizer_mode=0, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-09 04:23:08] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-09 04:23:08] [Rank 0] PRINT: Using fixed seed: 49
+[2025-07-09 04:23:08] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49
+[2025-07-09 04:23:08] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle(files) repeats the shard list, enabling multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
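+# [Editor's sketch, not part of the original run] For reference, a minimal
+# writer that produces shards in the format _load_data_shard above expects:
+# a 256-entry int32 header (magic 20240520, version 1, token count) followed
+# by the raw uint16 token stream. The function name is hypothetical.
+def _write_data_shard(file: Path, tokens):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520         # magic number checked by _load_data_shard
+    header[1] = 1                # format version
+    header[2] = len(tokens)      # number of uint16 tokens that follow
+    with Path(file).open("wb") as f:
+        f.write(header.tobytes())  # 256 * 4 bytes, matching f.seek(256 * 4) in the reader
+        f.write(np.asarray(tokens, dtype=np.uint16).tobytes())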
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
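+# -----------------------------------------------------------------------------
+# [Editor's note] Illustrative sketch only, not part of the original run: the
+# on-disk shard layout that _load_data_shard above expects. `write_shard` is a
+# hypothetical helper (it does not exist in this codebase); it simply mirrors
+# the loader's asserts: a 256-word int32 header (magic 20240520, version 1,
+# token count), followed by the tokens as uint16.
+def write_shard(path, tokens_uint16):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520            # magic number checked by _load_data_shard
+    header[1] = 1                   # supported version
+    header[2] = len(tokens_uint16)  # number of tokens in this shard
+    with open(path, "wb") as f:
+        f.write(header.tobytes())   # 256 * 4 header bytes, matching f.seek(256 * 4)
+        f.write(np.asarray(tokens_uint16, dtype=np.uint16).tobytes())
+# -----------------------------------------------------------------------------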
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the logfile exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code
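+        # [Editor's note] Illustrative sketch, not part of the original run: a
+        # compact summary of the optimizer_mode mapping described in the CLI
+        # help above and implemented in the optimizer setup further below.
+        # Embeddings, lm_head and scalar parameters go to Adam in every mode.
+        MODE_SUMMARY = {
+            0: "Muon: attn QKVO + all MLP | Adam: defaults only",
+            1: "Muon: attn QK             | Adam: attn VO, all MLP",
+            2: "Muon: attn VO             | Adam: attn QK, all MLP",
+            3: "Muon: attn QKVO           | Adam: all MLP",
+            4: "Muon: all MLP             | Adam: attn QKVO",
+            5: "Muon: (none)              | Adam: all matrices",
+            6: "Muon: MLP W_2             | Adam: attn QKVO, MLP W_1",
+            7: "Muon: attn VO + all MLP   | Adam: attn QK",
+            8: "Muon: attn VO + MLP W_2   | Adam: attn QK, MLP W_1",
+        }
+        # ... 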
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-09 04:23:08] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
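+# -----------------------------------------------------------------------------
+# [Editor's note] Illustrative sketch, not part of the original run: the
+# stable-then-decay learning-rate multiplier that get_lr implements later in
+# this script, using the Hyperparameters above (num_iterations=10000,
+# cooldown_frac=0.8). `_lr_multiplier_sketch` is a hypothetical standalone
+# copy for clarity, not a function from this codebase.
+def _lr_multiplier_sketch(step, num_iterations=10000, cooldown_frac=0.8):
+    x = min(max(step / num_iterations, 0.0), 1.0)  # training progress, clamped
+    if x < 1 - cooldown_frac:
+        return 1.0                                 # constant phase (steps 0-1999 here)
+    w = (1 - x) / cooldown_frac                    # linear cooldown toward 0.1x
+    return w * 1.0 + (1 - w) * 0.1
+# e.g. _lr_multiplier_sketch(0) -> 1.0, _lr_multiplier_sketch(6000) -> 0.55,
+# _lr_multiplier_sketch(10000) -> 0.1
+# -----------------------------------------------------------------------------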
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the logfile exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        return cleaned_text.strip()
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        cleaned_text = clean_qa_text_completely(qa_item['text'])
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+        if not answer:
+            return None
+
+        # Encode the answer with a leading space so the first token matches the training context
+        try:
+            answer_tokens = tokenizer.encode(' ' + answer, add_special_tokens=False)
+            if not answer_tokens:
+                return None
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_item['text'],
+                'cleaned': cleaned_text,
+            }
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    # Kept for reference; the cached validation file opened below is what is actually read.
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # truncate to the requested sample count
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'")
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim the prompt to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        # Prediction for the token immediately following the prompt
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'")
+            print(f" Expected : {result['expected_token']} -> '{result['exp_text']}'")
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct per-class sample counts following the paper's power-law
+    distribution: group 0 holds a single class with 2**m samples; group
+    g >= 1 holds 2**(g-1) classes with 2**(m-g) samples each."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
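+
+# Illustrative sanity check (worked example, not in the original script): for
+# m = 3 the helper above returns
+#     selection_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#     class_groups     == [0, 1, 2, 2, 3, 3, 3, 3]
+# i.e. 2**m classes in total; group 0 contributes 2**m samples, while every
+# group g >= 1 contributes 2**(g-1) * 2**(m-g) = 2**(m-1) samples. With
+# M_FOR_POWERLAW = 11 (used below) this yields 2048 classes across groups 0..11.
+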
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # for loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)    # for FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']:
+                continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None:
+                continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2:
+                continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match:
+                continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer:
+                continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple):
+                logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4.
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-09 04:23:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-09 04:23:09] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-09 04:23:09] [Rank 0] PRINT: Constructing model... +[2025-07-09 04:23:09] [Rank 0] PRINT: Constructing model... +[2025-07-09 04:23:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-09 04:23:12] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-09 04:23:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-09 04:23:12] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-09 04:23:12] [Rank 0] PRINT: Testing model forward function: +[2025-07-09 04:23:12] [Rank 0] PRINT: Testing model forward function: +[2025-07-09 04:23:13] [Rank 0] PRINT: Model test - Result type: +[2025-07-09 04:23:13] [Rank 0] PRINT: Model test - Result type: +[2025-07-09 04:23:13] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-09 04:23:13] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-09 04:23:13] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-09 04:23:13] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-09 04:23:13] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-09 04:23:13] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-09 04:23:13] [Rank 0] PRINT: Model returns: +[2025-07-09 04:23:13] [Rank 0] PRINT: Model returns: +[2025-07-09 04:23:13] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-09 04:23:13] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-09 04:23:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-09 04:23:13] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 0 +[2025-07-09 04:23:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-09 04:23:13] [Rank 0] PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices. +[2025-07-09 04:23:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-09 04:23:13] [Rank 0] PRINT: Optimizers configured. Total optimizers: 2 +[2025-07-09 04:23:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-09 04:23:13] [Rank 0] PRINT: Muon optimizer is active with 68 parameters. +[2025-07-09 04:23:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-09 04:23:13] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-09 04:23:13] [Rank 0] PRINT: Model compilation complete. +[2025-07-09 04:23:13] [Rank 0] PRINT: Model compilation complete. +[2025-07-09 04:23:13] [Rank 0] PRINT: Starting warmup... +[2025-07-09 04:23:13] [Rank 0] PRINT: Starting warmup... +[2025-07-09 04:25:08] [Rank 0] PRINT: Warmup complete. +[2025-07-09 04:25:08] [Rank 0] PRINT: Warmup complete. +[2025-07-09 04:25:08] [Rank 0] PRINT: Starting training... +[2025-07-09 04:25:08] [Rank 0] PRINT: Starting training... +[2025-07-09 04:25:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 04:25:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-09 04:25:15] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-09 04:25:15] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-09 04:25:17] [Rank 0] step:21/10000 train_time:1554ms step_avg:73.98ms +[2025-07-09 04:25:17] [Rank 0] step:21/10000 train_time:1554ms step_avg:73.98ms +[2025-07-09 04:25:19] [Rank 0] step:41/10000 train_time:3007ms step_avg:73.34ms +[2025-07-09 04:25:19] [Rank 0] step:41/10000 train_time:3007ms step_avg:73.34ms +[2025-07-09 04:25:20] [Rank 0] step:61/10000 train_time:4460ms step_avg:73.12ms +[2025-07-09 04:25:20] [Rank 0] step:61/10000 train_time:4460ms step_avg:73.12ms +[2025-07-09 04:25:21] [Rank 0] step:81/10000 train_time:5920ms step_avg:73.08ms +[2025-07-09 04:25:21] [Rank 0] step:81/10000 train_time:5920ms step_avg:73.08ms +[2025-07-09 04:25:24] [Rank 0] step:101/10000 train_time:8034ms step_avg:79.55ms +[2025-07-09 04:25:24] [Rank 0] step:101/10000 train_time:8034ms step_avg:79.55ms +[2025-07-09 04:25:25] [Rank 0] step:121/10000 train_time:9489ms step_avg:78.42ms +[2025-07-09 04:25:25] [Rank 0] step:121/10000 train_time:9489ms step_avg:78.42ms +[2025-07-09 04:25:26] [Rank 0] step:141/10000 train_time:10949ms step_avg:77.65ms +[2025-07-09 04:25:26] [Rank 0] step:141/10000 train_time:10949ms step_avg:77.65ms +[2025-07-09 04:25:28] [Rank 0] step:161/10000 train_time:12409ms step_avg:77.07ms +[2025-07-09 04:25:28] [Rank 0] step:161/10000 train_time:12409ms step_avg:77.07ms +[2025-07-09 04:25:30] [Rank 0] step:181/10000 train_time:14127ms step_avg:78.05ms +[2025-07-09 04:25:30] [Rank 0] step:181/10000 train_time:14127ms step_avg:78.05ms +[2025-07-09 04:25:32] [Rank 0] step:201/10000 train_time:15996ms step_avg:79.58ms +[2025-07-09 04:25:32] [Rank 0] step:201/10000 train_time:15996ms step_avg:79.58ms +[2025-07-09 04:25:33] [Rank 0] step:221/10000 train_time:17459ms step_avg:79.00ms +[2025-07-09 04:25:33] [Rank 0] step:221/10000 train_time:17459ms step_avg:79.00ms +[2025-07-09 04:25:34] [Rank 0] step:241/10000 train_time:18921ms step_avg:78.51ms +[2025-07-09 04:25:34] [Rank 0] step:241/10000 train_time:18921ms step_avg:78.51ms +[2025-07-09 04:25:36] [Rank 0] step:261/10000 train_time:20383ms step_avg:78.09ms +[2025-07-09 04:25:36] [Rank 0] step:261/10000 train_time:20383ms step_avg:78.09ms +[2025-07-09 04:25:37] [Rank 0] step:281/10000 train_time:21879ms step_avg:77.86ms +[2025-07-09 04:25:37] [Rank 0] step:281/10000 train_time:21879ms step_avg:77.86ms +[2025-07-09 04:25:39] [Rank 0] step:301/10000 train_time:23341ms step_avg:77.54ms +[2025-07-09 04:25:39] [Rank 0] step:301/10000 train_time:23341ms step_avg:77.54ms +[2025-07-09 04:25:40] [Rank 0] step:321/10000 train_time:24805ms step_avg:77.28ms +[2025-07-09 04:25:40] [Rank 0] step:321/10000 train_time:24805ms step_avg:77.28ms +[2025-07-09 04:25:42] [Rank 0] step:341/10000 train_time:26265ms step_avg:77.02ms +[2025-07-09 04:25:42] [Rank 0] step:341/10000 train_time:26265ms step_avg:77.02ms +[2025-07-09 04:25:43] [Rank 0] step:361/10000 train_time:27727ms step_avg:76.81ms +[2025-07-09 04:25:43] [Rank 0] step:361/10000 train_time:27727ms step_avg:76.81ms +[2025-07-09 04:25:45] [Rank 0] step:381/10000 train_time:29426ms step_avg:77.23ms +[2025-07-09 04:25:45] [Rank 0] step:381/10000 train_time:29426ms step_avg:77.23ms +[2025-07-09 04:25:46] [Rank 0] step:401/10000 train_time:30889ms step_avg:77.03ms +[2025-07-09 04:25:46] [Rank 0] step:401/10000 train_time:30889ms step_avg:77.03ms +[2025-07-09 04:25:48] [Rank 0] step:421/10000 train_time:32349ms step_avg:76.84ms 
+[2025-07-09 04:25:48] [Rank 0] step:421/10000 train_time:32349ms step_avg:76.84ms +[2025-07-09 04:25:49] [Rank 0] step:441/10000 train_time:33810ms step_avg:76.67ms +[2025-07-09 04:25:49] [Rank 0] step:441/10000 train_time:33810ms step_avg:76.67ms +[2025-07-09 04:25:51] [Rank 0] step:461/10000 train_time:35936ms step_avg:77.95ms +[2025-07-09 04:25:51] [Rank 0] step:461/10000 train_time:35936ms step_avg:77.95ms +[2025-07-09 04:25:53] [Rank 0] step:481/10000 train_time:37398ms step_avg:77.75ms +[2025-07-09 04:25:53] [Rank 0] step:481/10000 train_time:37398ms step_avg:77.75ms +[2025-07-09 04:25:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 04:25:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 04:25:55] [Rank 0] PRINT: step:500/10000 train_loss:2.2961 val_loss:1.3673 train_time:38858ms step_avg:77.72ms +[2025-07-09 04:25:55] [Rank 0] PRINT: step:500/10000 train_loss:2.2961 val_loss:1.3673 train_time:38858ms step_avg:77.72ms +[2025-07-09 04:25:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 04:25:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-09 04:25:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-09 04:25:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-09 04:25:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 04:25:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-09 04:31:17] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 04:31:17] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-09 04:31:17] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 04:31:17] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-09 04:31:17] [Rank 0] Total Loss: 4.3277 +[2025-07-09 04:31:17] [Rank 0] Total Loss: 4.3277 +[2025-07-09 04:31:17] [Rank 0] Total FTA: 0.1026 +[2025-07-09 04:31:17] [Rank 0] Total FTA: 0.1026 +[2025-07-09 04:31:17] [Rank 0] Group 0 Loss: 4.5736 +[2025-07-09 04:31:17] [Rank 0] Group 0 Loss: 4.5736 +[2025-07-09 04:31:17] [Rank 0] Group 1 Loss: 4.1533 +[2025-07-09 04:31:17] [Rank 0] Group 1 Loss: 4.1533 +[2025-07-09 04:31:17] [Rank 0] Group 2 Loss: 4.1284 +[2025-07-09 04:31:17] [Rank 0] Group 2 Loss: 4.1284 +[2025-07-09 04:31:17] [Rank 0] Group 3 Loss: 4.3731 +[2025-07-09 04:31:17] [Rank 0] Group 3 Loss: 4.3731 +[2025-07-09 04:31:17] [Rank 0] Group 4 Loss: 4.3050 +[2025-07-09 04:31:17] [Rank 0] Group 4 Loss: 4.3050 +[2025-07-09 04:31:17] [Rank 0] Group 5 Loss: 4.2641 +[2025-07-09 04:31:17] [Rank 0] Group 5 Loss: 4.2641 +[2025-07-09 04:31:17] [Rank 0] Group 6 Loss: 4.2414 +[2025-07-09 04:31:17] [Rank 0] Group 6 Loss: 4.2414 +[2025-07-09 04:31:17] [Rank 0] Group 7 Loss: 4.3306 +[2025-07-09 04:31:17] [Rank 0] Group 7 Loss: 4.3306 +[2025-07-09 04:31:17] [Rank 0] Group 8 Loss: 4.3264 +[2025-07-09 04:31:17] [Rank 0] Group 8 Loss: 4.3264 +[2025-07-09 04:31:17] [Rank 0] Group 9 Loss: 4.3084 +[2025-07-09 04:31:17] [Rank 0] Group 9 Loss: 4.3084 +[2025-07-09 04:31:17] [Rank 0] Group 10 Loss: 4.3183 +[2025-07-09 04:31:17] [Rank 0] Group 10 Loss: 4.3183 +[2025-07-09 04:31:17] [Rank 0] Group 11 Loss: 4.3397 +[2025-07-09 04:31:17] [Rank 0] Group 11 Loss: 4.3397 +[2025-07-09 04:31:17] [Rank 0] Group 0 FTA: 0.1612 +[2025-07-09 04:31:17] [Rank 0] Group 0 FTA: 0.1612 +[2025-07-09 
04:31:17] [Rank 0] Group 1 FTA: 0.1641 +[2025-07-09 04:31:17] [Rank 0] Group 1 FTA: 0.1641 +[2025-07-09 04:31:17] [Rank 0] Group 2 FTA: 0.0521 +[2025-07-09 04:31:17] [Rank 0] Group 2 FTA: 0.0521 +[2025-07-09 04:31:17] [Rank 0] Group 3 FTA: 0.1172 +[2025-07-09 04:31:17] [Rank 0] Group 3 FTA: 0.1172 +[2025-07-09 04:31:17] [Rank 0] Group 4 FTA: 0.0286 +[2025-07-09 04:31:17] [Rank 0] Group 4 FTA: 0.0286 +[2025-07-09 04:31:17] [Rank 0] Group 5 FTA: 0.1224 +[2025-07-09 04:31:17] [Rank 0] Group 5 FTA: 0.1224 +[2025-07-09 04:31:17] [Rank 0] Group 6 FTA: 0.0990 +[2025-07-09 04:31:17] [Rank 0] Group 6 FTA: 0.0990 +[2025-07-09 04:31:17] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-09 04:31:17] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-09 04:31:17] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-09 04:31:17] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-09 04:31:17] [Rank 0] Group 9 FTA: 0.1094 +[2025-07-09 04:31:17] [Rank 0] Group 9 FTA: 0.1094 +[2025-07-09 04:31:17] [Rank 0] Group 10 FTA: 0.1074 +[2025-07-09 04:31:17] [Rank 0] Group 10 FTA: 0.1074 +[2025-07-09 04:31:17] [Rank 0] Group 11 FTA: 0.0703 +[2025-07-09 04:31:17] [Rank 0] Group 11 FTA: 0.0703 +[2025-07-09 04:31:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png +[2025-07-09 04:31:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png +[2025-07-09 04:31:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png +[2025-07-09 04:31:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png +[2025-07-09 04:31:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png +[2025-07-09 04:31:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png +[2025-07-09 04:31:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png +[2025-07-09 04:31:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png +[2025-07-09 04:31:18] [Rank 0] step:501/10000 train_time:38884ms step_avg:77.61ms +[2025-07-09 04:31:18] [Rank 0] step:501/10000 train_time:38884ms step_avg:77.61ms +[2025-07-09 04:31:20] [Rank 0] step:521/10000 train_time:40353ms step_avg:77.45ms +[2025-07-09 04:31:20] [Rank 0] step:521/10000 train_time:40353ms step_avg:77.45ms +[2025-07-09 04:31:22] [Rank 0] step:541/10000 train_time:42481ms step_avg:78.52ms +[2025-07-09 04:31:22] [Rank 0] step:541/10000 train_time:42481ms step_avg:78.52ms +[2025-07-09 04:31:23] [Rank 0] step:561/10000 train_time:43914ms step_avg:78.28ms +[2025-07-09 04:31:23] [Rank 0] step:561/10000 train_time:43914ms step_avg:78.28ms +[2025-07-09 04:31:25] [Rank 0] step:581/10000 train_time:45364ms step_avg:78.08ms +[2025-07-09 04:31:25] [Rank 0] step:581/10000 train_time:45364ms step_avg:78.08ms +[2025-07-09 04:31:26] [Rank 0] step:601/10000 train_time:46815ms step_avg:77.90ms +[2025-07-09 04:31:26] [Rank 0] step:601/10000 train_time:46815ms step_avg:77.90ms +[2025-07-09 04:31:28] [Rank 0] step:621/10000 train_time:48269ms step_avg:77.73ms +[2025-07-09 04:31:28] [Rank 0] step:621/10000 train_time:48269ms step_avg:77.73ms 
+[2025-07-09 04:31:29] [Rank 0] step:641/10000 train_time:50104ms step_avg:78.17ms +[2025-07-09 04:31:29] [Rank 0] step:641/10000 train_time:50104ms step_avg:78.17ms +[2025-07-09 04:31:31] [Rank 0] step:661/10000 train_time:51650ms step_avg:78.14ms +[2025-07-09 04:31:31] [Rank 0] step:661/10000 train_time:51650ms step_avg:78.14ms +[2025-07-09 04:31:32] [Rank 0] step:681/10000 train_time:53115ms step_avg:78.00ms +[2025-07-09 04:31:32] [Rank 0] step:681/10000 train_time:53115ms step_avg:78.00ms +[2025-07-09 04:31:34] [Rank 0] step:701/10000 train_time:54568ms step_avg:77.84ms +[2025-07-09 04:31:34] [Rank 0] step:701/10000 train_time:54568ms step_avg:77.84ms +[2025-07-09 04:31:36] [Rank 0] step:721/10000 train_time:56687ms step_avg:78.62ms +[2025-07-09 04:31:36] [Rank 0] step:721/10000 train_time:56687ms step_avg:78.62ms +[2025-07-09 04:31:37] [Rank 0] step:741/10000 train_time:58125ms step_avg:78.44ms +[2025-07-09 04:31:37] [Rank 0] step:741/10000 train_time:58125ms step_avg:78.44ms +[2025-07-09 04:31:39] [Rank 0] step:761/10000 train_time:59593ms step_avg:78.31ms +[2025-07-09 04:31:39] [Rank 0] step:761/10000 train_time:59593ms step_avg:78.31ms +[2025-07-09 04:31:40] [Rank 0] step:781/10000 train_time:61057ms step_avg:78.18ms +[2025-07-09 04:31:40] [Rank 0] step:781/10000 train_time:61057ms step_avg:78.18ms +[2025-07-09 04:31:42] [Rank 0] step:801/10000 train_time:62524ms step_avg:78.06ms +[2025-07-09 04:31:42] [Rank 0] step:801/10000 train_time:62524ms step_avg:78.06ms +[2025-07-09 04:31:44] [Rank 0] step:821/10000 train_time:64630ms step_avg:78.72ms +[2025-07-09 04:31:44] [Rank 0] step:821/10000 train_time:64630ms step_avg:78.72ms +[2025-07-09 04:31:45] [Rank 0] step:841/10000 train_time:66096ms step_avg:78.59ms +[2025-07-09 04:31:45] [Rank 0] step:841/10000 train_time:66096ms step_avg:78.59ms +[2025-07-09 04:31:47] [Rank 0] step:861/10000 train_time:67565ms step_avg:78.47ms +[2025-07-09 04:31:47] [Rank 0] step:861/10000 train_time:67565ms step_avg:78.47ms +[2025-07-09 04:31:48] [Rank 0] step:881/10000 train_time:69033ms step_avg:78.36ms +[2025-07-09 04:31:48] [Rank 0] step:881/10000 train_time:69033ms step_avg:78.36ms +[2025-07-09 04:31:50] [Rank 0] step:901/10000 train_time:70558ms step_avg:78.31ms +[2025-07-09 04:31:50] [Rank 0] step:901/10000 train_time:70558ms step_avg:78.31ms +[2025-07-09 04:31:51] [Rank 0] step:921/10000 train_time:72209ms step_avg:78.40ms +[2025-07-09 04:31:51] [Rank 0] step:921/10000 train_time:72209ms step_avg:78.40ms +[2025-07-09 04:31:53] [Rank 0] step:941/10000 train_time:73681ms step_avg:78.30ms +[2025-07-09 04:31:53] [Rank 0] step:941/10000 train_time:73681ms step_avg:78.30ms +[2025-07-09 04:31:54] [Rank 0] step:961/10000 train_time:75150ms step_avg:78.20ms +[2025-07-09 04:31:54] [Rank 0] step:961/10000 train_time:75150ms step_avg:78.20ms +[2025-07-09 04:31:56] [Rank 0] step:981/10000 train_time:76622ms step_avg:78.11ms +[2025-07-09 04:31:56] [Rank 0] step:981/10000 train_time:76622ms step_avg:78.11ms +[2025-07-09 04:31:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-09 04:31:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-09 04:31:59] [Rank 0] PRINT: step:1000/10000 train_loss:1.2805 val_loss:1.2015 train_time:78745ms step_avg:78.74ms
+[2025-07-09 04:31:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 04:31:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 04:31:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 04:37:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 04:37:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 04:37:24] [Rank 0] Total Loss: 4.6744
+[2025-07-09 04:37:24] [Rank 0] Total FTA: 0.3016
+[2025-07-09 04:37:24] [Rank 0] Group 0 Loss: 4.8152
+[2025-07-09 04:37:24] [Rank 0] Group 1 Loss: 4.5160
+[2025-07-09 04:37:24] [Rank 0] Group 2 Loss: 4.5639
+[2025-07-09 04:37:24] [Rank 0] Group 3 Loss: 4.7037
+[2025-07-09 04:37:24] [Rank 0] Group 4 Loss: 4.6455
+[2025-07-09 04:37:24] [Rank 0] Group 5 Loss: 4.6332
+[2025-07-09 04:37:24] [Rank 0] Group 6 Loss: 4.5877
+[2025-07-09 04:37:24] [Rank 0] Group 7 Loss: 4.6971
+[2025-07-09 04:37:24] [Rank 0] Group 8 Loss: 4.6997
+[2025-07-09 04:37:24] [Rank 0] Group 9 Loss: 4.6199
+[2025-07-09 04:37:24] [Rank 0] Group 10 Loss: 4.7310
+[2025-07-09 04:37:24] [Rank 0] Group 11 Loss: 4.6846
+[2025-07-09 04:37:24] [Rank 0] Group 0 FTA: 0.3446
+[2025-07-09 04:37:24] [Rank 0] Group 1 FTA: 0.1875
+[2025-07-09 04:37:24] [Rank 0] Group 2 FTA: 0.4062
+[2025-07-09 04:37:24] [Rank 0] Group 3 FTA: 0.2135
+[2025-07-09 04:37:24] [Rank 0] Group 4 FTA: 0.3464
+[2025-07-09 04:37:24] [Rank 0] Group 5 FTA: 0.3177
+[2025-07-09 04:37:24] [Rank 0] Group 6 FTA: 0.2708
+[2025-07-09 04:37:24] [Rank 0] Group 7 FTA: 0.2969
+[2025-07-09 04:37:24] [Rank 0] Group 8 FTA: 0.3021
+[2025-07-09 04:37:24] [Rank 0] Group 9 FTA: 0.2812
+[2025-07-09 04:37:24] [Rank 0] Group 10 FTA: 0.3203
+[2025-07-09 04:37:24] [Rank 0] Group 11 FTA: 0.2920
+[2025-07-09 04:37:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 04:37:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 04:37:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 04:37:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 04:37:26] [Rank 0] step:1001/10000 train_time:78767ms step_avg:78.69ms
+[2025-07-09 04:37:27] [Rank 0] step:1021/10000 train_time:80248ms step_avg:78.60ms
+[2025-07-09 04:37:29] [Rank 0] step:1041/10000 train_time:81708ms step_avg:78.49ms
+[2025-07-09 04:37:30] [Rank 0] step:1061/10000 train_time:83166ms step_avg:78.38ms
+[2025-07-09 04:37:32] [Rank 0] step:1081/10000 train_time:84627ms step_avg:78.29ms
+[2025-07-09 04:37:34] [Rank 0] step:1101/10000 train_time:86736ms step_avg:78.78ms
+[2025-07-09 04:37:35] [Rank 0] step:1121/10000 train_time:88198ms step_avg:78.68ms
+[2025-07-09 04:37:36] [Rank 0] step:1141/10000 train_time:89659ms step_avg:78.58ms
+[2025-07-09 04:37:38] [Rank 0] step:1161/10000 train_time:91122ms step_avg:78.49ms
+[2025-07-09 04:37:40] [Rank 0] step:1181/10000 train_time:93236ms step_avg:78.95ms
+[2025-07-09 04:37:42] [Rank 0] step:1201/10000 train_time:94701ms step_avg:78.85ms
+[2025-07-09 04:37:43] [Rank 0] step:1221/10000 train_time:96164ms step_avg:78.76ms
+[2025-07-09 04:37:44] [Rank 0] step:1241/10000 train_time:97629ms step_avg:78.67ms
+[2025-07-09 04:37:47] [Rank 0] step:1261/10000 train_time:99349ms step_avg:78.79ms
+[2025-07-09 04:37:48] [Rank 0] step:1281/10000 train_time:101445ms step_avg:79.19ms
+[2025-07-09 04:37:50] [Rank 0] step:1301/10000 train_time:102910ms step_avg:79.10ms
+[2025-07-09 04:37:51] [Rank 0] step:1321/10000 train_time:104490ms step_avg:79.10ms
+[2025-07-09 04:37:53] [Rank 0] step:1341/10000 train_time:105954ms step_avg:79.01ms
+[2025-07-09 04:37:55] [Rank 0] step:1361/10000 train_time:108086ms step_avg:79.42ms
+[2025-07-09 04:37:56] [Rank 0] step:1381/10000 train_time:109552ms step_avg:79.33ms
+[2025-07-09 04:37:58] [Rank 0] step:1401/10000 train_time:111022ms step_avg:79.24ms
+[2025-07-09 04:37:59] [Rank 0] step:1421/10000 train_time:112488ms step_avg:79.16ms
+[2025-07-09 04:38:01] [Rank 0] step:1441/10000 train_time:114209ms step_avg:79.26ms
+[2025-07-09 04:38:03] [Rank 0] step:1461/10000 train_time:116085ms step_avg:79.46ms
+[2025-07-09 04:38:04] [Rank 0] step:1481/10000 train_time:117553ms step_avg:79.37ms
+[2025-07-09 04:38:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 04:38:07] [Rank 0] PRINT: step:1500/10000 train_loss:1.0869 val_loss:1.0213 train_time:119018ms step_avg:79.35ms
+[2025-07-09 04:38:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 04:38:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 04:38:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 04:43:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 04:43:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 04:43:32] [Rank 0] Total Loss: 5.2273
+[2025-07-09 04:43:32] [Rank 0] Total FTA: 0.6686
+[2025-07-09 04:43:32] [Rank 0] Group 0 Loss: 5.6639
+[2025-07-09 04:43:32] [Rank 0] Group 1 Loss: 5.0734
+[2025-07-09 04:43:32] [Rank 0] Group 2 Loss: 5.1544
+[2025-07-09 04:43:32] [Rank 0] Group 3 Loss: 5.1984
+[2025-07-09 04:43:32] [Rank 0] Group 4 Loss: 5.2472
+[2025-07-09 04:43:32] [Rank 0] Group 5 Loss: 5.1233
+[2025-07-09 04:43:32] [Rank 0] Group 6 Loss: 5.0706
+[2025-07-09 04:43:32] [Rank 0] Group 7 Loss: 5.1950
+[2025-07-09 04:43:32] [Rank 0] Group 8 Loss: 5.1265
+[2025-07-09 04:43:32] [Rank 0] Group 9 Loss: 5.0873
+[2025-07-09 04:43:32] [Rank 0] Group 10 Loss: 5.1697
+[2025-07-09 04:43:32] [Rank 0] Group 11 Loss: 5.1994
+[2025-07-09 04:43:32] [Rank 0] Group 0 FTA: 0.6528
+[2025-07-09 04:43:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 04:43:32] [Rank 0] Group 2 FTA: 0.6641
+[2025-07-09 04:43:32] [Rank 0] Group 3 FTA: 0.4349
+[2025-07-09 04:43:32] [Rank 0] Group 4 FTA: 0.5755
+[2025-07-09 04:43:32] [Rank 0] Group 5 FTA: 0.6797
+[2025-07-09 04:43:32] [Rank 0] Group 6 FTA: 0.6745
+[2025-07-09 04:43:32] [Rank 0] Group 7 FTA: 0.7057
+[2025-07-09 04:43:32] [Rank 0] Group 8 FTA: 0.6641
+[2025-07-09 04:43:32] [Rank 0] Group 9 FTA: 0.6797
+[2025-07-09 04:43:32] [Rank 0] Group 10 FTA: 0.6777
+[2025-07-09 04:43:32] [Rank 0] Group 11 FTA: 0.6543
+[2025-07-09 04:43:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 04:43:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 04:43:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 04:43:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 04:43:34] [Rank 0] step:1501/10000 train_time:119040ms step_avg:79.31ms
+[2025-07-09 04:43:35] [Rank 0] step:1521/10000 train_time:120527ms step_avg:79.24ms
+[2025-07-09 04:43:37] [Rank 0] step:1541/10000 train_time:122664ms step_avg:79.60ms
+[2025-07-09 04:43:39] [Rank 0] step:1561/10000 train_time:124123ms step_avg:79.52ms
+[2025-07-09 04:43:40] [Rank 0] step:1581/10000 train_time:125585ms step_avg:79.43ms
+[2025-07-09 04:43:42] [Rank 0] step:1601/10000 train_time:127045ms step_avg:79.35ms
+[2025-07-09 04:43:44] [Rank 0] step:1621/10000 train_time:128559ms step_avg:79.31ms
+[2025-07-09 04:43:45] [Rank 0] step:1641/10000 train_time:130625ms step_avg:79.60ms
+[2025-07-09 04:43:47] [Rank 0] step:1661/10000 train_time:132086ms step_avg:79.52ms
+[2025-07-09 04:43:48] [Rank 0] step:1681/10000 train_time:133547ms step_avg:79.44ms
+[2025-07-09 04:43:50] [Rank 0] step:1701/10000 train_time:135010ms step_avg:79.37ms
+[2025-07-09 04:43:52] [Rank 0] step:1721/10000 train_time:137126ms step_avg:79.68ms
+[2025-07-09 04:43:53] [Rank 0] step:1741/10000 train_time:138589ms step_avg:79.60ms
+[2025-07-09 04:43:55] [Rank 0] step:1761/10000 train_time:140054ms step_avg:79.53ms
+[2025-07-09 04:43:56] [Rank 0] step:1781/10000 train_time:141520ms step_avg:79.46ms
+[2025-07-09 04:43:58] [Rank 0] step:1801/10000 train_time:142985ms step_avg:79.39ms
+[2025-07-09 04:43:59] [Rank 0] step:1821/10000 train_time:144686ms step_avg:79.45ms
+[2025-07-09 04:44:01] [Rank 0] step:1841/10000 train_time:146151ms step_avg:79.39ms
+[2025-07-09 04:44:02] [Rank 0] step:1861/10000 train_time:147616ms step_avg:79.32ms
+[2025-07-09 04:44:04] [Rank 0] step:1881/10000 train_time:149082ms step_avg:79.26ms
+[2025-07-09 04:44:06] [Rank 0] step:1901/10000 train_time:150786ms step_avg:79.32ms
+[2025-07-09 04:44:07] [Rank 0] step:1921/10000 train_time:152409ms step_avg:79.34ms
+[2025-07-09 04:44:09] [Rank 0] step:1941/10000 train_time:153937ms step_avg:79.31ms
+[2025-07-09 04:44:10] [Rank 0] step:1961/10000 train_time:155548ms step_avg:79.32ms
+[2025-07-09 04:44:12] [Rank 0] step:1981/10000 train_time:157272ms step_avg:79.39ms
+[2025-07-09 04:44:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 04:44:15] [Rank 0] PRINT: step:2000/10000 train_loss:0.9135 val_loss:0.9611 train_time:159147ms step_avg:79.57ms
+[2025-07-09 04:44:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 04:44:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 04:44:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 04:49:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 04:49:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 04:49:40] [Rank 0] Total Loss: 5.3224
+[2025-07-09 04:49:40] [Rank 0] Total FTA: 0.7996
+[2025-07-09 04:49:40] [Rank 0] Group 0 Loss: 5.7842
+[2025-07-09 04:49:40] [Rank 0] Group 1 Loss: 5.0318
+[2025-07-09 04:49:40] [Rank 0] Group 2 Loss: 5.1077
+[2025-07-09 04:49:40] [Rank 0] Group 3 Loss: 5.3271
+[2025-07-09 04:49:40] [Rank 0] Group 4 Loss: 5.3385
+[2025-07-09 04:49:40] [Rank 0] Group 5 Loss: 5.2676
+[2025-07-09 04:49:40] [Rank 0] Group 6 Loss: 5.2258
+[2025-07-09 04:49:40] [Rank 0] Group 7 Loss: 5.2470
+[2025-07-09 04:49:40] [Rank 0] Group 8 Loss: 5.3149
+[2025-07-09 04:49:40] [Rank 0] Group 9 Loss: 5.2237
+[2025-07-09 04:49:40] [Rank 0] Group 10 Loss: 5.2631
+[2025-07-09 04:49:40] [Rank 0] Group 11 Loss: 5.2994
+[2025-07-09 04:49:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 04:49:40] [Rank 0] Group 1 FTA: 0.8203
+[2025-07-09 04:49:40] [Rank 0] Group 2 FTA: 0.6615
+[2025-07-09 04:49:40] [Rank 0] Group 3 FTA: 0.7031
+[2025-07-09 04:49:40] [Rank 0] Group 4 FTA: 0.7682
+[2025-07-09 04:49:40] [Rank 0] Group 5 FTA: 0.7500
+[2025-07-09 04:49:40] [Rank 0] Group 6 FTA: 0.7865
+[2025-07-09 04:49:40] [Rank 0] Group 7 FTA: 0.8151
+[2025-07-09 04:49:40] [Rank 0] Group 8 FTA: 0.7682
+[2025-07-09 04:49:40] [Rank 0] Group 9 FTA: 0.7773
+[2025-07-09 04:49:40] [Rank 0] Group 10 FTA: 0.7773
+[2025-07-09 04:49:40] [Rank 0] Group 11 FTA: 0.7871
+[2025-07-09 04:49:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 04:49:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 04:49:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 04:49:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 04:49:41] [Rank 0] step:2001/10000 train_time:159167ms step_avg:79.54ms
+[2025-07-09 04:49:43] [Rank 0] step:2021/10000 train_time:160640ms step_avg:79.49ms
+[2025-07-09 04:49:44] [Rank 0] step:2041/10000 train_time:162098ms step_avg:79.42ms
+[2025-07-09 04:49:46] [Rank 0] step:2061/10000 train_time:163558ms step_avg:79.36ms
+[2025-07-09 04:49:48] [Rank 0] step:2081/10000 train_time:165677ms step_avg:79.61ms
+[2025-07-09 04:49:49] [Rank 0] step:2101/10000 train_time:167136ms step_avg:79.55ms
+[2025-07-09 04:49:51] [Rank 0] step:2121/10000 train_time:168596ms step_avg:79.49ms
+[2025-07-09 04:49:52] [Rank 0] step:2141/10000 train_time:170059ms step_avg:79.43ms
+[2025-07-09 04:49:54] [Rank 0] step:2161/10000 train_time:171777ms step_avg:79.49ms
+[2025-07-09 04:49:56] [Rank 0] step:2181/10000 train_time:173644ms step_avg:79.62ms
+[2025-07-09 04:49:57] [Rank 0] step:2201/10000 train_time:175105ms step_avg:79.56ms
+[2025-07-09 04:49:59] [Rank 0] step:2221/10000 train_time:176568ms step_avg:79.50ms
+[2025-07-09 04:50:00] [Rank 0] step:2241/10000 train_time:178056ms step_avg:79.45ms
+[2025-07-09 04:50:02] [Rank 0] step:2261/10000 train_time:179784ms step_avg:79.52ms
+[2025-07-09 04:50:03] [Rank 0] step:2281/10000 train_time:181272ms step_avg:79.47ms
+[2025-07-09 04:50:05] [Rank 0] step:2301/10000 train_time:182761ms step_avg:79.43ms
+[2025-07-09 04:50:06] [Rank 0] step:2321/10000 train_time:184252ms step_avg:79.38ms
+[2025-07-09 04:50:08] [Rank 0] step:2341/10000 train_time:185743ms step_avg:79.34ms
+[2025-07-09 04:50:10] [Rank 0] step:2361/10000 train_time:187467ms step_avg:79.40ms
+[2025-07-09 04:50:11] [Rank 0] step:2381/10000 train_time:188959ms step_avg:79.36ms
+[2025-07-09 04:50:13] [Rank 0] step:2401/10000 train_time:190453ms step_avg:79.32ms
+[2025-07-09 04:50:14] [Rank 0] step:2421/10000 train_time:191944ms step_avg:79.28ms
+[2025-07-09 04:50:16] [Rank 0] step:2441/10000 train_time:194076ms step_avg:79.51ms
+[2025-07-09 04:50:18] [Rank 0] step:2461/10000 train_time:195567ms step_avg:79.47ms
+[2025-07-09 04:50:19] [Rank 0] step:2481/10000 train_time:197059ms step_avg:79.43ms
+[2025-07-09 04:50:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 04:50:22] [Rank 0] PRINT: step:2500/10000 train_loss:0.8916 val_loss:0.8800 train_time:198550ms step_avg:79.42ms
+[2025-07-09 04:50:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 04:50:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 04:50:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 04:55:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 04:55:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 04:55:45] [Rank 0] Total Loss: 5.2692
+[2025-07-09 04:55:45] [Rank 0] Total FTA: 0.8724
+[2025-07-09 04:55:45] [Rank 0] Group 0 Loss: 5.5604
+[2025-07-09 04:55:45] [Rank 0] Group 1 Loss: 5.1910
+[2025-07-09 04:55:45] [Rank 0] Group 2 Loss: 5.1383
+[2025-07-09 04:55:45] [Rank 0] Group 3 Loss: 5.2461
+[2025-07-09 04:55:45] [Rank 0] Group 4 Loss: 5.3048
+[2025-07-09 04:55:45] [Rank 0] Group 5 Loss: 5.1588
+[2025-07-09 04:55:45] [Rank 0] Group 6 Loss: 5.1004
+[2025-07-09 04:55:45] [Rank 0] Group 7 Loss: 5.2952
+[2025-07-09 04:55:45] [Rank 0] Group 8 Loss: 5.2425
+[2025-07-09 04:55:45] [Rank 0] Group 9 Loss: 5.2451
+[2025-07-09 04:55:45] [Rank 0] Group 10 Loss: 5.2503
+[2025-07-09 04:55:45] [Rank 0] Group 11 Loss: 5.2449
+[2025-07-09 04:55:45] [Rank 0] Group 0 FTA: 0.8218
+[2025-07-09 04:55:45] [Rank 0] Group 1 FTA: 0.8229
+[2025-07-09 04:55:45] [Rank 0] Group 2 FTA: 0.8307
+[2025-07-09 04:55:45] [Rank 0] Group 3 FTA: 0.8438
+[2025-07-09 04:55:45] [Rank 0] Group 4 FTA: 0.9089
+[2025-07-09 04:55:45] [Rank 0] Group 5 FTA: 0.9219
+[2025-07-09 04:55:45] [Rank 0] Group 6 FTA: 0.8932
+[2025-07-09 04:55:45] [Rank 0] Group 7 FTA: 0.8802
+[2025-07-09 04:55:45] [Rank 0] Group 8 FTA: 0.8672
+[2025-07-09 04:55:45] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-09 04:55:45] [Rank 0] Group 10 FTA: 0.8828
+[2025-07-09 04:55:45] [Rank 0] Group 11 FTA: 0.9043
+[2025-07-09 04:55:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 04:55:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 04:55:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 04:55:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 04:55:47] [Rank 0] step:2501/10000 train_time:198574ms step_avg:79.40ms
+[2025-07-09 04:55:49] [Rank 0] step:2521/10000 train_time:200123ms step_avg:79.38ms
+[2025-07-09 04:55:51] [Rank 0] step:2541/10000 train_time:202195ms step_avg:79.57ms
+[2025-07-09 04:55:52] [Rank 0] step:2561/10000 train_time:203678ms step_avg:79.53ms
+[2025-07-09 04:55:54] [Rank 0] step:2581/10000 train_time:205161ms step_avg:79.49ms
+[2025-07-09 04:55:55] [Rank 0] step:2601/10000 train_time:206647ms step_avg:79.45ms
+[2025-07-09 04:55:57] [Rank 0] step:2621/10000 train_time:208779ms step_avg:79.66ms
+[2025-07-09 04:55:59] [Rank 0] step:2641/10000 train_time:210262ms step_avg:79.61ms
+[2025-07-09 04:56:00] [Rank 0] step:2661/10000 train_time:211747ms step_avg:79.57ms
+[2025-07-09 04:56:02] [Rank 0] step:2681/10000 train_time:213236ms step_avg:79.54ms
+[2025-07-09 04:56:03] [Rank 0] step:2701/10000 train_time:214776ms step_avg:79.52ms
+[2025-07-09 04:56:05] [Rank 0] step:2721/10000 train_time:216449ms step_avg:79.55ms
+[2025-07-09 04:56:06] [Rank 0] step:2741/10000 train_time:217937ms step_avg:79.51ms
+[2025-07-09 04:56:08] [Rank 0] step:2761/10000 train_time:219427ms step_avg:79.47ms
+[2025-07-09 04:56:09] [Rank 0] step:2781/10000 train_time:220915ms step_avg:79.44ms
+[2025-07-09 04:56:11] [Rank 0] step:2801/10000 train_time:222638ms step_avg:79.49ms
+[2025-07-09 04:56:13] [Rank 0] step:2821/10000 train_time:224130ms step_avg:79.45ms
+[2025-07-09 04:56:14] [Rank 0] step:2841/10000 train_time:225621ms step_avg:79.42ms
+[2025-07-09 04:56:16] [Rank 0] step:2861/10000 train_time:227111ms step_avg:79.38ms
+[2025-07-09 04:56:17] [Rank 0] step:2881/10000 train_time:228650ms step_avg:79.36ms
+[2025-07-09 04:56:19] [Rank 0] step:2901/10000 train_time:230326ms step_avg:79.40ms
+[2025-07-09 04:56:20] [Rank 0] step:2921/10000 train_time:231819ms step_avg:79.36ms
+[2025-07-09 04:56:22] [Rank 0] step:2941/10000 train_time:233310ms step_avg:79.33ms
+[2025-07-09 04:56:23] [Rank 0] step:2961/10000 train_time:234801ms step_avg:79.30ms
+[2025-07-09 04:56:25] [Rank 0] step:2981/10000 train_time:236527ms step_avg:79.34ms
+[2025-07-09 04:56:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 04:56:27] [Rank 0] PRINT: step:3000/10000 train_loss:0.8801 val_loss:0.8735 train_time:238020ms step_avg:79.34ms
+[2025-07-09 04:56:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 04:56:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 04:56:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:01:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:01:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:01:52] [Rank 0] Total Loss: 5.2834
+[2025-07-09 05:01:52] [Rank 0] Total FTA: 0.8550
+[2025-07-09 05:01:52] [Rank 0] Group 0 Loss: 5.4759
+[2025-07-09 05:01:52] [Rank 0] Group 1 Loss: 5.5274
+[2025-07-09 05:01:52] [Rank 0] Group 2 Loss: 5.0223
+[2025-07-09 05:01:52] [Rank 0] Group 3 Loss: 5.2800
+[2025-07-09 05:01:52] [Rank 0] Group 4 Loss: 5.2916
+[2025-07-09 05:01:52] [Rank 0] Group 5 Loss: 5.2390
+[2025-07-09 05:01:52] [Rank 0] Group 6 Loss: 5.1314
+[2025-07-09 05:01:52] [Rank 0] Group 7 Loss: 5.2715
+[2025-07-09 05:01:52] [Rank 0] Group 8 Loss: 5.2413
+[2025-07-09 05:01:52] [Rank 0] Group 9 Loss: 5.2789
+[2025-07-09 05:01:52] [Rank 0] Group 10 Loss: 5.2035
+[2025-07-09 05:01:52] [Rank 0] Group 11 Loss: 5.2784
+[2025-07-09 05:01:52] [Rank 0] Group 0 FTA: 0.8283
+[2025-07-09 05:01:52] [Rank 0] Group 1 FTA: 0.8255
+[2025-07-09 05:01:52] [Rank 0] Group 2 FTA: 0.6484
+[2025-07-09 05:01:52] [Rank 0] Group 3 FTA: 0.8255
+[2025-07-09 05:01:52] [Rank 0] Group 4 FTA: 0.9036
+[2025-07-09 05:01:52] [Rank 0] Group 5 FTA: 0.8724
+[2025-07-09 05:01:52] [Rank 0] Group 6 FTA: 0.8828
+[2025-07-09 05:01:52] [Rank 0] Group 7 FTA: 0.8568
+[2025-07-09 05:01:52] [Rank 0] Group 8 FTA: 0.8880
+[2025-07-09 05:01:52] [Rank 0] Group 9 FTA: 0.8906
+[2025-07-09 05:01:52] [Rank 0] Group 10 FTA: 0.9043
+[2025-07-09 05:01:52] [Rank 0] Group 11 FTA: 0.8926
+[2025-07-09 05:01:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:01:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 05:01:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:01:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:01:54] [Rank 0] step:3001/10000 train_time:238040ms step_avg:79.32ms
+[2025-07-09 05:01:55] [Rank 0] step:3021/10000 train_time:239534ms step_avg:79.29ms
+[2025-07-09 05:01:57] [Rank 0] step:3041/10000 train_time:241016ms step_avg:79.26ms
+[2025-07-09 05:01:59] [Rank 0] step:3061/10000 train_time:242757ms step_avg:79.31ms
+[2025-07-09 05:02:00] [Rank 0] step:3081/10000 train_time:244638ms step_avg:79.40ms
+[2025-07-09 05:02:02] [Rank 0] step:3101/10000 train_time:246288ms step_avg:79.42ms
+[2025-07-09 05:02:04] [Rank 0] step:3121/10000 train_time:247862ms step_avg:79.42ms
+[2025-07-09 05:02:05] [Rank 0] step:3141/10000 train_time:249457ms step_avg:79.42ms
+[2025-07-09 05:02:07] [Rank 0] step:3161/10000 train_time:251608ms step_avg:79.60ms
+[2025-07-09 05:02:09] [Rank 0] step:3181/10000 train_time:253095ms step_avg:79.56ms
+[2025-07-09 05:02:10] [Rank 0] step:3201/10000 train_time:254585ms step_avg:79.53ms
+[2025-07-09 05:02:12] [Rank 0] step:3221/10000 train_time:256073ms step_avg:79.50ms
+[2025-07-09 05:02:14] [Rank 0] step:3241/10000 train_time:257562ms step_avg:79.47ms
+[2025-07-09 05:02:16] [Rank 0] step:3261/10000 train_time:259716ms step_avg:79.64ms
+[2025-07-09 05:02:17] [Rank 0] step:3281/10000 train_time:261204ms step_avg:79.61ms
+[2025-07-09 05:02:19] [Rank 0] step:3301/10000 train_time:262693ms step_avg:79.58ms
+[2025-07-09 05:02:20] [Rank 0] step:3321/10000 train_time:264183ms step_avg:79.55ms
+[2025-07-09 05:02:22] [Rank 0] step:3341/10000 train_time:266328ms step_avg:79.72ms
+[2025-07-09 05:02:24] [Rank 0] step:3361/10000 train_time:267817ms step_avg:79.68ms
+[2025-07-09 05:02:25] [Rank 0] step:3381/10000 train_time:269307ms step_avg:79.65ms
+[2025-07-09 05:02:27] [Rank 0] step:3401/10000 train_time:270798ms step_avg:79.62ms
+[2025-07-09 05:02:29] [Rank 0] step:3421/10000 train_time:272338ms step_avg:79.61ms
+[2025-07-09 05:02:30] [Rank 0] step:3441/10000 train_time:274437ms step_avg:79.76ms
+[2025-07-09 05:02:32] [Rank 0] step:3461/10000 train_time:275928ms step_avg:79.72ms
+[2025-07-09 05:02:33] [Rank 0] step:3481/10000 train_time:277420ms step_avg:79.70ms
+[2025-07-09 05:02:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:02:36] [Rank 0] PRINT: step:3500/10000 train_loss:0.8734 val_loss:0.8690 train_time:278911ms step_avg:79.69ms
+[2025-07-09 05:02:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:02:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:02:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:07:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:07:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:07:59] [Rank 0] Total Loss: 5.3583
+[2025-07-09 05:07:59] [Rank 0] Total FTA: 0.9514
+[2025-07-09 05:07:59] [Rank 0] Group 0 Loss: 5.4310
+[2025-07-09 05:07:59] [Rank 0] Group 1 Loss: 5.3689
+[2025-07-09 05:07:59] [Rank 0] Group 2 Loss: 4.9375
+[2025-07-09 05:07:59] [Rank 0] Group 3 Loss: 5.4474
+[2025-07-09 05:07:59] [Rank 0] Group 4 Loss: 5.4275
+[2025-07-09 05:07:59] [Rank 0] Group 5 Loss: 5.3182
+[2025-07-09 05:07:59] [Rank 0] Group 6 Loss: 5.3114
+[2025-07-09 05:07:59] [Rank 0] Group 7 Loss: 5.3978
+[2025-07-09 05:07:59] [Rank 0] Group 8 Loss: 5.4024
+[2025-07-09 05:07:59] [Rank 0] Group 9 Loss: 5.3288
+[2025-07-09 05:07:59] [Rank 0] Group 10 Loss: 5.3794
+[2025-07-09 05:07:59] [Rank 0] Group 11 Loss: 5.3962
+[2025-07-09 05:07:59] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 05:07:59] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:07:59] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 05:08:00] [Rank 0] Group 3 FTA: 0.9531
+[2025-07-09 05:08:00] [Rank 0] Group 4 FTA: 0.9635
+[2025-07-09 05:08:00] [Rank 0] Group 5 FTA: 0.8620
+[2025-07-09 05:08:00] [Rank 0] Group 6 FTA: 0.9193
+[2025-07-09 05:08:00] [Rank 0] Group 7 FTA: 0.9167
+[2025-07-09 05:08:00] [Rank 0] Group 8 FTA: 0.9349
+[2025-07-09 05:08:00] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-09 05:08:00] [Rank 0] Group 10 FTA: 0.9512
+[2025-07-09 05:08:00] [Rank 0] Group 11 FTA: 0.9414
+[2025-07-09 05:08:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:08:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 05:08:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:08:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:08:01] [Rank 0] step:3501/10000 train_time:278934ms step_avg:79.67ms
+[2025-07-09 05:08:03] [Rank 0] step:3521/10000 train_time:281071ms step_avg:79.83ms
+[2025-07-09 05:08:05] [Rank 0] step:3541/10000 train_time:282551ms step_avg:79.79ms
+[2025-07-09 05:08:06] [Rank 0] step:3561/10000 train_time:284034ms step_avg:79.76ms
+[2025-07-09 05:08:08] [Rank 0] step:3581/10000 train_time:285519ms step_avg:79.73ms
+[2025-07-09 05:08:10] [Rank 0] step:3601/10000 train_time:287676ms step_avg:79.89ms
+[2025-07-09 05:08:11] [Rank 0] step:3621/10000 train_time:289141ms step_avg:79.85ms
+[2025-07-09 05:08:13] [Rank 0] step:3641/10000 train_time:290625ms step_avg:79.82ms
+[2025-07-09 05:08:14] [Rank 0] step:3661/10000 train_time:292112ms step_avg:79.79ms
+[2025-07-09 05:08:16] [Rank 0] step:3681/10000 train_time:293597ms step_avg:79.76ms
+[2025-07-09 05:08:18] [Rank 0] step:3701/10000 train_time:295726ms step_avg:79.90ms
+[2025-07-09 05:08:19] [Rank 0] step:3721/10000 train_time:297214ms step_avg:79.87ms
+[2025-07-09 05:08:21] [Rank 0] step:3741/10000 train_time:298703ms step_avg:79.85ms
+[2025-07-09 05:08:22] [Rank 0] step:3761/10000 train_time:300319ms step_avg:79.85ms
+[2025-07-09 05:08:25] [Rank 0] step:3781/10000 train_time:302493ms step_avg:80.00ms
+[2025-07-09 05:08:26] [Rank 0] step:3801/10000 train_time:303962ms step_avg:79.97ms
+[2025-07-09 05:08:28] [Rank 0] step:3821/10000 train_time:305451ms step_avg:79.94ms
+[2025-07-09 05:08:29] [Rank 0] step:3841/10000 train_time:306940ms step_avg:79.91ms
+[2025-07-09 05:08:31] [Rank 0] step:3861/10000 train_time:308430ms step_avg:79.88ms
+[2025-07-09 05:08:33] [Rank 0] step:3881/10000 train_time:310574ms step_avg:80.02ms
+[2025-07-09 05:08:34] [Rank 0] step:3901/10000 train_time:312064ms step_avg:80.00ms
+[2025-07-09 05:08:36] [Rank 0] step:3921/10000 train_time:313553ms step_avg:79.97ms
+[2025-07-09 05:08:37] [Rank 0] step:3941/10000 train_time:315043ms step_avg:79.94ms
+[2025-07-09 05:08:39] [Rank 0] step:3961/10000 train_time:316583ms step_avg:79.93ms
+[2025-07-09 05:08:41] [Rank 0] step:3981/10000 train_time:318682ms step_avg:80.05ms
+[2025-07-09 05:08:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:08:43] [Rank 0] PRINT: step:4000/10000 train_loss:0.8696 val_loss:0.8665 train_time:320171ms step_avg:80.04ms
+[2025-07-09 05:08:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:08:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:08:43] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:08:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:08:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:14:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:14:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:14:08] [Rank 0] Total Loss: 5.3832
+[2025-07-09 05:14:08] [Rank 0] Total FTA: 0.9526
+[2025-07-09 05:14:08] [Rank 0] Group 0 Loss: 5.5642
+[2025-07-09 05:14:08] [Rank 0] Group 1 Loss: 5.5967
+[2025-07-09 05:14:08] [Rank 0] Group 2 Loss: 5.1487
+[2025-07-09 05:14:08] [Rank 0] Group 3 Loss: 5.3762
+[2025-07-09 05:14:08] [Rank 0] Group 4 Loss: 5.4285
+[2025-07-09 05:14:08] [Rank 0] Group 5 Loss: 5.2812
+[2025-07-09 05:14:08] [Rank 0] Group 6 Loss: 5.2610
+[2025-07-09 05:14:08] [Rank 0] Group 7 Loss: 5.4036
+[2025-07-09 05:14:08] [Rank 0] Group 8 Loss: 5.3428
+[2025-07-09 05:14:08] [Rank 0] Group 9 Loss: 5.3127
+[2025-07-09 05:14:08] [Rank 0] Group 10 Loss: 5.3386
+[2025-07-09 05:14:08] [Rank 0] Group 11 Loss: 5.3726
+[2025-07-09 05:14:08] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 05:14:08] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:14:08] [Rank 0] Group 2 FTA: 0.9219
+[2025-07-09 05:14:08] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-09 05:14:08] [Rank 0] Group 4 FTA: 0.9401
+[2025-07-09 05:14:08] [Rank 0] Group 5 FTA: 0.9479
+[2025-07-09 05:14:08] [Rank 0] Group 6 FTA: 0.9531
+[2025-07-09 05:14:08] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-09 05:14:08] [Rank 0] Group 8 FTA: 0.9375
+[2025-07-09 05:14:08] [Rank 0] Group 9 FTA: 0.9258
+[2025-07-09 05:14:08] [Rank 0] Group 10 FTA: 0.9316
+[2025-07-09 05:14:08] [Rank 0] Group 11 FTA: 0.9336
+[2025-07-09 05:14:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:14:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
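The sampler targets ~5000 samples but lands on exactly 5633 every pass. That overshoot is what per-class quota rounding produces: taking a ceiling-rounded share from each class exceeds the target whenever it does not divide evenly. A minimal sketch under that assumption (the actual sampler, its class structure, and the names below are not shown in the log):

    import math
    import random

    def stratified_sample(items_by_class, target_total, rng=random.Random(0)):
        # Equal, ceiling-rounded quota per class; the ceil is why a ~5000
        # target can come out as 5633 once every class contributes its share.
        quota = math.ceil(target_total / len(items_by_class))
        sampled = []
        for cls, items in items_by_class.items():
            sampled.extend(rng.sample(items, min(quota, len(items))))
        return sampled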
+[2025-07-09 05:14:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:14:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:14:10] [Rank 0] step:4001/10000 train_time:320194ms step_avg:80.03ms
+[2025-07-09 05:14:12] [Rank 0] step:4021/10000 train_time:321681ms step_avg:80.00ms
+[2025-07-09 05:14:13] [Rank 0] step:4041/10000 train_time:323165ms step_avg:79.97ms
+[2025-07-09 05:14:15] [Rank 0] step:4061/10000 train_time:325311ms step_avg:80.11ms
+[2025-07-09 05:14:17] [Rank 0] step:4081/10000 train_time:326794ms step_avg:80.08ms
+[2025-07-09 05:14:18] [Rank 0] step:4101/10000 train_time:328279ms step_avg:80.05ms
+[2025-07-09 05:14:20] [Rank 0] step:4121/10000 train_time:329764ms step_avg:80.02ms
+[2025-07-09 05:14:21] [Rank 0] step:4141/10000 train_time:331252ms step_avg:79.99ms
+[2025-07-09 05:14:23] [Rank 0] step:4161/10000 train_time:332975ms step_avg:80.02ms
+[2025-07-09 05:14:24] [Rank 0] step:4181/10000 train_time:334465ms step_avg:80.00ms
+[2025-07-09 05:14:26] [Rank 0] step:4201/10000 train_time:335952ms step_avg:79.97ms
+[2025-07-09 05:14:27] [Rank 0] step:4221/10000 train_time:337441ms step_avg:79.94ms
+[2025-07-09 05:14:29] [Rank 0] step:4241/10000 train_time:339585ms step_avg:80.07ms
+[2025-07-09 05:14:31] [Rank 0] step:4261/10000 train_time:341074ms step_avg:80.05ms
+[2025-07-09 05:14:32] [Rank 0] step:4281/10000 train_time:342561ms step_avg:80.02ms
+[2025-07-09 05:14:34] [Rank 0] step:4301/10000 train_time:344051ms step_avg:79.99ms
+[2025-07-09 05:14:36] [Rank 0] step:4321/10000 train_time:345591ms step_avg:79.98ms
+[2025-07-09 05:14:37] [Rank 0] step:4341/10000 train_time:347271ms step_avg:80.00ms
+[2025-07-09 05:14:39] [Rank 0] step:4361/10000 train_time:348759ms step_avg:79.97ms
+[2025-07-09 05:14:40] [Rank 0] step:4381/10000 train_time:350249ms step_avg:79.95ms
+[2025-07-09 05:14:42] [Rank 0] step:4401/10000 train_time:351868ms step_avg:79.95ms
+[2025-07-09 05:14:43] [Rank 0] step:4421/10000 train_time:353601ms step_avg:79.98ms
+[2025-07-09 05:14:45] [Rank 0] step:4441/10000 train_time:355196ms step_avg:79.98ms
+[2025-07-09 05:14:47] [Rank 0] step:4461/10000 train_time:356690ms step_avg:79.96ms
+[2025-07-09 05:14:48] [Rank 0] step:4481/10000 train_time:358181ms step_avg:79.93ms
+[2025-07-09 05:14:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:14:50] [Rank 0] PRINT: step:4500/10000 train_loss:0.8665 val_loss:0.8648 train_time:359673ms step_avg:79.93ms
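step_avg in these lines is simply cumulative train_time divided by steps completed, which is why it drifts smoothly while individual 20-step deltas jitter. At the step 4500 checkpoint above the logged value can be reproduced directly:

    train_time_ms = 359673
    step = 4500
    step_avg_ms = train_time_ms / step   # 79.927... -> logged as 79.93ms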
+[2025-07-09 05:14:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:14:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:14:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:20:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:20:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:20:15] [Rank 0] Total Loss: 5.4276
+[2025-07-09 05:20:15] [Rank 0] Total FTA: 0.9322
+[2025-07-09 05:20:15] [Rank 0] Group 0 Loss: 5.7193
+[2025-07-09 05:20:15] [Rank 0] Group 1 Loss: 5.4546
+[2025-07-09 05:20:15] [Rank 0] Group 2 Loss: 5.1786
+[2025-07-09 05:20:15] [Rank 0] Group 3 Loss: 5.4400
+[2025-07-09 05:20:15] [Rank 0] Group 4 Loss: 5.4892
+[2025-07-09 05:20:15] [Rank 0] Group 5 Loss: 5.3433
+[2025-07-09 05:20:15] [Rank 0] Group 6 Loss: 5.2800
+[2025-07-09 05:20:15] [Rank 0] Group 7 Loss: 5.4317
+[2025-07-09 05:20:15] [Rank 0] Group 8 Loss: 5.3955
+[2025-07-09 05:20:15] [Rank 0] Group 9 Loss: 5.3588
+[2025-07-09 05:20:15] [Rank 0] Group 10 Loss: 5.4000
+[2025-07-09 05:20:15] [Rank 0] Group 11 Loss: 5.3926
+[2025-07-09 05:20:15] [Rank 0] Group 0 FTA: 0.8336
+[2025-07-09 05:20:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:20:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 05:20:15] [Rank 0] Group 3 FTA: 0.9583
+[2025-07-09 05:20:15] [Rank 0] Group 4 FTA: 0.9375
+[2025-07-09 05:20:15] [Rank 0] Group 5 FTA: 0.9245
+[2025-07-09 05:20:15] [Rank 0] Group 6 FTA: 0.9401
+[2025-07-09 05:20:15] [Rank 0] Group 7 FTA: 0.9401
+[2025-07-09 05:20:15] [Rank 0] Group 8 FTA: 0.9375
+[2025-07-09 05:20:15] [Rank 0] Group 9 FTA: 0.9336
+[2025-07-09 05:20:15] [Rank 0] Group 10 FTA: 0.9375
+[2025-07-09 05:20:15] [Rank 0] Group 11 FTA: 0.9355
+[2025-07-09 05:20:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:20:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
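The log never spells out FTA; reading it as first-token accuracy (is the model's top-1 prediction for the first answer token correct?) is consistent with small groups pinning at exactly 1.0000. A sketch under that assumption, with hypothetical tensor names:

    import torch

    def first_token_accuracy(logits: torch.Tensor,
                             answer_ids: torch.Tensor,
                             answer_start: torch.Tensor) -> float:
        # logits: [batch, seq, vocab]; answer_start[i] is the position of the
        # first answer token of sample i, predicted from the position before it.
        preds = logits[torch.arange(logits.size(0)), answer_start - 1].argmax(-1)
        return (preds == answer_ids[:, 0]).float().mean().item()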
+[2025-07-09 05:20:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:20:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:20:17] [Rank 0] step:4501/10000 train_time:359909ms step_avg:79.96ms
+[2025-07-09 05:20:19] [Rank 0] step:4521/10000 train_time:361486ms step_avg:79.96ms
+[2025-07-09 05:20:20] [Rank 0] step:4541/10000 train_time:362969ms step_avg:79.93ms
+[2025-07-09 05:20:22] [Rank 0] step:4561/10000 train_time:364453ms step_avg:79.91ms
+[2025-07-09 05:20:23] [Rank 0] step:4581/10000 train_time:365936ms step_avg:79.88ms
+[2025-07-09 05:20:25] [Rank 0] step:4601/10000 train_time:367660ms step_avg:79.91ms
+[2025-07-09 05:20:26] [Rank 0] step:4621/10000 train_time:369143ms step_avg:79.88ms
+[2025-07-09 05:20:28] [Rank 0] step:4641/10000 train_time:370628ms step_avg:79.86ms
+[2025-07-09 05:20:29] [Rank 0] step:4661/10000 train_time:372114ms step_avg:79.84ms
+[2025-07-09 05:20:31] [Rank 0] step:4681/10000 train_time:373599ms step_avg:79.81ms
+[2025-07-09 05:20:33] [Rank 0] step:4701/10000 train_time:375743ms step_avg:79.93ms
+[2025-07-09 05:20:34] [Rank 0] step:4721/10000 train_time:377231ms step_avg:79.90ms
+[2025-07-09 05:20:36] [Rank 0] step:4741/10000 train_time:378719ms step_avg:79.88ms
+[2025-07-09 05:20:37] [Rank 0] step:4761/10000 train_time:380206ms step_avg:79.86ms
+[2025-07-09 05:20:39] [Rank 0] step:4781/10000 train_time:382357ms step_avg:79.97ms
+[2025-07-09 05:20:41] [Rank 0] step:4801/10000 train_time:383846ms step_avg:79.95ms
+[2025-07-09 05:20:42] [Rank 0] step:4821/10000 train_time:385336ms step_avg:79.93ms
+[2025-07-09 05:20:44] [Rank 0] step:4841/10000 train_time:386827ms step_avg:79.91ms
+[2025-07-09 05:20:46] [Rank 0] step:4861/10000 train_time:388368ms step_avg:79.89ms
+[2025-07-09 05:20:48] [Rank 0] step:4881/10000 train_time:390464ms step_avg:80.00ms
+[2025-07-09 05:20:49] [Rank 0] step:4901/10000 train_time:391956ms step_avg:79.97ms
+[2025-07-09 05:20:51] [Rank 0] step:4921/10000 train_time:393445ms step_avg:79.95ms
+[2025-07-09 05:20:52] [Rank 0] step:4941/10000 train_time:394937ms step_avg:79.93ms
+[2025-07-09 05:20:54] [Rank 0] step:4961/10000 train_time:397077ms step_avg:80.04ms
+[2025-07-09 05:20:56] [Rank 0] step:4981/10000 train_time:398567ms step_avg:80.02ms
+[2025-07-09 05:20:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:20:58] [Rank 0] PRINT: step:5000/10000 train_loss:0.8640 val_loss:0.8667 train_time:400058ms step_avg:80.01ms
+[2025-07-09 05:20:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:20:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:20:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:26:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:26:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:26:23] [Rank 0] Total Loss: 5.4526
+[2025-07-09 05:26:23] [Rank 0] Total FTA: 0.9576
+[2025-07-09 05:26:23] [Rank 0] Group 0 Loss: 5.5762
+[2025-07-09 05:26:23] [Rank 0] Group 1 Loss: 5.3907
+[2025-07-09 05:26:23] [Rank 0] Group 2 Loss: 5.2793
+[2025-07-09 05:26:23] [Rank 0] Group 3 Loss: 5.3844
+[2025-07-09 05:26:23] [Rank 0] Group 4 Loss: 5.5400
+[2025-07-09 05:26:23] [Rank 0] Group 5 Loss: 5.4198
+[2025-07-09 05:26:23] [Rank 0] Group 6 Loss: 5.3384
+[2025-07-09 05:26:23] [Rank 0] Group 7 Loss: 5.4394
+[2025-07-09 05:26:23] [Rank 0] Group 8 Loss: 5.5311
+[2025-07-09 05:26:23] [Rank 0] Group 9 Loss: 5.4222
+[2025-07-09 05:26:23] [Rank 0] Group 10 Loss: 5.4466
+[2025-07-09 05:26:23] [Rank 0] Group 11 Loss: 5.4821
+[2025-07-09 05:26:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 05:26:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:26:23] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 05:26:23] [Rank 0] Group 3 FTA: 0.9167
+[2025-07-09 05:26:23] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-09 05:26:23] [Rank 0] Group 5 FTA: 0.9688
+[2025-07-09 05:26:23] [Rank 0] Group 6 FTA: 0.9010
+[2025-07-09 05:26:23] [Rank 0] Group 7 FTA: 0.9323
+[2025-07-09 05:26:23] [Rank 0] Group 8 FTA: 0.9505
+[2025-07-09 05:26:23] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-09 05:26:23] [Rank 0] Group 10 FTA: 0.9473
+[2025-07-09 05:26:23] [Rank 0] Group 11 FTA: 0.9316
+[2025-07-09 05:26:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:26:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 05:26:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:26:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:26:25] [Rank 0] step:5001/10000 train_time:400080ms step_avg:80.00ms
+[2025-07-09 05:26:26] [Rank 0] step:5021/10000 train_time:401580ms step_avg:79.98ms
+[2025-07-09 05:26:28] [Rank 0] step:5041/10000 train_time:403063ms step_avg:79.96ms
+[2025-07-09 05:26:30] [Rank 0] step:5061/10000 train_time:405216ms step_avg:80.07ms
+[2025-07-09 05:26:31] [Rank 0] step:5081/10000 train_time:406698ms step_avg:80.04ms
+[2025-07-09 05:26:33] [Rank 0] step:5101/10000 train_time:408182ms step_avg:80.02ms
+[2025-07-09 05:26:34] [Rank 0] step:5121/10000 train_time:409669ms step_avg:80.00ms
+[2025-07-09 05:26:36] [Rank 0] step:5141/10000 train_time:411799ms step_avg:80.10ms
+[2025-07-09 05:26:38] [Rank 0] step:5161/10000 train_time:413285ms step_avg:80.08ms
+[2025-07-09 05:26:39] [Rank 0] step:5181/10000 train_time:414770ms step_avg:80.06ms
+[2025-07-09 05:26:41] [Rank 0] step:5201/10000 train_time:416256ms step_avg:80.03ms
+[2025-07-09 05:26:43] [Rank 0] step:5221/10000 train_time:417794ms step_avg:80.02ms
+[2025-07-09 05:26:44] [Rank 0] step:5241/10000 train_time:419875ms step_avg:80.11ms
+[2025-07-09 05:26:46] [Rank 0] step:5261/10000 train_time:421362ms step_avg:80.09ms
+[2025-07-09 05:26:47] [Rank 0] step:5281/10000 train_time:422851ms step_avg:80.07ms
+[2025-07-09 05:26:49] [Rank 0] step:5301/10000 train_time:424339ms step_avg:80.05ms
+[2025-07-09 05:26:51] [Rank 0] step:5321/10000 train_time:426490ms step_avg:80.15ms
+[2025-07-09 05:26:53] [Rank 0] step:5341/10000 train_time:427979ms step_avg:80.13ms
+[2025-07-09 05:26:54] [Rank 0] step:5361/10000 train_time:429468ms step_avg:80.11ms
+[2025-07-09 05:26:56] [Rank 0] step:5381/10000 train_time:430959ms step_avg:80.09ms
+[2025-07-09 05:26:58] [Rank 0] step:5401/10000 train_time:433127ms step_avg:80.19ms
+[2025-07-09 05:26:59] [Rank 0] step:5421/10000 train_time:434597ms step_avg:80.17ms
+[2025-07-09 05:27:01] [Rank 0] step:5441/10000 train_time:436088ms step_avg:80.15ms
+[2025-07-09 05:27:02] [Rank 0] step:5461/10000 train_time:437578ms step_avg:80.13ms
+[2025-07-09 05:27:04] [Rank 0] step:5481/10000 train_time:439068ms step_avg:80.11ms
+[2025-07-09 05:27:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:27:07] [Rank 0] PRINT: step:5500/10000 train_loss:0.8627 val_loss:0.8637 train_time:441209ms step_avg:80.22ms
+[2025-07-09 05:27:07] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:27:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:27:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:32:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:32:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:32:31] [Rank 0] Total Loss: 5.3808
+[2025-07-09 05:32:31] [Rank 0] Total FTA: 0.9755
+[2025-07-09 05:32:31] [Rank 0] Group 0 Loss: 5.6668
+[2025-07-09 05:32:31] [Rank 0] Group 1 Loss: 5.2425
+[2025-07-09 05:32:31] [Rank 0] Group 2 Loss: 5.1371
+[2025-07-09 05:32:31] [Rank 0] Group 3 Loss: 5.4400
+[2025-07-09 05:32:31] [Rank 0] Group 4 Loss: 5.3646
+[2025-07-09 05:32:31] [Rank 0] Group 5 Loss: 5.3538
+[2025-07-09 05:32:31] [Rank 0] Group 6 Loss: 5.3043
+[2025-07-09 05:32:31] [Rank 0] Group 7 Loss: 5.3267
+[2025-07-09 05:32:31] [Rank 0] Group 8 Loss: 5.3887
+[2025-07-09 05:32:31] [Rank 0] Group 9 Loss: 5.3449
+[2025-07-09 05:32:31] [Rank 0] Group 10 Loss: 5.3387
+[2025-07-09 05:32:31] [Rank 0] Group 11 Loss: 5.3792
+[2025-07-09 05:32:31] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 05:32:31] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:32:31] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 05:32:31] [Rank 0] Group 3 FTA: 0.9557
+[2025-07-09 05:32:31] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-09 05:32:31] [Rank 0] Group 5 FTA: 0.9661
+[2025-07-09 05:32:31] [Rank 0] Group 6 FTA: 0.9766
+[2025-07-09 05:32:31] [Rank 0] Group 7 FTA: 0.9688
+[2025-07-09 05:32:31] [Rank 0] Group 8 FTA: 0.9661
+[2025-07-09 05:32:31] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-09 05:32:31] [Rank 0] Group 10 FTA: 0.9707
+[2025-07-09 05:32:31] [Rank 0] Group 11 FTA: 0.9570
+[2025-07-09 05:32:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:32:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 05:32:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:32:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:32:32] [Rank 0] step:5501/10000 train_time:441231ms step_avg:80.21ms
+[2025-07-09 05:32:34] [Rank 0] step:5521/10000 train_time:442724ms step_avg:80.19ms
+[2025-07-09 05:32:35] [Rank 0] step:5541/10000 train_time:444207ms step_avg:80.17ms
+[2025-07-09 05:32:37] [Rank 0] step:5561/10000 train_time:445836ms step_avg:80.17ms
+[2025-07-09 05:32:39] [Rank 0] step:5581/10000 train_time:448055ms step_avg:80.28ms
+[2025-07-09 05:32:41] [Rank 0] step:5601/10000 train_time:449548ms step_avg:80.26ms
+[2025-07-09 05:32:42] [Rank 0] step:5621/10000 train_time:451034ms step_avg:80.24ms
+[2025-07-09 05:32:44] [Rank 0] step:5641/10000 train_time:452521ms step_avg:80.22ms
+[2025-07-09 05:32:45] [Rank 0] step:5661/10000 train_time:454007ms step_avg:80.20ms
+[2025-07-09 05:32:47] [Rank 0] step:5681/10000 train_time:456135ms step_avg:80.29ms
+[2025-07-09 05:32:49] [Rank 0] step:5701/10000 train_time:457620ms step_avg:80.27ms
+[2025-07-09 05:32:50] [Rank 0] step:5721/10000 train_time:459107ms step_avg:80.25ms
+[2025-07-09 05:32:52] [Rank 0] step:5741/10000 train_time:460593ms step_avg:80.23ms
+[2025-07-09 05:32:54] [Rank 0] step:5761/10000 train_time:462083ms step_avg:80.21ms
+[2025-07-09 05:32:55] [Rank 0] step:5781/10000 train_time:464210ms step_avg:80.30ms
+[2025-07-09 05:32:57] [Rank 0] step:5801/10000 train_time:465699ms step_avg:80.28ms
+[2025-07-09 05:32:58] [Rank 0] step:5821/10000 train_time:467187ms step_avg:80.26ms
+[2025-07-09 05:33:00] [Rank 0] step:5841/10000 train_time:468677ms step_avg:80.24ms
+[2025-07-09 05:33:01] [Rank 0] step:5861/10000 train_time:470405ms step_avg:80.26ms
+[2025-07-09 05:33:03] [Rank 0] step:5881/10000 train_time:471896ms step_avg:80.24ms
+[2025-07-09 05:33:04] [Rank 0] step:5901/10000 train_time:473385ms step_avg:80.22ms
+[2025-07-09 05:33:06] [Rank 0] step:5921/10000 train_time:474874ms step_avg:80.20ms
+[2025-07-09 05:33:08] [Rank 0] step:5941/10000 train_time:476415ms step_avg:80.19ms
+[2025-07-09 05:33:10] [Rank 0] step:5961/10000 train_time:478520ms step_avg:80.28ms
+[2025-07-09 05:33:11] [Rank 0] step:5981/10000 train_time:480009ms step_avg:80.26ms
+[2025-07-09 05:33:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:33:13] [Rank 0] PRINT: step:6000/10000 train_loss:0.8617 val_loss:0.8629 train_time:481498ms step_avg:80.25ms
+[2025-07-09 05:33:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:33:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:33:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:38:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:38:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:38:41] [Rank 0] Total Loss: 5.5219
+[2025-07-09 05:38:41] [Rank 0] Total FTA: 0.9700
+[2025-07-09 05:38:41] [Rank 0] Group 0 Loss: 5.7043
+[2025-07-09 05:38:41] [Rank 0] Group 1 Loss: 5.5294
+[2025-07-09 05:38:41] [Rank 0] Group 2 Loss: 5.3266
+[2025-07-09 05:38:41] [Rank 0] Group 3 Loss: 5.6067
+[2025-07-09 05:38:41] [Rank 0] Group 4 Loss: 5.5297
+[2025-07-09 05:38:41] [Rank 0] Group 5 Loss: 5.4886
+[2025-07-09 05:38:41] [Rank 0] Group 6 Loss: 5.4661
+[2025-07-09 05:38:41] [Rank 0] Group 7 Loss: 5.4930
+[2025-07-09 05:38:41] [Rank 0] Group 8 Loss: 5.5330
+[2025-07-09 05:38:41] [Rank 0] Group 9 Loss: 5.5059
+[2025-07-09 05:38:41] [Rank 0] Group 10 Loss: 5.4838
+[2025-07-09 05:38:41] [Rank 0] Group 11 Loss: 5.4837
+[2025-07-09 05:38:41] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 05:38:41] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:38:41] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 05:38:41] [Rank 0] Group 3 FTA: 0.9609
+[2025-07-09 05:38:41] [Rank 0] Group 4 FTA: 0.9818
+[2025-07-09 05:38:41] [Rank 0] Group 5 FTA: 0.9349
+[2025-07-09 05:38:41] [Rank 0] Group 6 FTA: 0.9688
+[2025-07-09 05:38:41] [Rank 0] Group 7 FTA: 0.9635
+[2025-07-09 05:38:41] [Rank 0] Group 8 FTA: 0.9427
+[2025-07-09 05:38:41] [Rank 0] Group 9 FTA: 0.9688
+[2025-07-09 05:38:41] [Rank 0] Group 10 FTA: 0.9609
+[2025-07-09 05:38:41] [Rank 0] Group 11 FTA: 0.9551
+[2025-07-09 05:38:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:38:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 05:38:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:38:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:38:42] [Rank 0] step:6001/10000 train_time:481521ms step_avg:80.24ms
+[2025-07-09 05:38:44] [Rank 0] step:6021/10000 train_time:483015ms step_avg:80.22ms
+[2025-07-09 05:38:46] [Rank 0] step:6041/10000 train_time:485166ms step_avg:80.31ms
+[2025-07-09 05:38:47] [Rank 0] step:6061/10000 train_time:486647ms step_avg:80.29ms
+[2025-07-09 05:38:49] [Rank 0] step:6081/10000 train_time:488130ms step_avg:80.27ms
+[2025-07-09 05:38:50] [Rank 0] step:6101/10000 train_time:489615ms step_avg:80.25ms
+[2025-07-09 05:38:52] [Rank 0] step:6121/10000 train_time:491153ms step_avg:80.24ms
+[2025-07-09 05:38:54] [Rank 0] step:6141/10000 train_time:493237ms step_avg:80.32ms
+[2025-07-09 05:38:55] [Rank 0] step:6161/10000 train_time:494723ms step_avg:80.30ms
+[2025-07-09 05:38:57] [Rank 0] step:6181/10000 train_time:496451ms step_avg:80.32ms
+[2025-07-09 05:38:59] [Rank 0] step:6201/10000 train_time:498092ms step_avg:80.32ms
+[2025-07-09 05:39:01] [Rank 0] step:6221/10000 train_time:500240ms step_avg:80.41ms
+[2025-07-09 05:39:02] [Rank 0] step:6241/10000 train_time:501727ms step_avg:80.39ms
+[2025-07-09 05:39:04] [Rank 0] step:6261/10000 train_time:503217ms step_avg:80.37ms
+[2025-07-09 05:39:05] [Rank 0] step:6281/10000 train_time:504706ms step_avg:80.35ms
+[2025-07-09 05:39:08] [Rank 0] step:6301/10000 train_time:506449ms step_avg:80.38ms
+[2025-07-09 05:39:09] [Rank 0] step:6321/10000 train_time:508346ms step_avg:80.42ms
+[2025-07-09 05:39:11] [Rank 0] step:6341/10000 train_time:509835ms step_avg:80.40ms
+[2025-07-09 05:39:12] [Rank 0] step:6361/10000 train_time:511324ms step_avg:80.38ms
+[2025-07-09 05:39:14] [Rank 0] step:6381/10000 train_time:512815ms step_avg:80.37ms
+[2025-07-09 05:39:16] [Rank 0] step:6401/10000 train_time:514961ms step_avg:80.45ms
+[2025-07-09 05:39:17] [Rank 0] step:6421/10000 train_time:516451ms step_avg:80.43ms
+[2025-07-09 05:39:19] [Rank 0] step:6441/10000 train_time:517942ms step_avg:80.41ms
+[2025-07-09 05:39:20] [Rank 0] step:6461/10000 train_time:519434ms step_avg:80.40ms
+[2025-07-09 05:39:22] [Rank 0] step:6481/10000 train_time:521598ms step_avg:80.48ms
+[2025-07-09 05:39:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:39:25] [Rank 0] PRINT: step:6500/10000 train_loss:0.8609 val_loss:0.8608 train_time:523067ms step_avg:80.47ms
+[2025-07-09 05:39:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:39:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:39:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:44:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:44:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:44:53] [Rank 0] Total Loss: 5.4743
+[2025-07-09 05:44:53] [Rank 0] Total FTA: 0.9673
+[2025-07-09 05:44:53] [Rank 0] Group 0 Loss: 5.9077
+[2025-07-09 05:44:53] [Rank 0] Group 1 Loss: 5.4209
+[2025-07-09 05:44:53] [Rank 0] Group 2 Loss: 5.2894
+[2025-07-09 05:44:53] [Rank 0] Group 3 Loss: 5.4820
+[2025-07-09 05:44:53] [Rank 0] Group 4 Loss: 5.4608
+[2025-07-09 05:44:53] [Rank 0] Group 5 Loss: 5.3868
+[2025-07-09 05:44:53] [Rank 0] Group 6 Loss: 5.3512
+[2025-07-09 05:44:53] [Rank 0] Group 7 Loss: 5.4404
+[2025-07-09 05:44:53] [Rank 0] Group 8 Loss: 5.4088
+[2025-07-09 05:44:53] [Rank 0] Group 9 Loss: 5.3532
+[2025-07-09 05:44:53] [Rank 0] Group 10 Loss: 5.4237
+[2025-07-09 05:44:53] [Rank 0] Group 11 Loss: 5.4123
+[2025-07-09 05:44:53] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 05:44:53] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:44:53] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 05:44:53] [Rank 0] Group 3 FTA: 0.9193
+[2025-07-09 05:44:53] [Rank 0] Group 4 FTA: 0.9453
+[2025-07-09 05:44:53] [Rank 0] Group 5 FTA: 0.9740
+[2025-07-09 05:44:53] [Rank 0] Group 6 FTA: 0.9531
+[2025-07-09 05:44:53] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-09 05:44:53] [Rank 0] Group 8 FTA: 0.9531
+[2025-07-09 05:44:53] [Rank 0] Group 9 FTA: 0.9805
+[2025-07-09 05:44:53] [Rank 0] Group 10 FTA: 0.9570
+[2025-07-09 05:44:53] [Rank 0] Group 11 FTA: 0.9619
+[2025-07-09 05:44:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:44:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 05:44:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:44:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:44:54] [Rank 0] step:6501/10000 train_time:523090ms step_avg:80.46ms
+[2025-07-09 05:44:56] [Rank 0] step:6521/10000 train_time:524593ms step_avg:80.45ms
+[2025-07-09 05:44:57] [Rank 0] step:6541/10000 train_time:526076ms step_avg:80.43ms
+[2025-07-09 05:44:59] [Rank 0] step:6561/10000 train_time:527557ms step_avg:80.41ms
+[2025-07-09 05:45:01] [Rank 0] step:6581/10000 train_time:529695ms step_avg:80.49ms
+[2025-07-09 05:45:02] [Rank 0] step:6601/10000 train_time:531179ms step_avg:80.47ms
+[2025-07-09 05:45:04] [Rank 0] step:6621/10000 train_time:532665ms step_avg:80.45ms
+[2025-07-09 05:45:05] [Rank 0] step:6641/10000 train_time:534149ms step_avg:80.43ms
+[2025-07-09 05:45:07] [Rank 0] step:6661/10000 train_time:535687ms step_avg:80.42ms
+[2025-07-09 05:45:09] [Rank 0] step:6681/10000 train_time:537769ms step_avg:80.49ms
+[2025-07-09 05:45:10] [Rank 0] step:6701/10000 train_time:539257ms step_avg:80.47ms
+[2025-07-09 05:45:12] [Rank 0] step:6721/10000 train_time:540742ms step_avg:80.46ms
+[2025-07-09 05:45:13] [Rank 0] step:6741/10000 train_time:542231ms step_avg:80.44ms
+[2025-07-09 05:45:16] [Rank 0] step:6761/10000 train_time:544465ms step_avg:80.53ms
+[2025-07-09 05:45:17] [Rank 0] step:6781/10000 train_time:546069ms step_avg:80.53ms
+[2025-07-09 05:45:19] [Rank 0] step:6801/10000 train_time:547647ms step_avg:80.52ms
+[2025-07-09 05:45:20] [Rank 0] step:6821/10000 train_time:549136ms step_avg:80.51ms
+[2025-07-09 05:45:22] [Rank 0] step:6841/10000 train_time:551312ms step_avg:80.59ms
+[2025-07-09 05:45:24] [Rank 0] step:6861/10000 train_time:552782ms step_avg:80.57ms
+[2025-07-09 05:45:25] [Rank 0] step:6881/10000 train_time:554274ms step_avg:80.55ms
+[2025-07-09 05:45:27] [Rank 0] step:6901/10000 train_time:555764ms step_avg:80.53ms
+[2025-07-09 05:45:28] [Rank 0] step:6921/10000 train_time:557255ms step_avg:80.52ms
+[2025-07-09 05:45:31] [Rank 0] step:6941/10000 train_time:559396ms step_avg:80.59ms
+[2025-07-09 05:45:32] [Rank 0] step:6961/10000 train_time:560886ms step_avg:80.58ms
+[2025-07-09 05:45:34] [Rank 0] step:6981/10000 train_time:562376ms step_avg:80.56ms
+[2025-07-09 05:45:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:45:36] [Rank 0] PRINT: step:7000/10000 train_loss:0.8597 val_loss:0.8605 train_time:563868ms step_avg:80.55ms
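Worth noting from the timestamps: at this cadence the detailed evaluation dominates wall-clock. The 500 training steps from 6501 to 7000 account for about 41 s of train_time, while the evaluation pass that follows runs from 05:45:36 to 05:51:02, about 326 s; train_time and step_avg exclude evaluation time entirely:

    train_segment_s = (563868 - 523090) / 1000              # ~40.8 s per 500 steps
    eval_pass_s = (51 * 60 + 2) - (45 * 60 + 36)            # 326 s per detailed eval
    eval_fraction = eval_pass_s / (eval_pass_s + train_segment_s)  # ~0.89 of wall-clock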
+[2025-07-09 05:45:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:51:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:51:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:51:02] [Rank 0] Total Loss: 5.4798
+[2025-07-09 05:51:02] [Rank 0] Total FTA: 0.9487
+[2025-07-09 05:51:02] [Rank 0] Group 0 Loss: 5.9140
+[2025-07-09 05:51:02] [Rank 0] Group 1 Loss: 5.3533
+[2025-07-09 05:51:02] [Rank 0] Group 2 Loss: 5.2725
+[2025-07-09 05:51:02] [Rank 0] Group 3 Loss: 5.3084
+[2025-07-09 05:51:02] [Rank 0] Group 4 Loss: 5.4244
+[2025-07-09 05:51:02] [Rank 0] Group 5 Loss: 5.4368
+[2025-07-09 05:51:02] [Rank 0] Group 6 Loss: 5.3937
+[2025-07-09 05:51:02] [Rank 0] Group 7 Loss: 5.4539
+[2025-07-09 05:51:02] [Rank 0] Group 8 Loss: 5.5126
+[2025-07-09 05:51:02] [Rank 0] Group 9 Loss: 5.3706
+[2025-07-09 05:51:02] [Rank 0] Group 10 Loss: 5.4583
+[2025-07-09 05:51:02] [Rank 0] Group 11 Loss: 5.4478
+[2025-07-09 05:51:02] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 05:51:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:51:02] [Rank 0] Group 2 FTA: 0.8984
+[2025-07-09 05:51:02] [Rank 0] Group 3 FTA: 0.8698
+[2025-07-09 05:51:02] [Rank 0] Group 4 FTA: 0.8255
+[2025-07-09 05:51:02] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-09 05:51:02] [Rank 0] Group 6 FTA: 0.9870
+[2025-07-09 05:51:02] [Rank 0] Group 7 FTA: 0.9766
+[2025-07-09 05:51:02] [Rank 0] Group 8 FTA: 0.9427
+[2025-07-09 05:51:02] [Rank 0] Group 9 FTA: 0.9766
+[2025-07-09 05:51:02] [Rank 0] Group 10 FTA: 0.9473
+[2025-07-09 05:51:02] [Rank 0] Group 11 FTA: 0.9648
+[2025-07-09 05:51:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:51:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 05:51:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:51:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:51:03] [Rank 0] step:7001/10000 train_time:563890ms step_avg:80.54ms
+[2025-07-09 05:51:06] [Rank 0] step:7021/10000 train_time:565442ms step_avg:80.54ms
+[2025-07-09 05:51:07] [Rank 0] step:7041/10000 train_time:567537ms step_avg:80.60ms
+[2025-07-09 05:51:08] [Rank 0] step:7061/10000 train_time:569019ms step_avg:80.59ms
+[2025-07-09 05:51:10] [Rank 0] step:7081/10000 train_time:570504ms step_avg:80.57ms
+[2025-07-09 05:51:11] [Rank 0] step:7101/10000 train_time:571987ms step_avg:80.55ms
+[2025-07-09 05:51:13] [Rank 0] step:7121/10000 train_time:573705ms step_avg:80.57ms
+[2025-07-09 05:51:15] [Rank 0] step:7141/10000 train_time:575190ms step_avg:80.55ms
+[2025-07-09 05:51:16] [Rank 0] step:7161/10000 train_time:576675ms step_avg:80.53ms
+[2025-07-09 05:51:18] [Rank 0] step:7181/10000 train_time:578163ms step_avg:80.51ms
+[2025-07-09 05:51:20] [Rank 0] step:7201/10000 train_time:579701ms step_avg:80.50ms
+[2025-07-09 05:51:21] [Rank 0] step:7221/10000 train_time:581781ms step_avg:80.57ms
+[2025-07-09 05:51:23] [Rank 0] step:7241/10000 train_time:583269ms step_avg:80.55ms
+[2025-07-09 05:51:24] [Rank 0] step:7261/10000 train_time:584759ms step_avg:80.53ms
+[2025-07-09 05:51:26] [Rank 0] step:7281/10000 train_time:586248ms step_avg:80.52ms
+[2025-07-09 05:51:28] [Rank 0] step:7301/10000 train_time:588395ms step_avg:80.59ms
+[2025-07-09 05:51:29] [Rank 0] step:7321/10000 train_time:589885ms step_avg:80.57ms
+[2025-07-09 05:51:31] [Rank 0] step:7341/10000 train_time:591376ms step_avg:80.56ms
+[2025-07-09 05:51:32] [Rank 0] step:7361/10000 train_time:592866ms step_avg:80.54ms
+[2025-07-09 05:51:34] [Rank 0] step:7381/10000 train_time:594408ms step_avg:80.53ms
+[2025-07-09 05:51:36] [Rank 0] step:7401/10000 train_time:596648ms step_avg:80.62ms
+[2025-07-09 05:51:38] [Rank 0] step:7421/10000 train_time:598205ms step_avg:80.61ms
+[2025-07-09 05:51:39] [Rank 0] step:7441/10000 train_time:599697ms step_avg:80.59ms
+[2025-07-09 05:51:41] [Rank 0] step:7461/10000 train_time:601190ms step_avg:80.58ms
+[2025-07-09 05:51:43] [Rank 0] step:7481/10000 train_time:603343ms step_avg:80.65ms
+[2025-07-09 05:51:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:51:45] [Rank 0] PRINT: step:7500/10000 train_loss:0.8589 val_loss:0.8595 train_time:604833ms step_avg:80.64ms
+[2025-07-09 05:51:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:51:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:51:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 05:57:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 05:57:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 05:57:10] [Rank 0] Total Loss: 5.5100
+[2025-07-09 05:57:10] [Rank 0] Total FTA: 0.9712
+[2025-07-09 05:57:10] [Rank 0] Group 0 Loss: 6.0757
+[2025-07-09 05:57:10] [Rank 0] Group 1 Loss: 5.3213
+[2025-07-09 05:57:10] [Rank 0] Group 2 Loss: 5.2881
+[2025-07-09 05:57:10] [Rank 0] Group 3 Loss: 5.5358
+[2025-07-09 05:57:10] [Rank 0] Group 4 Loss: 5.4762
+[2025-07-09 05:57:10] [Rank 0] Group 5 Loss: 5.3307
+[2025-07-09 05:57:10] [Rank 0] Group 6 Loss: 5.3525
+[2025-07-09 05:57:10] [Rank 0] Group 7 Loss: 5.4525
+[2025-07-09 05:57:10] [Rank 0] Group 8 Loss: 5.5463
+[2025-07-09 05:57:10] [Rank 0] Group 9 Loss: 5.4303
+[2025-07-09 05:57:10] [Rank 0] Group 10 Loss: 5.3803
+[2025-07-09 05:57:10] [Rank 0] Group 11 Loss: 5.4612
+[2025-07-09 05:57:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 05:57:10] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 05:57:10] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 05:57:10] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-09 05:57:10] [Rank 0] Group 4 FTA: 0.9271
+[2025-07-09 05:57:10] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-09 05:57:10] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-09 05:57:10] [Rank 0] Group 7 FTA: 0.9427
+[2025-07-09 05:57:10] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-09 05:57:10] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-09 05:57:10] [Rank 0] Group 10 FTA: 0.9590
+[2025-07-09 05:57:10] [Rank 0] Group 11 FTA: 0.9639
+[2025-07-09 05:57:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 05:57:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 05:57:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 05:57:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 05:57:11] [Rank 0] step:7501/10000 train_time:604856ms step_avg:80.64ms
+[2025-07-09 05:57:13] [Rank 0] step:7521/10000 train_time:606350ms step_avg:80.62ms
+[2025-07-09 05:57:14] [Rank 0] step:7541/10000 train_time:607831ms step_avg:80.60ms
+[2025-07-09 05:57:16] [Rank 0] step:7561/10000 train_time:609314ms step_avg:80.59ms
+[2025-07-09 05:57:18] [Rank 0] step:7581/10000 train_time:611468ms step_avg:80.66ms
+[2025-07-09 05:57:19] [Rank 0] step:7601/10000 train_time:612950ms step_avg:80.64ms
+[2025-07-09 05:57:21] [Rank 0] step:7621/10000 train_time:614438ms step_avg:80.62ms
+[2025-07-09 05:57:22] [Rank 0] step:7641/10000 train_time:615925ms step_avg:80.61ms
+[2025-07-09 05:57:24] [Rank 0] step:7661/10000 train_time:618053ms step_avg:80.68ms
+[2025-07-09 05:57:26] [Rank 0] step:7681/10000 train_time:619538ms step_avg:80.66ms
+[2025-07-09 05:57:27] [Rank 0] step:7701/10000 train_time:621024ms step_avg:80.64ms
+[2025-07-09 05:57:29] [Rank 0] step:7721/10000 train_time:622513ms step_avg:80.63ms
+[2025-07-09 05:57:31] [Rank 0] step:7741/10000 train_time:624254ms step_avg:80.64ms
+[2025-07-09 05:57:33] [Rank 0] step:7761/10000 train_time:626128ms step_avg:80.68ms
+[2025-07-09 05:57:34] [Rank 0] step:7781/10000 train_time:627617ms step_avg:80.66ms
+[2025-07-09 05:57:36] [Rank 0] step:7801/10000 train_time:629107ms step_avg:80.64ms
+[2025-07-09 05:57:37] [Rank 0] step:7821/10000 train_time:630594ms step_avg:80.63ms
+[2025-07-09 05:57:39] [Rank 0] step:7841/10000 train_time:632741ms step_avg:80.70ms
+[2025-07-09 05:57:41] [Rank 0] step:7861/10000 train_time:634229ms step_avg:80.68ms
+[2025-07-09 05:57:42] [Rank 0] step:7881/10000 train_time:635720ms step_avg:80.66ms
+[2025-07-09 05:57:44] [Rank 0] step:7901/10000 train_time:637208ms step_avg:80.65ms
+[2025-07-09 05:57:46] [Rank 0] step:7921/10000 train_time:638698ms step_avg:80.63ms
+[2025-07-09 05:57:47] [Rank 0] step:7941/10000 train_time:640854ms step_avg:80.70ms
+[2025-07-09 05:57:49] [Rank 0] step:7961/10000 train_time:642342ms step_avg:80.69ms
+[2025-07-09 05:57:50] [Rank 0] step:7981/10000 train_time:643832ms step_avg:80.67ms
+[2025-07-09 05:57:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 05:57:53] [Rank 0] PRINT: step:8000/10000 train_loss:0.8579 val_loss:0.8590 train_time:645323ms step_avg:80.67ms
+[2025-07-09 05:57:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 05:57:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 05:57:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:03:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:03:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:03:21] [Rank 0] Total Loss: 5.4489
+[2025-07-09 06:03:21] [Rank 0] Total FTA: 0.9698
+[2025-07-09 06:03:21] [Rank 0] Group 0 Loss: 5.7683
+[2025-07-09 06:03:21] [Rank 0] Group 1 Loss: 5.3111
+[2025-07-09 06:03:21] [Rank 0] Group 2 Loss: 5.3045
+[2025-07-09 06:03:21] [Rank 0] Group 3 Loss: 5.3848
+[2025-07-09 06:03:21] [Rank 0] Group 4 Loss: 5.4154
+[2025-07-09 06:03:21] [Rank 0] Group 5 Loss: 5.3975
+[2025-07-09 06:03:21] [Rank 0] Group 6 Loss: 5.3216
+[2025-07-09 06:03:21] [Rank 0] Group 7 Loss: 5.3985
+[2025-07-09 06:03:21] [Rank 0] Group 8 Loss: 5.5326
+[2025-07-09 06:03:21] [Rank 0] Group 9 Loss: 5.4179
+[2025-07-09 06:03:21] [Rank 0] Group 10 Loss: 5.4102
+[2025-07-09 06:03:21] [Rank 0] Group 11 Loss: 5.4332
+[2025-07-09 06:03:21] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 06:03:21] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 06:03:21] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 06:03:21] [Rank 0] Group 3 FTA: 0.9505
+[2025-07-09 06:03:21] [Rank 0] Group 4 FTA: 0.9635
+[2025-07-09 06:03:21] [Rank 0] Group 5 FTA: 0.9375
+[2025-07-09 06:03:21] [Rank 0] Group 6 FTA: 0.9661
+[2025-07-09 06:03:21] [Rank 0] Group 7 FTA: 0.9349
+[2025-07-09 06:03:21] [Rank 0] Group 8 FTA: 0.9609
+[2025-07-09 06:03:21] [Rank 0] Group 9 FTA: 0.9609
+[2025-07-09 06:03:21] [Rank 0] Group 10 FTA: 0.9766
+[2025-07-09 06:03:21] [Rank 0] Group 11 FTA: 0.9629
+[2025-07-09 06:03:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 06:03:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 06:03:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 06:03:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 06:03:23] [Rank 0] step:8001/10000 train_time:645346ms step_avg:80.66ms
+[2025-07-09 06:03:25] [Rank 0] step:8021/10000 train_time:647504ms step_avg:80.73ms
+[2025-07-09 06:03:26] [Rank 0] step:8041/10000 train_time:648985ms step_avg:80.71ms
+[2025-07-09 06:03:28] [Rank 0] step:8061/10000 train_time:650469ms step_avg:80.69ms
+[2025-07-09 06:03:29] [Rank 0] step:8081/10000 train_time:651955ms step_avg:80.68ms
+[2025-07-09 06:03:31] [Rank 0] step:8101/10000 train_time:653488ms step_avg:80.67ms
+[2025-07-09 06:03:33] [Rank 0] step:8121/10000 train_time:655577ms step_avg:80.73ms
+[2025-07-09 06:03:34] [Rank 0] step:8141/10000 train_time:657062ms step_avg:80.71ms
+[2025-07-09 06:03:36] [Rank 0] step:8161/10000 train_time:658550ms step_avg:80.69ms
+[2025-07-09 06:03:37] [Rank 0] step:8181/10000 train_time:660038ms step_avg:80.68ms
+[2025-07-09 06:03:40] [Rank 0] step:8201/10000 train_time:662192ms step_avg:80.75ms
+[2025-07-09 06:03:41] [Rank 0] step:8221/10000 train_time:663678ms step_avg:80.73ms
+[2025-07-09 06:03:43] [Rank 0] step:8241/10000 train_time:665167ms step_avg:80.71ms
+[2025-07-09 06:03:44] [Rank 0] step:8261/10000 train_time:666656ms step_avg:80.70ms
+[2025-07-09 06:03:46] [Rank 0] step:8281/10000 train_time:668143ms step_avg:80.68ms
+[2025-07-09 06:03:48] [Rank 0] step:8301/10000 train_time:670300ms step_avg:80.75ms
+[2025-07-09 06:03:49] [Rank 0] step:8321/10000 train_time:671790ms step_avg:80.73ms
+[2025-07-09 06:03:51] [Rank 0] step:8341/10000 train_time:673279ms step_avg:80.72ms
+[2025-07-09 06:03:52] [Rank 0] step:8361/10000 train_time:674770ms step_avg:80.70ms
+[2025-07-09 06:03:54] [Rank 0] step:8381/10000 train_time:676915ms step_avg:80.77ms
+[2025-07-09 06:03:56] [Rank 0] step:8401/10000 train_time:678404ms step_avg:80.75ms
+[2025-07-09 06:03:57] [Rank 0] step:8421/10000 train_time:679894ms step_avg:80.74ms
+[2025-07-09 06:03:59] [Rank 0] step:8441/10000 train_time:681384ms step_avg:80.72ms
+[2025-07-09 06:04:01] [Rank 0] step:8461/10000 train_time:683554ms step_avg:80.79ms
+[2025-07-09 06:04:02] [Rank 0] step:8481/10000 train_time:685026ms step_avg:80.77ms
+[2025-07-09 06:04:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:04:05] [Rank 0] PRINT: step:8500/10000 train_loss:0.8571 val_loss:0.8588 train_time:686515ms step_avg:80.77ms
+[2025-07-09 06:04:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 06:04:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 06:04:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:09:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:09:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:09:32] [Rank 0] Total Loss: 5.4931
+[2025-07-09 06:09:32] [Rank 0] Total FTA: 0.9620
+[2025-07-09 06:09:32] [Rank 0] Group 0 Loss: 5.8018
+[2025-07-09 06:09:32] [Rank 0] Group 1 Loss: 5.3762
+[2025-07-09 06:09:32] [Rank 0] Group 2 Loss: 5.3713
+[2025-07-09 06:09:32] [Rank 0] Group 3 Loss: 5.4899
+[2025-07-09 06:09:32] [Rank 0] Group 4 Loss: 5.5299
+[2025-07-09 06:09:32] [Rank 0] Group 5 Loss: 5.3869
+[2025-07-09 06:09:32] [Rank 0] Group 6 Loss: 5.4280
+[2025-07-09 06:09:32] [Rank 0] Group 7 Loss: 5.4719
+[2025-07-09 06:09:32] [Rank 0] Group 8 Loss: 5.4536
+[2025-07-09 06:09:32] [Rank 0] Group 9 Loss: 5.3533
+[2025-07-09 06:09:32] [Rank 0] Group 10 Loss: 5.4744
+[2025-07-09 06:09:32] [Rank 0] Group 11 Loss: 5.4696
+[2025-07-09 06:09:32] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 06:09:32] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 06:09:32] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 06:09:32] [Rank 0] Group 3 FTA: 0.9141
+[2025-07-09 06:09:32] [Rank 0] Group 4 FTA: 0.9167
+[2025-07-09 06:09:32] [Rank 0] Group 5 FTA: 0.9661
+[2025-07-09 06:09:32] [Rank 0] Group 6 FTA: 0.9557
+[2025-07-09 06:09:32] [Rank 0] Group 7 FTA: 0.9505
+[2025-07-09 06:09:32] [Rank 0] Group 8 FTA: 0.9375
+[2025-07-09 06:09:32] [Rank 0] Group 9 FTA: 0.9570
+[2025-07-09 06:09:32] [Rank 0] Group 10 FTA: 0.9512
+[2025-07-09 06:09:32] [Rank 0] Group 11 FTA: 0.9609
+[2025-07-09 06:09:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 06:09:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 06:09:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 06:09:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 06:09:34] [Rank 0] step:8501/10000 train_time:686538ms step_avg:80.76ms
+[2025-07-09 06:09:35] [Rank 0] step:8521/10000 train_time:688018ms step_avg:80.74ms
+[2025-07-09 06:09:37] [Rank 0] step:8541/10000 train_time:689500ms step_avg:80.73ms
+[2025-07-09 06:09:39] [Rank 0] step:8561/10000 train_time:691627ms step_avg:80.79ms
+[2025-07-09 06:09:40] [Rank 0] step:8581/10000 train_time:693109ms step_avg:80.77ms
+[2025-07-09 06:09:42] [Rank 0] step:8601/10000 train_time:694594ms step_avg:80.76ms
+[2025-07-09 06:09:43] [Rank 0] step:8621/10000 train_time:696079ms step_avg:80.74ms
+[2025-07-09 06:09:45] [Rank 0] step:8641/10000 train_time:697618ms step_avg:80.73ms
+[2025-07-09 06:09:46] [Rank 0] step:8661/10000 train_time:699290ms step_avg:80.74ms
+[2025-07-09 06:09:48] [Rank 0] step:8681/10000 train_time:700777ms step_avg:80.73ms
+[2025-07-09 06:09:49] [Rank 0] step:8701/10000 train_time:702264ms step_avg:80.71ms
+[2025-07-09 06:09:51] [Rank 0] step:8721/10000 train_time:703751ms step_avg:80.70ms
+[2025-07-09 06:09:53] [Rank 0] step:8741/10000 train_time:705903ms step_avg:80.76ms
+[2025-07-09 06:09:55] [Rank 0] step:8761/10000 train_time:707392ms step_avg:80.74ms
+[2025-07-09 06:09:56] [Rank 0] step:8781/10000 train_time:708881ms step_avg:80.73ms
+[2025-07-09 06:09:58] [Rank 0] step:8801/10000 train_time:710370ms step_avg:80.71ms
+[2025-07-09 06:09:59] [Rank 0] step:8821/10000 train_time:711914ms step_avg:80.71ms
+[2025-07-09 06:10:01] [Rank 0] step:8841/10000 train_time:713588ms step_avg:80.71ms
+[2025-07-09 06:10:02] [Rank 0] step:8861/10000 train_time:715079ms step_avg:80.70ms
+[2025-07-09 06:10:04] [Rank 0] step:8881/10000 train_time:716567ms step_avg:80.69ms
+[2025-07-09 06:10:05] [Rank 0] step:8901/10000 train_time:718056ms step_avg:80.67ms
+[2025-07-09 06:10:07] [Rank 0] step:8921/10000 train_time:719780ms step_avg:80.68ms
+[2025-07-09 06:10:08] [Rank 0] step:8941/10000 train_time:721272ms step_avg:80.67ms
+[2025-07-09 06:10:10] [Rank 0] step:8961/10000 train_time:722759ms step_avg:80.66ms
+[2025-07-09 06:10:11] [Rank 0] step:8981/10000 train_time:724251ms step_avg:80.64ms
+[2025-07-09 06:10:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:10:14] [Rank 0] PRINT: step:9000/10000 train_loss:0.8564 val_loss:0.8586 train_time:725741ms step_avg:80.64ms
+[2025-07-09 06:10:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 06:10:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 06:10:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:15:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:15:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:15:40] [Rank 0] Total Loss: 5.4678
+[2025-07-09 06:15:40] [Rank 0] Total FTA: 0.9730
+[2025-07-09 06:15:40] [Rank 0] Group 0 Loss: 5.6900
+[2025-07-09 06:15:40] [Rank 0] Group 1 Loss: 5.3582
+[2025-07-09 06:15:40] [Rank 0] Group 2 Loss: 5.3560
+[2025-07-09 06:15:40] [Rank 0] Group 3 Loss: 5.5091
+[2025-07-09 06:15:40] [Rank 0] Group 4 Loss: 5.4455
+[2025-07-09 06:15:40] [Rank 0] Group 5 Loss: 5.3242
+[2025-07-09 06:15:40] [Rank 0] Group 6 Loss: 5.3675
+[2025-07-09 06:15:40] [Rank 0] Group 7 Loss: 5.4628
+[2025-07-09 06:15:40] [Rank 0] Group 8 Loss: 5.5078
+[2025-07-09 06:15:40] [Rank 0] Group 9 Loss: 5.3496
+[2025-07-09 06:15:40] [Rank 0] Group 10 Loss: 5.4424
+[2025-07-09 06:15:40] [Rank 0] Group 11 Loss: 5.4971
+[2025-07-09 06:15:40] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 06:15:40] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 06:15:40] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 06:15:40] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-09 06:15:40] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-09 06:15:40] [Rank 0] Group 5 FTA: 0.9792
+[2025-07-09 06:15:40] [Rank 0] Group 6 FTA: 0.9271
+[2025-07-09 06:15:40] [Rank 0] Group 7 FTA: 0.9740
+[2025-07-09 06:15:40] [Rank 0] Group 8 FTA: 0.9505
+[2025-07-09 06:15:40] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-09 06:15:40] [Rank 0] Group 10 FTA: 0.9570
+[2025-07-09 06:15:40] [Rank 0] Group 11 FTA: 0.9580
+[2025-07-09 06:15:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 06:15:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 06:15:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 06:15:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 06:15:42] [Rank 0] step:9001/10000 train_time:725872ms step_avg:80.64ms
+[2025-07-09 06:15:43] [Rank 0] step:9021/10000 train_time:727959ms step_avg:80.70ms
+[2025-07-09 06:15:45] [Rank 0] step:9041/10000 train_time:729443ms step_avg:80.68ms
+[2025-07-09 06:15:46] [Rank 0] step:9061/10000 train_time:730925ms step_avg:80.67ms
+[2025-07-09 06:15:48] [Rank 0] step:9081/10000 train_time:732410ms step_avg:80.65ms
+[2025-07-09 06:15:50] [Rank 0] step:9101/10000 train_time:734748ms step_avg:80.73ms
+[2025-07-09 06:15:52] [Rank 0] step:9121/10000 train_time:736236ms step_avg:80.72ms
+[2025-07-09 06:15:53] [Rank 0] step:9141/10000 train_time:737765ms step_avg:80.71ms
+[2025-07-09 06:15:55] [Rank 0] step:9161/10000 train_time:739250ms step_avg:80.70ms
+[2025-07-09 06:15:57] [Rank 0] step:9181/10000 train_time:740992ms step_avg:80.71ms
+[2025-07-09 06:15:58] [Rank 0] step:9201/10000 train_time:742874ms step_avg:80.74ms
+[2025-07-09 06:16:00] [Rank 0] step:9221/10000 train_time:744361ms step_avg:80.72ms
+[2025-07-09 06:16:01] [Rank 0] step:9241/10000 train_time:745849ms step_avg:80.71ms
+[2025-07-09 06:16:03] [Rank 0] step:9261/10000 train_time:747339ms step_avg:80.70ms
+[2025-07-09 06:16:05] [Rank 0] step:9281/10000 train_time:749489ms step_avg:80.76ms
+[2025-07-09 06:16:06] [Rank 0] step:9301/10000 train_time:750976ms step_avg:80.74ms
+[2025-07-09 06:16:08] [Rank 0] step:9321/10000 train_time:752466ms step_avg:80.73ms
+[2025-07-09 06:16:09] [Rank 0] step:9341/10000 train_time:753956ms step_avg:80.71ms
+[2025-07-09 06:16:12] [Rank 0] step:9361/10000 train_time:755445ms step_avg:80.70ms
+[2025-07-09 06:16:13] [Rank 0] step:9381/10000 train_time:757594ms step_avg:80.76ms
+[2025-07-09 06:16:15] [Rank 0] step:9401/10000 train_time:759086ms step_avg:80.75ms
+[2025-07-09 06:16:16] [Rank 0] step:9421/10000 train_time:760578ms step_avg:80.73ms
+[2025-07-09 06:16:18] [Rank 0] step:9441/10000 train_time:762068ms step_avg:80.72ms
+[2025-07-09 06:16:20] [Rank 0] step:9461/10000 train_time:764207ms step_avg:80.77ms
+[2025-07-09 06:16:21] [Rank 0] step:9481/10000 train_time:765696ms step_avg:80.76ms
+[2025-07-09 06:16:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:16:24] [Rank 0] PRINT: step:9500/10000 train_loss:0.8557 val_loss:0.8584 train_time:767186ms step_avg:80.76ms
+[2025-07-09 06:16:24] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 06:16:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 06:16:24] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:21:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:21:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:21:50] [Rank 0] Total Loss: 5.4703
+[2025-07-09 06:21:50] [Rank 0] Total FTA: 0.9826
+[2025-07-09 06:21:50] [Rank 0] Group 0 Loss: 5.6968
+[2025-07-09 06:21:50] [Rank 0] Group 1 Loss: 5.4058
+[2025-07-09 06:21:50] [Rank 0] Group 2 Loss: 5.2782
+[2025-07-09 06:21:50] [Rank 0] Group 3 Loss: 5.3930
+[2025-07-09 06:21:50] [Rank 0] Group 4 Loss: 5.4619
+[2025-07-09 06:21:50] [Rank 0] Group 5 Loss: 5.4168
+[2025-07-09 06:21:50] [Rank 0] Group 6 Loss: 5.4155
+[2025-07-09 06:21:50] [Rank 0] Group 7 Loss: 5.4657
+[2025-07-09 06:21:50] [Rank 0] Group 8 Loss: 5.4514
+[2025-07-09 06:21:50] [Rank 0] Group 9 Loss: 5.4629
+[2025-07-09 06:21:50] [Rank 0] Group 10 Loss: 5.5111
+[2025-07-09 06:21:50] [Rank 0] Group 11 Loss: 5.4593
+[2025-07-09 06:21:50] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 06:21:50] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 06:21:50] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 06:21:50] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-09 06:21:50] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-09 06:21:50] [Rank 0] Group 5 FTA: 0.9714
+[2025-07-09 06:21:50] [Rank 0] Group 6 FTA: 0.9635
+[2025-07-09 06:21:50] [Rank 0] Group 7 FTA: 0.9792
+[2025-07-09 06:21:50] [Rank 0] Group 8 FTA: 0.9583
+[2025-07-09 06:21:50] [Rank 0] Group 9 FTA: 0.9648
+[2025-07-09 06:21:50] [Rank 0] Group 10 FTA: 0.9785
+[2025-07-09 06:21:50] [Rank 0] Group 11 FTA: 0.9717
+[2025-07-09 06:21:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 06:21:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 06:21:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 06:21:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 06:21:51] [Rank 0] step:9501/10000 train_time:767207ms step_avg:80.75ms
+[2025-07-09 06:21:53] [Rank 0] step:9521/10000 train_time:768687ms step_avg:80.74ms
+[2025-07-09 06:21:55] [Rank 0] step:9541/10000 train_time:770428ms step_avg:80.75ms
+[2025-07-09 06:21:56] [Rank 0] step:9561/10000 train_time:772301ms step_avg:80.78ms
+[2025-07-09 06:21:58] [Rank 0] step:9581/10000 train_time:773783ms step_avg:80.76ms
+[2025-07-09 06:21:59] [Rank 0] step:9601/10000 train_time:775268ms step_avg:80.75ms
+[2025-07-09 06:22:01] [Rank 0] step:9621/10000 train_time:776751ms step_avg:80.73ms
+[2025-07-09 06:22:03] [Rank 0] step:9641/10000 train_time:778884ms step_avg:80.79ms
+[2025-07-09 06:22:04] [Rank 0] step:9661/10000 train_time:780370ms step_avg:80.78ms
+[2025-07-09 06:22:06] [Rank 0] step:9681/10000 train_time:781855ms step_avg:80.76ms
+[2025-07-09 06:22:07] [Rank 0] step:9701/10000 train_time:783342ms step_avg:80.75ms
+[2025-07-09 06:22:09] [Rank 0] step:9721/10000 train_time:784914ms step_avg:80.74ms
+[2025-07-09 06:22:11] [Rank 0] step:9741/10000 train_time:786518ms step_avg:80.74ms
+[2025-07-09 06:22:12] [Rank 0] step:9761/10000 train_time:788161ms step_avg:80.75ms
+[2025-07-09 06:22:14] [Rank 0] step:9781/10000 train_time:789651ms step_avg:80.73ms
+[2025-07-09 06:22:15] [Rank 0] step:9801/10000 train_time:791140ms step_avg:80.72ms
+[2025-07-09 06:22:17] [Rank 0] step:9821/10000 train_time:793285ms step_avg:80.77ms
+[2025-07-09 06:22:19] [Rank 0] step:9841/10000 train_time:794775ms step_avg:80.76ms
+[2025-07-09 06:22:20] [Rank 0] step:9861/10000 train_time:796266ms step_avg:80.75ms
+[2025-07-09 06:22:22] [Rank 0] step:9881/10000 train_time:797756ms step_avg:80.74ms
+[2025-07-09 06:22:24] [Rank 0] step:9901/10000 train_time:799296ms step_avg:80.73ms
+[2025-07-09 06:22:25] [Rank 0] step:9921/10000 train_time:801075ms step_avg:80.75ms
+[2025-07-09 06:22:27] [Rank 0] step:9941/10000 train_time:802564ms step_avg:80.73ms
+[2025-07-09 06:22:28] [Rank 0] step:9961/10000 train_time:804056ms step_avg:80.72ms
+[2025-07-09 06:22:30] [Rank 0] step:9981/10000 train_time:805547ms step_avg:80.71ms
+[2025-07-09 06:22:31] [Rank 0] step:10000/10000 train_time:807201ms step_avg:80.72ms
+[2025-07-09 06:22:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 06:22:32] [Rank 0] PRINT: step:10000/10000 train_loss:0.8549 val_loss:0.8584 train_time:807279ms step_avg:80.73ms
+[2025-07-09 06:22:32] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 06:22:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 06:22:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 06:27:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 06:27:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 06:27:57] [Rank 0] Total Loss: 5.4694
+[2025-07-09 06:27:57] [Rank 0] Total FTA: 0.9888
+[2025-07-09 06:27:57] [Rank 0] Group 0 Loss: 5.7058
+[2025-07-09 06:27:57] [Rank 0] Group 1 Loss: 5.3958
+[2025-07-09 06:27:57] [Rank 0] Group 2 Loss: 5.2998
+[2025-07-09 06:27:57] [Rank 0] Group 3 Loss: 5.4283
+[2025-07-09 06:27:57] [Rank 0] Group 4 Loss: 5.4550
+[2025-07-09 06:27:57] [Rank 0] Group 5 Loss: 5.3657
+[2025-07-09 06:27:57] [Rank 0] Group 6 Loss: 5.4592
+[2025-07-09 06:27:57] [Rank 0] Group 7 Loss: 5.4609
+[2025-07-09 06:27:57] [Rank 0] Group 8 Loss: 5.4890
+[2025-07-09 06:27:57] [Rank 0] Group 9 Loss: 5.4295
+[2025-07-09 06:27:57] [Rank 0] Group 10 Loss: 5.4860
+[2025-07-09 06:27:57] [Rank 0] Group 11 Loss: 5.4443
+[2025-07-09 06:27:57] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-09 06:27:57] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-09 06:27:57] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-09 06:27:57] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-09 06:27:57] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-09 06:27:57] [Rank 0] Group 5 FTA: 0.9974
+[2025-07-09 06:27:57] [Rank 0] Group 6 FTA: 0.9740
+[2025-07-09 06:27:57] [Rank 0] Group 7 FTA: 0.9818
+[2025-07-09 06:27:57] [Rank 0] Group 8 FTA: 0.9714
+[2025-07-09 06:27:57] [Rank 0] Group 9 FTA: 0.9805
+[2025-07-09 06:27:57] [Rank 0] Group 10 FTA: 0.9727
+[2025-07-09 06:27:57] [Rank 0] Group 11 FTA: 0.9854
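The twelve groups reported above come from the power-law class construction in the training script logged below: generate_powerlaw_selection_counts(m) with m = 11 yields groups 0 through 11, where group 0 holds a single head class and each group g > 0 holds 2^(g-1) classes with 2^(m-g) samples apiece. A small sketch of the same logic at m = 4, just to make the shape visible:

def powerlaw_groups(m: int):
    # mirrors generate_powerlaw_selection_counts from the training script
    groups = []
    for g in range(m + 1):
        num_classes = 1 if g == 0 else 2 ** (g - 1)
        samples_per_class = 2 ** (m - g)
        if samples_per_class < 1:
            continue
        groups.append((g, num_classes, samples_per_class))
    return groups

for g, n_cls, n_per in powerlaw_groups(4):
    print(f"group {g}: {n_cls} classes x {n_per} samples each")
# group 0: 1 classes x 16 samples each
# group 1: 1 classes x 8 samples each
# group 2: 2 classes x 4 samples each
# group 3: 4 classes x 2 samples each
# group 4: 8 classes x 1 samples each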
+[2025-07-09 06:27:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_loss_curves.png
+[2025-07-09 06:27:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/per_class_acc_curves.png
+[2025-07-09 06:27:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_loss_curve.png
+[2025-07-09 06:27:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_0_param_qkvo_lr_0.005_seed_49/total_acc_curve.png
+[2025-07-09 06:27:58] [Rank 0] step:10001/10000 train_time:807303ms step_avg:80.72ms
+[2025-07-09 06:27:58] [Rank 0] PRINT: --- Training Finished: Wed Jul 9 06:27:58 2025 ---
+[2025-07-09 06:27:58] [Rank 0] PRINT: Peak memory allocated: 9109 MiB reserved: 10356 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..0c2d792621f4bf093d1f91c922e9e9c20f582ae7
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "0d19c147-46aa-497d-9867-9a8e5491ee45",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..260213162a4ab39b1a092b9624d7723e8a2558fa
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a65f671332ece8cb63e1a070abb1827fb0987a2d870392cebb3e3843480ec430
+size 260459
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..818a782dd9d6cab0f064b7a2df075be4454157aa
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a9fcbbce97b5e59cbdc1c42e39769f5f3a61106b220c8cb66df825cd29e91b5
+size 260868
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..9f1eeed7058b439f5c38a068d9cbbbb690998713
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0f3bfb28e0381009cf2b75a429f8ba7dd5142c50598c42ed81000a60b81a2f2b
+size 84765
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..34daede6e94a3f99c9cace13d8177e4d1249f77b
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e157bd0a247a3848369526bbb9cd8b7d564279c10be7e798dd20762e5e6329f
+size 100560
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/training_log_0d19c147-46aa-497d-9867-9a8e5491ee45.txt b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/training_log_0d19c147-46aa-497d-9867-9a8e5491ee45.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e296fb2fb6786a127e909bd452164668113becf1
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/training_log_0d19c147-46aa-497d-9867-9a8e5491ee45.txt
@@ -0,0 +1,5144 @@
+[2025-07-07 16:31:04] [Rank 0] PRINT: --- Script Start: Mon Jul 7 16:31:04 2025 ---
+[2025-07-07 16:31:04] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001)
+[2025-07-07 16:31:04] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 16:31:04] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-07 16:31:04] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42
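The script dump that follows registers a custom matmul's backward with mm_op.register_autograd(...) right after its imports. For readers unfamiliar with that pattern, here is a minimal self-contained sketch of the same torch.library mechanism (PyTorch 2.4+); the mylib::scale op is hypothetical and only stands in for the real mm_op defined in utils.float_compute:

import torch

@torch.library.custom_op("mylib::scale", mutates_args=())
def scale(x: torch.Tensor, s: float) -> torch.Tensor:
    return x * s

def setup_context(ctx, inputs, output):
    _, s = inputs      # stash whatever backward will need
    ctx.s = s

def backward(ctx, grad_output):
    # one gradient per op input; None for the non-tensor float
    return grad_output * ctx.s, None

scale.register_autograd(backward, setup_context=setup_context)

x = torch.randn(3, requires_grad=True)
scale(x, 2.0).sum().backward()   # x.grad is now all 2.0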
+[2025-07-07 16:31:04] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through shards so multi-epoch training never exhausts the iterator
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
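One quirk worth flagging in the Hyperparameters dataclass above: its fields are declared without type annotations, so @dataclass registers no fields at all. That is exactly why the log records the empty repr "Hyperparameters: Hyperparameters()" and why the config writer below falls back to scraping args.__class__.__dict__. A minimal annotated sketch (illustrative only, not the code that produced these logs):

from dataclasses import dataclass, asdict

@dataclass
class HyperparametersAnnotated:
    num_iterations: int = 10000   # same values as this run
    cooldown_frac: float = 0.8
    vocab_size: int = 50257
    val_loss_every: int = 500

print(HyperparametersAnnotated())           # fields now appear in the repr
print(asdict(HyperparametersAnnotated()))   # and survive serialization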
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message exactly once, and only when a logfile exists;
+        # an unconditional second write here would duplicate every log line
+        # and crash on ranks where logfile is None.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
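Every optimizer_mode above splits the weight matrices between Adam and Muon; Muon's distinguishing step is orthogonalizing each momentum update with a few Newton-Schulz iterations (the ns_steps=5 passed to Muon(...) later in this script). A minimal sketch of that step, assuming the quintic iteration and coefficients of the public modded-nanogpt Muon; the real optimizer imported above adds momentum handling and distributed sharding on top:

def newtonschulz5(G, steps: int = 5):
    # Push the singular values of the update matrix toward 1,
    # i.e. approximately orthogonalize it.
    a, b, c = (3.4445, -4.7750, 2.0315)  # quintic coefficients (assumed from the public implementation)
    X = G.bfloat16()
    X = X / (X.norm() + 1e-7)            # normalize so the iteration converges
    if G.size(0) > G.size(1):
        X = X.T
    for _ in range(steps):
        A = X @ X.T
        B = b * A + c * A @ A
        X = a * X + B @ X
    if G.size(0) > G.size(1):
        X = X.T
    return X

+    # ...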
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 16:31:04] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle loops over the shards indefinitely, enabling multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
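+# (Editorial worked example, not part of the original script; world_size = 4 is
+# an assumption inferred from the val_batch_size = 262144 warning in the run
+# log further below.) The validation batching defined in Hyperparameters above
+# works out to:
+#     val_batch_size = world_size * val_seq_len = 4 * 65536 = 262144 tokens
+#     val_num_steps  = val_tokens // val_batch_size = 1966080 // 262144 = 7
+# so each evaluation consumes 7 * 262144 = 1835008 tokens and skips the
+# remaining 1966080 - 1835008 = 131072 tokens, which is exactly why the
+# "not perfectly divisible" warning is printed at every validation step.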
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for the master process before dir creation and config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the full timestamped message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
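+
+# (Editorial usage sketch for print0 above, not part of the original script.)
+# A message carrying the "PRINT:" prefix is echoed to stdout with the prefix
+# stripped, while the logfile always receives the full timestamped message:
+#
+#   print0("PRINT: hello")        # stdout : "hello"
+#                                 # logfile: "[<ts>] [Rank 0] PRINT: hello"
+#   print0("quiet diagnostic")    # logfile only, unless console=True is passed
+
+    # ...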
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
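+    # (Editorial worked example for generate_powerlaw_selection_counts above,
+    # not part of the original script.) For m = 3 the construction yields:
+    #   group 0: 1 class   with 2**3 = 8 samples
+    #   group 1: 1 class   with 2**2 = 4 samples
+    #   group 2: 2 classes with 2**1 = 2 samples each
+    #   group 3: 4 classes with 2**0 = 1 sample each
+    # i.e. generate_powerlaw_selection_counts(3) returns
+    #   ({0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}, [0, 1, 2, 2, 3, 3, 3, 3])
+    # Every group g >= 1 receives 2**(m-1) samples in total while group 0 gets
+    # 2**m, so with M_FOR_POWERLAW = 11 there are 2048 classes across 12 groups.
+    # 1.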
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(s) for s in step_data.keys()])
+            values = [step_data[str(s)] for s in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(s) for s in history.keys()])
+        values = [history[str(s)] for s in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14) # history is keyed by training step, not epoch
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 16:31:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 16:31:04] [Rank 0] PRINT: Constructing model...
+[2025-07-07 16:31:06] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 16:31:06] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 16:31:06] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 16:31:07] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 16:31:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 16:31:07] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 16:31:07] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 16:31:07] [Rank 0] PRINT: Model returns:
+[2025-07-07 16:31:07] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 16:31:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-07 16:31:07] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001).
+[2025-07-07 16:31:07] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-07 16:31:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-07 16:31:07] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 16:31:07] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 16:31:07] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 16:39:37] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 16:39:38] [Rank 0] PRINT: Starting training...
+[2025-07-07 16:39:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 16:43:48] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 16:43:49] [Rank 0] step:21/10000 train_time:1024ms step_avg:48.74ms
+[2025-07-07 16:43:51] [Rank 0] step:41/10000 train_time:2355ms step_avg:57.44ms
+[2025-07-07 16:43:52] [Rank 0] step:61/10000 train_time:3688ms step_avg:60.46ms
+[2025-07-07 16:43:53] [Rank 0] step:81/10000 train_time:5021ms step_avg:61.99ms
+[2025-07-07 16:43:55] [Rank 0] step:101/10000 train_time:6356ms step_avg:62.93ms
+[2025-07-07 16:43:56] [Rank 0] step:121/10000 train_time:7690ms step_avg:63.55ms
+[2025-07-07 16:43:57] [Rank 0] step:141/10000 train_time:9023ms step_avg:63.99ms
+[2025-07-07 16:43:59] [Rank 0] step:161/10000 train_time:10358ms step_avg:64.34ms
+[2025-07-07 16:44:00] [Rank 0] step:181/10000 train_time:11692ms step_avg:64.60ms
+[2025-07-07 16:44:01] [Rank 0] step:201/10000 train_time:13087ms step_avg:65.11ms
+[2025-07-07 16:44:03] [Rank 0] step:221/10000 train_time:14427ms step_avg:65.28ms
+[2025-07-07 16:44:04] [Rank 0] step:241/10000 train_time:15762ms step_avg:65.40ms
+[2025-07-07 16:44:05] [Rank 0] step:261/10000 train_time:17094ms step_avg:65.50ms
+[2025-07-07 16:44:07] [Rank 0] step:281/10000 train_time:18427ms step_avg:65.57ms
+[2025-07-07 16:44:08] [Rank 0] step:301/10000 train_time:19760ms step_avg:65.65ms
+[2025-07-07 16:44:09] [Rank 0] step:321/10000 train_time:21094ms step_avg:65.71ms
+[2025-07-07 16:44:11] [Rank 0] step:341/10000 train_time:22428ms step_avg:65.77ms
+[2025-07-07 16:44:12] [Rank 0] step:361/10000 train_time:23813ms step_avg:65.96ms
+[2025-07-07 16:44:13] [Rank 0] step:381/10000 train_time:25156ms step_avg:66.03ms
+[2025-07-07 16:44:15] [Rank 0] step:401/10000 train_time:26492ms step_avg:66.06ms
+[2025-07-07 16:44:16] [Rank 0] step:421/10000 train_time:27826ms step_avg:66.10ms
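The step_avg column in these lines is just cumulative train_time divided by the number of steps completed, matching the print statement in the script above. Reproducing the step:421 line:

train_time_ms, steps_done = 27826, 421
print(f"step_avg:{train_time_ms / steps_done:.2f}ms")   # -> step_avg:66.10ms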
+[2025-07-07 16:44:17] [Rank 0] step:441/10000 train_time:29161ms step_avg:66.12ms
+[2025-07-07 16:44:19] [Rank 0] step:461/10000 train_time:30496ms step_avg:66.15ms
+[2025-07-07 16:44:20] [Rank 0] step:481/10000 train_time:31831ms step_avg:66.18ms
+[2025-07-07 16:44:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 16:44:22] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5996 train_time:33772ms step_avg:67.54ms
+[2025-07-07 16:44:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 16:44:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 16:44:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 16:49:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 16:49:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 16:49:51] [Rank 0] Total Loss: 8.9174
+[2025-07-07 16:49:51] [Rank 0] Total FTA: 0.0000
+[2025-07-07 16:49:51] [Rank 0] Group 0 Loss: 8.9368
+[2025-07-07 16:49:51] [Rank 0] Group 1 Loss: 8.9150
+[2025-07-07 16:49:51] [Rank 0] Group 2 Loss: 8.9566
+[2025-07-07 16:49:51] [Rank 0] Group 3 Loss: 8.8962
+[2025-07-07 16:49:51] [Rank 0] Group 4 Loss: 8.9143
+[2025-07-07 16:49:51] [Rank 0] Group 5 Loss: 8.8988
+[2025-07-07 16:49:51] [Rank 0] Group 6 Loss: 8.9154
+[2025-07-07 16:49:52] [Rank 0] Group 7 Loss: 8.9153
+[2025-07-07 16:49:52] [Rank 0] Group 8 Loss: 8.9091
+[2025-07-07 16:49:52] [Rank 0] Group 9 Loss: 8.9014
+[2025-07-07 16:49:52] [Rank 0] Group 10 Loss: 8.9089
+[2025-07-07 16:49:52] [Rank 0] Group 11 Loss: 8.9179
+[2025-07-07 16:49:52] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 16:49:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 16:49:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 16:49:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 16:49:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 16:49:53] [Rank 0] step:501/10000 train_time:33783ms step_avg:67.43ms
+[2025-07-07 16:49:54] [Rank 0] step:521/10000 train_time:34514ms step_avg:66.25ms
+[2025-07-07 16:49:56] [Rank 0] step:541/10000 train_time:35842ms step_avg:66.25ms
+[2025-07-07 16:49:57] [Rank 0] step:561/10000 train_time:37239ms step_avg:66.38ms
+[2025-07-07 16:49:58] [Rank 0] step:581/10000 train_time:38568ms step_avg:66.38ms
+[2025-07-07 16:50:00] [Rank 0] step:601/10000 train_time:39898ms step_avg:66.39ms
+[2025-07-07 16:50:01] [Rank 0] step:621/10000 train_time:41229ms step_avg:66.39ms
+[2025-07-07 16:50:02] [Rank 0] step:641/10000 train_time:42559ms step_avg:66.40ms
+[2025-07-07 16:50:04] [Rank 0] step:661/10000 train_time:43887ms step_avg:66.40ms
+[2025-07-07 16:50:05] [Rank 0] step:681/10000 train_time:45220ms step_avg:66.40ms
+[2025-07-07 16:50:06] [Rank 0] step:701/10000 train_time:46550ms step_avg:66.41ms
+[2025-07-07 16:50:08] [Rank 0] step:721/10000 train_time:47881ms step_avg:66.41ms
+[2025-07-07 16:50:09] [Rank 0] step:741/10000 train_time:49274ms step_avg:66.50ms
+[2025-07-07 16:50:10] [Rank 0] step:761/10000 train_time:50612ms step_avg:66.51ms
+[2025-07-07 16:50:12] [Rank 0] step:781/10000 train_time:51954ms step_avg:66.52ms
+[2025-07-07 16:50:13] [Rank 0] step:801/10000 train_time:53295ms step_avg:66.54ms
+[2025-07-07 16:50:14] [Rank 0] step:821/10000 train_time:54665ms step_avg:66.58ms
+[2025-07-07 16:50:16] [Rank 0] step:841/10000 train_time:56010ms step_avg:66.60ms
+[2025-07-07 16:50:17] [Rank 0] step:861/10000 train_time:57353ms step_avg:66.61ms
+[2025-07-07 16:50:18] [Rank 0] step:881/10000 train_time:58695ms step_avg:66.62ms
+[2025-07-07 16:50:20] [Rank 0] step:901/10000 train_time:60041ms step_avg:66.64ms
+[2025-07-07 16:50:21] [Rank 0] step:921/10000 train_time:61438ms step_avg:66.71ms
+[2025-07-07 16:50:23] [Rank 0] step:941/10000 train_time:62783ms step_avg:66.72ms
+[2025-07-07 16:50:24] [Rank 0] step:961/10000 train_time:64129ms step_avg:66.73ms
+[2025-07-07 16:50:25] [Rank 0] step:981/10000 train_time:65477ms step_avg:66.74ms
+[2025-07-07 16:50:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
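One detail worth noting in the detailed-evaluation blocks: the sampler targets ~5000 samples but settles on 5633 every time. The quota rule is not shown in this log, so the following is only a hedged sketch of a common stratified-sampling pattern (proportional per-group quotas rounded up, optionally clamped to a per-group floor) that lands at or slightly above the target; sample_stratified and the floor of 64 are illustrative assumptions, not the script's actual names or values:

import math
from collections import Counter

def sample_stratified(labels, target=5000, floor=64):
    # Proportional quota per group, rounded up and clamped to a minimum size.
    counts = Counter(labels)
    frac = target / len(labels)
    quota = {g: min(n, max(floor, math.ceil(n * frac))) for g, n in counts.items()}
    taken, picked = Counter(), []
    for i, g in enumerate(labels):
        if taken[g] < quota[g]:
            taken[g] += 1
            picked.append(i)
    return picked  # round-up and floors can push len(picked) above target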
+[2025-07-07 16:50:28] [Rank 0] PRINT: step:1000/10000 train_loss:7.8088 val_loss:7.1245 train_time:67613ms step_avg:67.61ms
+[2025-07-07 16:50:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 16:50:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 16:50:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 16:55:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 16:55:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 16:55:57] [Rank 0] Total Loss: 7.6893
+[2025-07-07 16:55:57] [Rank 0] Total FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 0 Loss: 7.6843
+[2025-07-07 16:55:57] [Rank 0] Group 1 Loss: 7.6420
+[2025-07-07 16:55:57] [Rank 0] Group 2 Loss: 7.8353
+[2025-07-07 16:55:57] [Rank 0] Group 3 Loss: 7.6492
+[2025-07-07 16:55:57] [Rank 0] Group 4 Loss: 7.7062
+[2025-07-07 16:55:57] [Rank 0] Group 5 Loss: 7.6566
+[2025-07-07 16:55:57] [Rank 0] Group 6 Loss: 7.7072
+[2025-07-07 16:55:57] [Rank 0] Group 7 Loss: 7.6784
+[2025-07-07 16:55:57] [Rank 0] Group 8 Loss: 7.6616
+[2025-07-07 16:55:57] [Rank 0] Group 9 Loss: 7.6747
+[2025-07-07 16:55:57] [Rank 0] Group 10 Loss: 7.6863
+[2025-07-07 16:55:57] [Rank 0] Group 11 Loss: 7.6901
+[2025-07-07 16:55:57] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 16:55:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 16:55:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 16:55:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 16:55:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 16:55:58] [Rank 0] step:1001/10000 train_time:67623ms step_avg:67.56ms
+[2025-07-07 16:56:00] [Rank 0] step:1021/10000 train_time:68359ms step_avg:66.95ms
+[2025-07-07 16:56:01] [Rank 0] step:1041/10000 train_time:69696ms step_avg:66.95ms
+[2025-07-07 16:56:02] [Rank 0] step:1061/10000 train_time:71036ms step_avg:66.95ms
+[2025-07-07 16:56:04] [Rank 0] step:1081/10000 train_time:72376ms step_avg:66.95ms
+[2025-07-07 16:56:05] [Rank 0] step:1101/10000 train_time:73762ms step_avg:67.00ms
+[2025-07-07 16:56:07] [Rank 0] step:1121/10000 train_time:75103ms step_avg:67.00ms
+[2025-07-07 16:56:08] [Rank 0] step:1141/10000 train_time:76444ms step_avg:67.00ms
+[2025-07-07 16:56:09] [Rank 0] step:1161/10000 train_time:77787ms step_avg:67.00ms
+[2025-07-07 16:56:11] [Rank 0] step:1181/10000 train_time:79131ms step_avg:67.00ms
+[2025-07-07 16:56:12] [Rank 0] step:1201/10000 train_time:80476ms step_avg:67.01ms
+[2025-07-07 16:56:13] [Rank 0] step:1221/10000 train_time:81820ms step_avg:67.01ms
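The FTA columns in these evaluation blocks are first-token accuracy. The script's exact implementation is not shown in this log, so the following is only a plausible sketch of the usual definition (greedy argmax of the model's first answer token scored against the reference); early in training the argmax is still dominated by generic high-frequency tokens, which is consistent with the all-zero rows above:

import torch

def first_token_accuracy(logits: torch.Tensor, first_answer_ids: torch.Tensor) -> float:
    # logits: (batch, vocab), taken at the position just before each answer span.
    pred = logits.argmax(dim=-1)                      # greedy first token per sample
    return (pred == first_answer_ids).float().mean().item()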
+[2025-07-07 16:56:15] [Rank 0] step:1241/10000 train_time:83167ms step_avg:67.02ms
+[2025-07-07 16:56:16] [Rank 0] step:1261/10000 train_time:84563ms step_avg:67.06ms
+[2025-07-07 16:56:17] [Rank 0] step:1281/10000 train_time:85924ms step_avg:67.08ms
+[2025-07-07 16:56:19] [Rank 0] step:1301/10000 train_time:87272ms step_avg:67.08ms
+[2025-07-07 16:56:20] [Rank 0] step:1321/10000 train_time:88620ms step_avg:67.09ms
+[2025-07-07 16:56:21] [Rank 0] step:1341/10000 train_time:89970ms step_avg:67.09ms
+[2025-07-07 16:56:23] [Rank 0] step:1361/10000 train_time:91320ms step_avg:67.10ms
+[2025-07-07 16:56:24] [Rank 0] step:1381/10000 train_time:92673ms step_avg:67.11ms
+[2025-07-07 16:56:25] [Rank 0] step:1401/10000 train_time:94023ms step_avg:67.11ms
+[2025-07-07 16:56:27] [Rank 0] step:1421/10000 train_time:95373ms step_avg:67.12ms
+[2025-07-07 16:56:28] [Rank 0] step:1441/10000 train_time:96723ms step_avg:67.12ms
+[2025-07-07 16:56:30] [Rank 0] step:1461/10000 train_time:98119ms step_avg:67.16ms
+[2025-07-07 16:56:31] [Rank 0] step:1481/10000 train_time:99473ms step_avg:67.17ms
+[2025-07-07 16:56:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 16:56:33] [Rank 0] PRINT: step:1500/10000 train_loss:6.5874 val_loss:6.1105 train_time:101440ms step_avg:67.63ms
+[2025-07-07 16:56:33] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 16:56:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 16:56:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:02:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:02:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:02:05] [Rank 0] Total Loss: 6.8854
+[2025-07-07 17:02:05] [Rank 0] Total FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 0 Loss: 6.8318
+[2025-07-07 17:02:05] [Rank 0] Group 1 Loss: 6.8413
+[2025-07-07 17:02:05] [Rank 0] Group 2 Loss: 7.0408
+[2025-07-07 17:02:05] [Rank 0] Group 3 Loss: 6.8472
+[2025-07-07 17:02:05] [Rank 0] Group 4 Loss: 6.9293
+[2025-07-07 17:02:05] [Rank 0] Group 5 Loss: 6.8558
+[2025-07-07 17:02:05] [Rank 0] Group 6 Loss: 6.9075
+[2025-07-07 17:02:05] [Rank 0] Group 7 Loss: 6.9004
+[2025-07-07 17:02:05] [Rank 0] Group 8 Loss: 6.8587
+[2025-07-07 17:02:05] [Rank 0] Group 9 Loss: 6.8944
+[2025-07-07 17:02:05] [Rank 0] Group 10 Loss: 6.8971
+[2025-07-07 17:02:05] [Rank 0] Group 11 Loss: 6.8806
+[2025-07-07 17:02:05] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 17:02:05] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 17:02:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:02:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:02:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:02:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:02:07] [Rank 0] step:1501/10000 train_time:101451ms step_avg:67.59ms
+[2025-07-07 17:02:08] [Rank 0] step:1521/10000 train_time:102198ms step_avg:67.19ms
+[2025-07-07 17:02:10] [Rank 0] step:1541/10000 train_time:103542ms step_avg:67.19ms
+[2025-07-07 17:02:11] [Rank 0] step:1561/10000 train_time:104890ms step_avg:67.19ms
+[2025-07-07 17:02:12] [Rank 0] step:1581/10000 train_time:106238ms step_avg:67.20ms
+[2025-07-07 17:02:14] [Rank 0] step:1601/10000 train_time:107585ms step_avg:67.20ms
+[2025-07-07 17:02:15] [Rank 0] step:1621/10000 train_time:108931ms step_avg:67.20ms
+[2025-07-07 17:02:16] [Rank 0] step:1641/10000 train_time:110343ms step_avg:67.24ms
+[2025-07-07 17:02:18] [Rank 0] step:1661/10000 train_time:111691ms step_avg:67.24ms
+[2025-07-07 17:02:19] [Rank 0] step:1681/10000 train_time:113042ms step_avg:67.25ms
+[2025-07-07 17:02:20] [Rank 0] step:1701/10000 train_time:114393ms step_avg:67.25ms
+[2025-07-07 17:02:22] [Rank 0] step:1721/10000 train_time:115744ms step_avg:67.25ms
+[2025-07-07 17:02:23] [Rank 0] step:1741/10000 train_time:117097ms step_avg:67.26ms
+[2025-07-07 17:02:24] [Rank 0] step:1761/10000 train_time:118451ms step_avg:67.26ms
+[2025-07-07 17:02:26] [Rank 0] step:1781/10000 train_time:119805ms step_avg:67.27ms
+[2025-07-07 17:02:27] [Rank 0] step:1801/10000 train_time:121159ms step_avg:67.27ms
+[2025-07-07 17:02:29] [Rank 0] step:1821/10000 train_time:122570ms step_avg:67.31ms
+[2025-07-07 17:02:30] [Rank 0] step:1841/10000 train_time:123925ms step_avg:67.31ms
+[2025-07-07 17:02:31] [Rank 0] step:1861/10000 train_time:125279ms step_avg:67.32ms
+[2025-07-07 17:02:33] [Rank 0] step:1881/10000 train_time:126635ms step_avg:67.32ms
+[2025-07-07 17:02:34] [Rank 0] step:1901/10000 train_time:127991ms step_avg:67.33ms
+[2025-07-07 17:02:35] [Rank 0] step:1921/10000 train_time:129348ms step_avg:67.33ms
+[2025-07-07 17:02:37] [Rank 0] step:1941/10000 train_time:130704ms step_avg:67.34ms
+[2025-07-07 17:02:38] [Rank 0] step:1961/10000 train_time:132060ms step_avg:67.34ms
+[2025-07-07 17:02:39] [Rank 0] step:1981/10000 train_time:133672ms step_avg:67.48ms
+[2025-07-07 17:02:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:02:42] [Rank 0] PRINT: step:2000/10000 train_loss:5.6890 val_loss:5.3089 train_time:135441ms step_avg:67.72ms
+[2025-07-07 17:02:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:02:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:02:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:08:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:08:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:08:13] [Rank 0] Total Loss: 6.3119
+[2025-07-07 17:08:13] [Rank 0] Total FTA: 0.0000
+[2025-07-07 17:08:13] [Rank 0] Group 0 Loss: 6.2147
+[2025-07-07 17:08:13] [Rank 0] Group 1 Loss: 6.2966
+[2025-07-07 17:08:13] [Rank 0] Group 2 Loss: 6.4572
+[2025-07-07 17:08:13] [Rank 0] Group 3 Loss: 6.2835
+[2025-07-07 17:08:13] [Rank 0] Group 4 Loss: 6.3488
+[2025-07-07 17:08:14] [Rank 0] Group 5 Loss: 6.2901
+[2025-07-07 17:08:14] [Rank 0] Group 6 Loss: 6.3644
+[2025-07-07 17:08:14] [Rank 0] Group 7 Loss: 6.3495
+[2025-07-07 17:08:14] [Rank 0] Group 8 Loss: 6.2846
+[2025-07-07 17:08:14] [Rank 0] Group 9 Loss: 6.3141
+[2025-07-07 17:08:14] [Rank 0] Group 10 Loss: 6.3168
+[2025-07-07 17:08:14] [Rank 0] Group 11 Loss: 6.3148
+[2025-07-07 17:08:14] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 17:08:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:08:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:08:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:08:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:08:15] [Rank 0] step:2001/10000 train_time:135452ms step_avg:67.69ms
+[2025-07-07 17:08:16] [Rank 0] step:2021/10000 train_time:136205ms step_avg:67.39ms
+[2025-07-07 17:08:18] [Rank 0] step:2041/10000 train_time:137553ms step_avg:67.39ms
+[2025-07-07 17:08:19] [Rank 0] step:2061/10000 train_time:138902ms step_avg:67.40ms
+[2025-07-07 17:08:21] [Rank 0] step:2081/10000 train_time:140469ms step_avg:67.50ms
+[2025-07-07 17:08:22] [Rank 0] step:2101/10000 train_time:141788ms step_avg:67.49ms
+[2025-07-07 17:08:23] [Rank 0] step:2121/10000 train_time:143137ms step_avg:67.49ms
+[2025-07-07 17:08:25] [Rank 0] step:2141/10000 train_time:144489ms step_avg:67.49ms
+[2025-07-07 17:08:26] [Rank 0] step:2161/10000 train_time:145843ms step_avg:67.49ms
+[2025-07-07 17:08:27] [Rank 0] step:2181/10000 train_time:147232ms step_avg:67.51ms
+[2025-07-07 17:08:29] [Rank 0] step:2201/10000 train_time:148586ms step_avg:67.51ms
+[2025-07-07 17:08:30] [Rank 0] step:2221/10000 train_time:149941ms step_avg:67.51ms
+[2025-07-07 17:08:32] [Rank 0] step:2241/10000 train_time:151305ms step_avg:67.52ms
+[2025-07-07 17:08:33] [Rank 0] step:2261/10000 train_time:152682ms step_avg:67.53ms
+[2025-07-07 17:08:34] [Rank 0] step:2281/10000 train_time:154061ms step_avg:67.54ms
+[2025-07-07 17:08:36] [Rank 0] step:2301/10000 train_time:155439ms step_avg:67.55ms
+[2025-07-07 17:08:37] [Rank 0] step:2321/10000 train_time:156818ms step_avg:67.56ms
+[2025-07-07 17:08:38] [Rank 0] step:2341/10000 train_time:158217ms step_avg:67.59ms
+[2025-07-07 17:08:40] [Rank 0] step:2361/10000 train_time:159597ms step_avg:67.60ms
+[2025-07-07 17:08:41] [Rank 0] step:2381/10000 train_time:160978ms step_avg:67.61ms
+[2025-07-07 17:08:43] [Rank 0] step:2401/10000 train_time:162359ms step_avg:67.62ms
+[2025-07-07 17:08:44] [Rank 0] step:2421/10000 train_time:163739ms step_avg:67.63ms
+[2025-07-07 17:08:45] [Rank 0] step:2441/10000 train_time:165120ms step_avg:67.64ms
+[2025-07-07 17:08:47] [Rank 0] step:2461/10000 train_time:166503ms step_avg:67.66ms
+[2025-07-07 17:08:48] [Rank 0] step:2481/10000 train_time:167884ms step_avg:67.67ms
+[2025-07-07 17:08:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:08:50] [Rank 0] PRINT: step:2500/10000 train_loss:5.0069 val_loss:4.7236 train_time:169894ms step_avg:67.96ms
+[2025-07-07 17:08:50] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:08:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:08:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:14:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:14:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:14:24] [Rank 0] Total Loss: 5.9158
+[2025-07-07 17:14:24] [Rank 0] Total FTA: 0.0749
+[2025-07-07 17:14:24] [Rank 0] Group 0 Loss: 5.7972
+[2025-07-07 17:14:24] [Rank 0] Group 1 Loss: 5.9196
+[2025-07-07 17:14:24] [Rank 0] Group 2 Loss: 6.0084
+[2025-07-07 17:14:24] [Rank 0] Group 3 Loss: 5.8560
+[2025-07-07 17:14:24] [Rank 0] Group 4 Loss: 5.9876
+[2025-07-07 17:14:24] [Rank 0] Group 5 Loss: 5.9157
+[2025-07-07 17:14:24] [Rank 0] Group 6 Loss: 5.9657
+[2025-07-07 17:14:24] [Rank 0] Group 7 Loss: 5.9113
+[2025-07-07 17:14:24] [Rank 0] Group 8 Loss: 5.9244
+[2025-07-07 17:14:24] [Rank 0] Group 9 Loss: 5.9325
+[2025-07-07 17:14:24] [Rank 0] Group 10 Loss: 5.9248
+[2025-07-07 17:14:24] [Rank 0] Group 11 Loss: 5.9353
+[2025-07-07 17:14:24] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-07 17:14:24] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:14:24] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-07 17:14:24] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 17:14:24] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 17:14:24] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-07 17:14:24] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 17:14:24] [Rank 0] Group 7 FTA: 0.0729
+[2025-07-07 17:14:24] [Rank 0] Group 8 FTA: 0.0729
+[2025-07-07 17:14:24] [Rank 0] Group 9 FTA: 0.0664
+[2025-07-07 17:14:24] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-07 17:14:24] [Rank 0] Group 11 FTA: 0.0684
+[2025-07-07 17:14:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:14:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:14:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:14:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:14:25] [Rank 0] step:2501/10000 train_time:169906ms step_avg:67.94ms
+[2025-07-07 17:14:27] [Rank 0] step:2521/10000 train_time:170719ms step_avg:67.72ms
+[2025-07-07 17:14:28] [Rank 0] step:2541/10000 train_time:172079ms step_avg:67.72ms
+[2025-07-07 17:14:29] [Rank 0] step:2561/10000 train_time:173452ms step_avg:67.73ms
+[2025-07-07 17:14:31] [Rank 0] step:2581/10000 train_time:174825ms step_avg:67.74ms
+[2025-07-07 17:14:32] [Rank 0] step:2601/10000 train_time:176198ms step_avg:67.74ms
+[2025-07-07 17:14:34] [Rank 0] step:2621/10000 train_time:177572ms step_avg:67.75ms
+[2025-07-07 17:14:35] [Rank 0] step:2641/10000 train_time:178946ms step_avg:67.76ms
+[2025-07-07 17:14:36] [Rank 0] step:2661/10000 train_time:180319ms step_avg:67.76ms
+[2025-07-07 17:14:38] [Rank 0] step:2681/10000 train_time:181697ms step_avg:67.77ms
+[2025-07-07 17:14:39] [Rank 0] step:2701/10000 train_time:183076ms step_avg:67.78ms
+[2025-07-07 17:14:40] [Rank 0] step:2721/10000 train_time:184500ms step_avg:67.81ms
+[2025-07-07 17:14:42] [Rank 0] step:2741/10000 train_time:185878ms step_avg:67.81ms
+[2025-07-07 17:14:43] [Rank 0] step:2761/10000 train_time:187255ms step_avg:67.82ms
+[2025-07-07 17:14:45] [Rank 0] step:2781/10000 train_time:188633ms step_avg:67.83ms
+[2025-07-07 17:14:46] [Rank 0] step:2801/10000 train_time:190012ms step_avg:67.84ms
+[2025-07-07 17:14:47] [Rank 0] step:2821/10000 train_time:191394ms step_avg:67.85ms
+[2025-07-07 17:14:49] [Rank 0] step:2841/10000 train_time:192773ms step_avg:67.85ms
+[2025-07-07 17:14:50] [Rank 0] step:2861/10000 train_time:194155ms step_avg:67.86ms
+[2025-07-07 17:14:52] [Rank 0] step:2881/10000 train_time:195535ms step_avg:67.87ms
+[2025-07-07 17:14:53] [Rank 0] step:2901/10000 train_time:196947ms step_avg:67.89ms
+[2025-07-07 17:14:54] [Rank 0] step:2921/10000 train_time:198328ms step_avg:67.90ms
+[2025-07-07 17:14:56] [Rank 0] step:2941/10000 train_time:199710ms step_avg:67.91ms
+[2025-07-07 17:14:57] [Rank 0] step:2961/10000 train_time:201092ms step_avg:67.91ms
+[2025-07-07 17:14:59] [Rank 0] step:2981/10000 train_time:202517ms step_avg:67.94ms
+[2025-07-07 17:15:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:15:01] [Rank 0] PRINT: step:3000/10000 train_loss:4.4656 val_loss:4.2213 train_time:204527ms step_avg:68.18ms
+[2025-07-07 17:15:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:15:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:15:01] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:15:01] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:20:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:20:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:20:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:20:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:20:31] [Rank 0] Total Loss: 5.6302 +[2025-07-07 17:20:31] [Rank 0] Total Loss: 5.6302 +[2025-07-07 17:20:32] [Rank 0] Total FTA: 0.0731 +[2025-07-07 17:20:32] [Rank 0] Total FTA: 0.0731 +[2025-07-07 17:20:32] [Rank 0] Group 0 Loss: 5.5161 +[2025-07-07 17:20:32] [Rank 0] Group 0 Loss: 5.5161 +[2025-07-07 17:20:32] [Rank 0] Group 1 Loss: 5.6160 +[2025-07-07 17:20:32] [Rank 0] Group 1 Loss: 5.6160 +[2025-07-07 17:20:32] [Rank 0] Group 2 Loss: 5.7639 +[2025-07-07 17:20:32] [Rank 0] Group 2 Loss: 5.7639 +[2025-07-07 17:20:32] [Rank 0] Group 3 Loss: 5.5239 +[2025-07-07 17:20:32] [Rank 0] Group 3 Loss: 5.5239 +[2025-07-07 17:20:32] [Rank 0] Group 4 Loss: 5.7251 +[2025-07-07 17:20:32] [Rank 0] Group 4 Loss: 5.7251 +[2025-07-07 17:20:32] [Rank 0] Group 5 Loss: 5.6427 +[2025-07-07 17:20:32] [Rank 0] Group 5 Loss: 5.6427 +[2025-07-07 17:20:32] [Rank 0] Group 6 Loss: 5.6947 +[2025-07-07 17:20:32] [Rank 0] Group 6 Loss: 5.6947 +[2025-07-07 17:20:32] [Rank 0] Group 7 Loss: 5.6495 +[2025-07-07 17:20:32] [Rank 0] Group 7 Loss: 5.6495 +[2025-07-07 17:20:32] [Rank 0] Group 8 Loss: 5.6144 +[2025-07-07 17:20:32] [Rank 0] Group 8 Loss: 5.6144 +[2025-07-07 17:20:32] [Rank 0] Group 9 Loss: 5.6395 +[2025-07-07 17:20:32] [Rank 0] Group 9 Loss: 5.6395 +[2025-07-07 17:20:32] [Rank 0] Group 10 Loss: 5.6398 +[2025-07-07 17:20:32] [Rank 0] Group 10 Loss: 5.6398 +[2025-07-07 17:20:32] [Rank 0] Group 11 Loss: 5.6380 +[2025-07-07 17:20:32] [Rank 0] Group 11 Loss: 5.6380 +[2025-07-07 17:20:32] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 17:20:32] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 17:20:32] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 17:20:32] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 17:20:32] [Rank 0] Group 2 FTA: 0.0911 +[2025-07-07 17:20:32] [Rank 0] Group 2 FTA: 0.0911 +[2025-07-07 17:20:32] [Rank 0] Group 3 FTA: 0.0391 +[2025-07-07 17:20:32] [Rank 0] Group 3 FTA: 0.0391 +[2025-07-07 17:20:32] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-07 17:20:32] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-07 17:20:32] [Rank 0] Group 5 FTA: 0.0469 +[2025-07-07 17:20:32] [Rank 0] Group 5 FTA: 0.0469 +[2025-07-07 17:20:32] [Rank 0] Group 6 FTA: 0.0729 +[2025-07-07 17:20:32] [Rank 0] Group 6 FTA: 0.0729 +[2025-07-07 17:20:32] [Rank 0] Group 7 FTA: 0.0677 +[2025-07-07 17:20:32] [Rank 0] Group 7 FTA: 0.0677 +[2025-07-07 17:20:32] [Rank 0] Group 8 FTA: 0.0521 +[2025-07-07 17:20:32] [Rank 0] Group 8 FTA: 0.0521 +[2025-07-07 17:20:32] [Rank 0] Group 9 FTA: 0.0781 +[2025-07-07 17:20:32] [Rank 0] Group 9 FTA: 0.0781 +[2025-07-07 17:20:32] [Rank 0] Group 10 FTA: 0.0703 +[2025-07-07 17:20:32] [Rank 0] Group 10 FTA: 0.0703 +[2025-07-07 17:20:32] [Rank 0] Group 11 FTA: 0.0703 +[2025-07-07 17:20:32] [Rank 0] Group 11 FTA: 0.0703 +[2025-07-07 17:20:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-07 17:20:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-07 17:20:32] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-07 17:20:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-07 17:20:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-07 17:20:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-07 17:20:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-07 17:20:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-07 17:20:33] [Rank 0] step:3001/10000 train_time:204538ms step_avg:68.16ms +[2025-07-07 17:20:33] [Rank 0] step:3001/10000 train_time:204538ms step_avg:68.16ms +[2025-07-07 17:20:34] [Rank 0] step:3021/10000 train_time:205316ms step_avg:67.96ms +[2025-07-07 17:20:34] [Rank 0] step:3021/10000 train_time:205316ms step_avg:67.96ms +[2025-07-07 17:20:36] [Rank 0] step:3041/10000 train_time:206686ms step_avg:67.97ms +[2025-07-07 17:20:36] [Rank 0] step:3041/10000 train_time:206686ms step_avg:67.97ms +[2025-07-07 17:20:37] [Rank 0] step:3061/10000 train_time:208058ms step_avg:67.97ms +[2025-07-07 17:20:37] [Rank 0] step:3061/10000 train_time:208058ms step_avg:67.97ms +[2025-07-07 17:20:39] [Rank 0] step:3081/10000 train_time:209478ms step_avg:67.99ms +[2025-07-07 17:20:39] [Rank 0] step:3081/10000 train_time:209478ms step_avg:67.99ms +[2025-07-07 17:20:40] [Rank 0] step:3101/10000 train_time:210853ms step_avg:68.00ms +[2025-07-07 17:20:40] [Rank 0] step:3101/10000 train_time:210853ms step_avg:68.00ms +[2025-07-07 17:20:41] [Rank 0] step:3121/10000 train_time:212227ms step_avg:68.00ms +[2025-07-07 17:20:41] [Rank 0] step:3121/10000 train_time:212227ms step_avg:68.00ms +[2025-07-07 17:20:43] [Rank 0] step:3141/10000 train_time:213602ms step_avg:68.00ms +[2025-07-07 17:20:43] [Rank 0] step:3141/10000 train_time:213602ms step_avg:68.00ms +[2025-07-07 17:20:44] [Rank 0] step:3161/10000 train_time:214980ms step_avg:68.01ms +[2025-07-07 17:20:44] [Rank 0] step:3161/10000 train_time:214980ms step_avg:68.01ms +[2025-07-07 17:20:46] [Rank 0] step:3181/10000 train_time:216357ms step_avg:68.02ms +[2025-07-07 17:20:46] [Rank 0] step:3181/10000 train_time:216357ms step_avg:68.02ms +[2025-07-07 17:20:47] [Rank 0] step:3201/10000 train_time:217733ms step_avg:68.02ms +[2025-07-07 17:20:47] [Rank 0] step:3201/10000 train_time:217733ms step_avg:68.02ms +[2025-07-07 17:20:48] [Rank 0] step:3221/10000 train_time:219111ms step_avg:68.03ms +[2025-07-07 17:20:48] [Rank 0] step:3221/10000 train_time:219111ms step_avg:68.03ms +[2025-07-07 17:20:50] [Rank 0] step:3241/10000 train_time:220536ms step_avg:68.05ms +[2025-07-07 17:20:50] [Rank 0] step:3241/10000 train_time:220536ms step_avg:68.05ms +[2025-07-07 17:20:51] [Rank 0] step:3261/10000 train_time:221897ms step_avg:68.05ms +[2025-07-07 17:20:51] [Rank 0] step:3261/10000 train_time:221897ms step_avg:68.05ms +[2025-07-07 17:20:52] [Rank 0] step:3281/10000 train_time:223275ms step_avg:68.05ms +[2025-07-07 17:20:52] [Rank 0] step:3281/10000 train_time:223275ms step_avg:68.05ms +[2025-07-07 17:20:54] [Rank 0] step:3301/10000 train_time:224653ms step_avg:68.06ms +[2025-07-07 17:20:54] [Rank 
+[2025-07-07 17:20:55] [Rank 0] step:3321/10000 train_time:226033ms step_avg:68.06ms
+[2025-07-07 17:20:57] [Rank 0] step:3341/10000 train_time:227413ms step_avg:68.07ms
+[2025-07-07 17:20:58] [Rank 0] step:3361/10000 train_time:228793ms step_avg:68.07ms
+[2025-07-07 17:20:59] [Rank 0] step:3381/10000 train_time:230171ms step_avg:68.08ms
+[2025-07-07 17:21:01] [Rank 0] step:3401/10000 train_time:231550ms step_avg:68.08ms
+[2025-07-07 17:21:02] [Rank 0] step:3421/10000 train_time:232977ms step_avg:68.10ms
+[2025-07-07 17:21:03] [Rank 0] step:3441/10000 train_time:234349ms step_avg:68.10ms
+[2025-07-07 17:21:05] [Rank 0] step:3461/10000 train_time:235729ms step_avg:68.11ms
+[2025-07-07 17:21:06] [Rank 0] step:3481/10000 train_time:237111ms step_avg:68.12ms
+[2025-07-07 17:21:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:21:09] [Rank 0] PRINT: step:3500/10000 train_loss:4.0068 val_loss:3.8094 train_time:239121ms step_avg:68.32ms
+[2025-07-07 17:21:09] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:21:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:21:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:26:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:26:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:26:39] [Rank 0] Total Loss: 5.4521
+[2025-07-07 17:26:39] [Rank 0] Total FTA: 0.0861
+[2025-07-07 17:26:39] [Rank 0] Group 0 Loss: 5.4799
+[2025-07-07 17:26:39] [Rank 0] Group 1 Loss: 5.3598
+[2025-07-07 17:26:39] [Rank 0] Group 2 Loss: 5.5837
+[2025-07-07 17:26:39] [Rank 0] Group 3 Loss: 5.3728
+[2025-07-07 17:26:39] [Rank 0] Group 4 Loss: 5.5162
+[2025-07-07 17:26:39] [Rank 0] Group 5 Loss: 5.4411
+[2025-07-07 17:26:39] [Rank 0] Group 6 Loss: 5.5031
+[2025-07-07 17:26:39] [Rank 0] Group 7 Loss: 5.4413
+[2025-07-07 17:26:39] [Rank 0] Group 8 Loss: 5.4418
+[2025-07-07 17:26:39] [Rank 0] Group 9 Loss: 5.4008
+[2025-07-07 17:26:39] [Rank 0] Group 10 Loss: 5.4364
+[2025-07-07 17:26:39] [Rank 0] Group 11 Loss: 5.4359
+[2025-07-07 17:26:39] [Rank 0] Group 0 FTA: 0.1821
+[2025-07-07 17:26:39] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:26:39] [Rank 0] Group 2 FTA: 0.1094
+[2025-07-07 17:26:39] [Rank 0] Group 3 FTA: 0.0573
+[2025-07-07 17:26:39] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 17:26:39] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 17:26:39] [Rank 0] Group 6 FTA: 0.0729
+[2025-07-07 17:26:39] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 17:26:39] [Rank 0] Group 8 FTA: 0.0859
+[2025-07-07 17:26:39] [Rank 0] Group 9 FTA: 0.0547
+[2025-07-07 17:26:39] [Rank 0] Group 10 FTA: 0.0840
+[2025-07-07 17:26:39] [Rank 0] Group 11 FTA: 0.0840
+[2025-07-07 17:26:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:26:40] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:26:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:26:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:26:41] [Rank 0] step:3501/10000 train_time:239132ms step_avg:68.30ms
+[2025-07-07 17:26:42] [Rank 0] step:3521/10000 train_time:239890ms step_avg:68.13ms
+[2025-07-07 17:26:43] [Rank 0] step:3541/10000 train_time:241261ms step_avg:68.13ms
+[2025-07-07 17:26:45] [Rank 0] step:3561/10000 train_time:242633ms step_avg:68.14ms
+[2025-07-07 17:26:46] [Rank 0] step:3581/10000 train_time:244007ms step_avg:68.14ms
+[2025-07-07 17:26:48] [Rank 0] step:3601/10000 train_time:245430ms step_avg:68.16ms
+[2025-07-07 17:26:49] [Rank 0] step:3621/10000 train_time:246808ms step_avg:68.16ms
+[2025-07-07 17:26:50] [Rank 0] step:3641/10000 train_time:248183ms step_avg:68.16ms
+[2025-07-07 17:26:52] [Rank 0] step:3661/10000 train_time:249559ms step_avg:68.17ms
+[2025-07-07 17:26:53] [Rank 0] step:3681/10000 train_time:250937ms step_avg:68.17ms
+[2025-07-07 17:26:55] [Rank 0] step:3701/10000 train_time:252315ms step_avg:68.17ms
+[2025-07-07 17:26:56] [Rank 0] step:3721/10000 train_time:253694ms step_avg:68.18ms
+[2025-07-07 17:26:57] [Rank 0] step:3741/10000 train_time:255073ms step_avg:68.18ms
+[2025-07-07 17:26:59] [Rank 0] step:3761/10000 train_time:256452ms step_avg:68.19ms
+[2025-07-07 17:27:00] [Rank 0] step:3781/10000 train_time:257831ms step_avg:68.19ms
+[2025-07-07 17:27:01] [Rank 0] step:3801/10000 train_time:259260ms step_avg:68.21ms
+[2025-07-07 17:27:03] [Rank 0] step:3821/10000 train_time:260641ms step_avg:68.21ms
+[2025-07-07 17:27:04] [Rank 0] step:3841/10000 train_time:262021ms step_avg:68.22ms
+[2025-07-07 17:27:06] [Rank 0] step:3861/10000 train_time:263400ms step_avg:68.22ms
+[2025-07-07 17:27:07] [Rank 0] step:3881/10000 train_time:264781ms step_avg:68.22ms
+[2025-07-07 17:27:08] [Rank 0] step:3901/10000 train_time:266161ms step_avg:68.23ms
+[2025-07-07 17:27:10] [Rank 0] step:3921/10000 train_time:267541ms step_avg:68.23ms
+[2025-07-07 17:27:11] [Rank 0] step:3941/10000 train_time:268921ms step_avg:68.24ms
+[2025-07-07 17:27:13] [Rank 0] step:3961/10000 train_time:270348ms step_avg:68.25ms
+[2025-07-07 17:27:14] [Rank 0] step:3981/10000 train_time:271709ms step_avg:68.25ms
+[2025-07-07 17:27:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:27:16] [Rank 0] PRINT: step:4000/10000 train_loss:3.6335 val_loss:3.4729 train_time:273716ms step_avg:68.43ms
+[2025-07-07 17:27:16] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:27:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:27:16] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:32:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:32:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:32:46] [Rank 0] Total Loss: 5.2866
+[2025-07-07 17:32:46] [Rank 0] Total FTA: 0.0728
+[2025-07-07 17:32:46] [Rank 0] Group 0 Loss: 5.3660
+[2025-07-07 17:32:46] [Rank 0] Group 1 Loss: 5.1847
+[2025-07-07 17:32:46] [Rank 0] Group 2 Loss: 5.3541
+[2025-07-07 17:32:46] [Rank 0] Group 3 Loss: 5.1359
+[2025-07-07 17:32:46] [Rank 0] Group 4 Loss: 5.3994
+[2025-07-07 17:32:46] [Rank 0] Group 5 Loss: 5.2572
+[2025-07-07 17:32:46] [Rank 0] Group 6 Loss: 5.3333
+[2025-07-07 17:32:46] [Rank 0] Group 7 Loss: 5.2630
+[2025-07-07 17:32:46] [Rank 0] Group 8 Loss: 5.2529
+[2025-07-07 17:32:46] [Rank 0] Group 9 Loss: 5.2800
+[2025-07-07 17:32:46] [Rank 0] Group 10 Loss: 5.2957
+[2025-07-07 17:32:46] [Rank 0] Group 11 Loss: 5.2664
+[2025-07-07 17:32:46] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 17:32:46] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:32:46] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 17:32:47] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-07 17:32:47] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 17:32:47] [Rank 0] Group 5 FTA: 0.0312
+[2025-07-07 17:32:47] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 17:32:47] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-07 17:32:47] [Rank 0] Group 8 FTA: 0.0833
+[2025-07-07 17:32:47] [Rank 0] Group 9 FTA: 0.0625
+[2025-07-07 17:32:47] [Rank 0] Group 10 FTA: 0.1035
+[2025-07-07 17:32:47] [Rank 0] Group 11 FTA: 0.0576
+[2025-07-07 17:32:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:32:47] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:32:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:32:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:32:48] [Rank 0] step:4001/10000 train_time:273726ms step_avg:68.41ms
+[2025-07-07 17:32:49] [Rank 0] step:4021/10000 train_time:274495ms step_avg:68.27ms
+[2025-07-07 17:32:51] [Rank 0] step:4041/10000 train_time:275866ms step_avg:68.27ms
+[2025-07-07 17:32:52] [Rank 0] step:4061/10000 train_time:277237ms step_avg:68.27ms
+[2025-07-07 17:32:54] [Rank 0] step:4081/10000 train_time:278689ms step_avg:68.29ms
+[2025-07-07 17:32:55] [Rank 0] step:4101/10000 train_time:280064ms step_avg:68.29ms
+[2025-07-07 17:32:56] [Rank 0] step:4121/10000 train_time:281441ms step_avg:68.29ms
+[2025-07-07 17:32:58] [Rank 0] step:4141/10000 train_time:282818ms step_avg:68.30ms
+[2025-07-07 17:32:59] [Rank 0] step:4161/10000 train_time:284237ms step_avg:68.31ms
+[2025-07-07 17:33:01] [Rank 0] step:4181/10000 train_time:285614ms step_avg:68.31ms
+[2025-07-07 17:33:02] [Rank 0] step:4201/10000 train_time:286993ms step_avg:68.32ms
+[2025-07-07 17:33:03] [Rank 0] step:4221/10000 train_time:288374ms step_avg:68.32ms
+[2025-07-07 17:33:05] [Rank 0] step:4241/10000 train_time:289754ms step_avg:68.32ms
+[2025-07-07 17:33:06] [Rank 0] step:4261/10000 train_time:291134ms step_avg:68.33ms
+[2025-07-07 17:33:07] [Rank 0] step:4281/10000 train_time:292513ms step_avg:68.33ms
+[2025-07-07 17:33:09] [Rank 0] step:4301/10000 train_time:293893ms step_avg:68.33ms
+[2025-07-07 17:33:10] [Rank 0] step:4321/10000 train_time:295273ms step_avg:68.33ms
+[2025-07-07 17:33:12] [Rank 0] step:4341/10000 train_time:296683ms step_avg:68.34ms
+[2025-07-07 17:33:13] [Rank 0] step:4361/10000 train_time:298062ms step_avg:68.35ms
+[2025-07-07 17:33:14] [Rank 0] step:4381/10000 train_time:299442ms step_avg:68.35ms
+[2025-07-07 17:33:16] [Rank 0] step:4401/10000 train_time:300824ms step_avg:68.35ms
+[2025-07-07 17:33:17] [Rank 0] step:4421/10000 train_time:302204ms step_avg:68.36ms
+[2025-07-07 17:33:18] [Rank 0] step:4441/10000 train_time:303586ms step_avg:68.36ms
+[2025-07-07 17:33:20] [Rank 0] step:4461/10000 train_time:304968ms step_avg:68.36ms
+[2025-07-07 17:33:21] [Rank 0] step:4481/10000 train_time:306349ms step_avg:68.37ms
+[2025-07-07 17:33:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:33:24] [Rank 0] PRINT: step:4500/10000 train_loss:3.3267 val_loss:3.1928 train_time:308357ms step_avg:68.52ms
+[2025-07-07 17:33:24] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:33:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:33:24] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:38:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:38:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:38:55] [Rank 0] Total Loss: 5.1492
+[2025-07-07 17:38:55] [Rank 0] Total FTA: 0.0765
+[2025-07-07 17:38:55] [Rank 0] Group 0 Loss: 5.2591
+[2025-07-07 17:38:55] [Rank 0] Group 1 Loss: 5.0272
+[2025-07-07 17:38:55] [Rank 0] Group 2 Loss: 5.2108
+[2025-07-07 17:38:55] [Rank 0] Group 3 Loss: 4.9947
+[2025-07-07 17:38:55] [Rank 0] Group 4 Loss: 5.1582
+[2025-07-07 17:38:55] [Rank 0] Group 5 Loss: 5.1340
+[2025-07-07 17:38:55] [Rank 0] Group 6 Loss: 5.2058
+[2025-07-07 17:38:55] [Rank 0] Group 7 Loss: 5.1605
+[2025-07-07 17:38:55] [Rank 0] Group 8 Loss: 5.1198
+[2025-07-07 17:38:55] [Rank 0] Group 9 Loss: 5.1559
+[2025-07-07 17:38:55] [Rank 0] Group 10 Loss: 5.1298
+[2025-07-07 17:38:55] [Rank 0] Group 11 Loss: 5.1432
+[2025-07-07 17:38:55] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 17:38:55] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:38:55] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-07 17:38:55] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-07 17:38:55] [Rank 0] Group 4 FTA: 0.0182
+[2025-07-07 17:38:55] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-07 17:38:55] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-07 17:38:55] [Rank 0] Group 7 FTA: 0.0807
+[2025-07-07 17:38:55] [Rank 0] Group 8 FTA: 0.1172
+[2025-07-07 17:38:55] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-07 17:38:55] [Rank 0] Group 10 FTA: 0.0664
+[2025-07-07 17:38:55] [Rank 0] Group 11 FTA: 0.0713
+[2025-07-07 17:38:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:38:56] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:38:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:38:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:38:58] [Rank 0] step:4501/10000 train_time:308374ms step_avg:68.51ms
+[2025-07-07 17:38:59] [Rank 0] step:4521/10000 train_time:309824ms step_avg:68.53ms
+[2025-07-07 17:39:00] [Rank 0] step:4541/10000 train_time:311196ms step_avg:68.53ms
+[2025-07-07 17:39:02] [Rank 0] step:4561/10000 train_time:312568ms step_avg:68.53ms
+[2025-07-07 17:39:03] [Rank 0] step:4581/10000 train_time:313941ms step_avg:68.53ms
+[2025-07-07 17:39:04] [Rank 0] step:4601/10000 train_time:315315ms step_avg:68.53ms
+[2025-07-07 17:39:06] [Rank 0] step:4621/10000 train_time:316690ms step_avg:68.53ms
+[2025-07-07 17:39:07] [Rank 0] step:4641/10000 train_time:318065ms step_avg:68.53ms
+[2025-07-07 17:39:09] [Rank 0] step:4661/10000 train_time:319442ms step_avg:68.54ms
+[2025-07-07 17:39:10] [Rank 0] step:4681/10000 train_time:320831ms step_avg:68.54ms
+[2025-07-07 17:39:11] [Rank 0] step:4701/10000 train_time:322242ms step_avg:68.55ms
+[2025-07-07 17:39:13] [Rank 0] step:4721/10000 train_time:323685ms step_avg:68.56ms
+[2025-07-07 17:39:14] [Rank 0] step:4741/10000 train_time:325064ms step_avg:68.56ms
+[2025-07-07 17:39:16] [Rank 0] step:4761/10000 train_time:326445ms step_avg:68.57ms
+[2025-07-07 17:39:17] [Rank 0] step:4781/10000 train_time:327826ms step_avg:68.57ms
+[2025-07-07 17:39:18] [Rank 0] step:4801/10000 train_time:329205ms step_avg:68.57ms
+[2025-07-07 17:39:20] [Rank 0] step:4821/10000 train_time:330586ms step_avg:68.57ms
+[2025-07-07 17:39:21] [Rank 0] step:4841/10000 train_time:331968ms step_avg:68.57ms
+[2025-07-07 17:39:22] [Rank 0] step:4861/10000 train_time:333349ms step_avg:68.58ms
+[2025-07-07 17:39:24] [Rank 0] step:4881/10000 train_time:334755ms step_avg:68.58ms
+[2025-07-07 17:39:25] [Rank 0] step:4901/10000 train_time:336137ms step_avg:68.59ms
+[2025-07-07 17:39:27] [Rank 0] step:4921/10000 train_time:337518ms step_avg:68.59ms
+[2025-07-07 17:39:28] [Rank 0] step:4941/10000 train_time:338900ms step_avg:68.59ms
+[2025-07-07 17:39:29] [Rank 0] step:4961/10000 train_time:340282ms step_avg:68.59ms
+[2025-07-07 17:39:31] [Rank 0] step:4981/10000 train_time:341663ms step_avg:68.59ms
+[2025-07-07 17:39:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:39:33] [Rank 0] PRINT: step:5000/10000 train_loss:3.0745 val_loss:2.9666 train_time:343673ms step_avg:68.73ms
+[2025-07-07 17:39:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:39:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:39:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:45:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:45:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:45:03] [Rank 0] Total Loss: 5.0516
+[2025-07-07 17:45:03] [Rank 0] Total FTA: 0.0863
+[2025-07-07 17:45:03] [Rank 0] Group 0 Loss: 5.1986
+[2025-07-07 17:45:03] [Rank 0] Group 1 Loss: 4.9165
+[2025-07-07 17:45:03] [Rank 0] Group 2 Loss: 5.0974
+[2025-07-07 17:45:03] [Rank 0] Group 3 Loss: 4.9314
+[2025-07-07 17:45:03] [Rank 0] Group 4 Loss: 5.0859
+[2025-07-07 17:45:03] [Rank 0] Group 5 Loss: 5.0208
+[2025-07-07 17:45:03] [Rank 0] Group 6 Loss: 5.0949
+[2025-07-07 17:45:03] [Rank 0] Group 7 Loss: 5.0558
+[2025-07-07 17:45:03] [Rank 0] Group 8 Loss: 5.0637
+[2025-07-07 17:45:03] [Rank 0] Group 9 Loss: 5.0457
+[2025-07-07 17:45:03] [Rank 0] Group 10 Loss: 5.0398
+[2025-07-07 17:45:03] [Rank 0] Group 11 Loss: 5.0032
+[2025-07-07 17:45:03] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 17:45:03] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:45:03] [Rank 0] Group 2 FTA: 0.1693
+[2025-07-07 17:45:03] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 17:45:03] [Rank 0] Group 4 FTA: 0.0339
+[2025-07-07 17:45:03] [Rank 0] Group 5 FTA: 0.0599
+[2025-07-07 17:45:03] [Rank 0] Group 6 FTA: 0.0833
+[2025-07-07 17:45:03] [Rank 0] Group 7 FTA: 0.0781
+[2025-07-07 17:45:03] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-07 17:45:03] [Rank 0] Group 9 FTA: 0.0703
+[2025-07-07 17:45:03] [Rank 0] Group 10 FTA: 0.0801
+[2025-07-07 17:45:03] [Rank 0] Group 11 FTA: 0.0811
+[2025-07-07 17:45:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:45:04] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:45:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:45:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:45:04] [Rank 0] step:5001/10000 train_time:343683ms step_avg:68.72ms
+[2025-07-07 17:45:06] [Rank 0] step:5021/10000 train_time:344461ms step_avg:68.60ms
+[2025-07-07 17:45:07] [Rank 0] step:5041/10000 train_time:346496ms step_avg:68.74ms
+[2025-07-07 17:45:08] [Rank 0] step:5061/10000 train_time:347237ms step_avg:68.61ms
+[2025-07-07 17:45:10] [Rank 0] step:5081/10000 train_time:348611ms step_avg:68.61ms
+[2025-07-07 17:45:11] [Rank 0] step:5101/10000 train_time:349986ms step_avg:68.61ms
+[2025-07-07 17:45:13] [Rank 0] step:5121/10000 train_time:351362ms step_avg:68.61ms
+[2025-07-07 17:45:14] [Rank 0] step:5141/10000 train_time:352739ms step_avg:68.61ms
+[2025-07-07 17:45:15] [Rank 0] step:5161/10000 train_time:354114ms step_avg:68.61ms
+[2025-07-07 17:45:17] [Rank 0] step:5181/10000 train_time:355492ms step_avg:68.61ms
+[2025-07-07 17:45:18] [Rank 0] step:5201/10000 train_time:356871ms step_avg:68.62ms
+[2025-07-07 17:45:19] [Rank 0] step:5221/10000 train_time:358296ms step_avg:68.63ms
+[2025-07-07 17:45:21] [Rank 0] step:5241/10000 train_time:359657ms step_avg:68.62ms
+[2025-07-07 17:45:22] [Rank 0] step:5261/10000 train_time:361035ms step_avg:68.62ms
+[2025-07-07 17:45:24] [Rank 0] step:5281/10000 train_time:362415ms step_avg:68.63ms
+[2025-07-07 17:45:25] [Rank 0] step:5301/10000 train_time:363795ms step_avg:68.63ms
+[2025-07-07 17:45:26] [Rank 0] step:5321/10000 train_time:365175ms step_avg:68.63ms
+[2025-07-07 17:45:28] [Rank 0] step:5341/10000 train_time:366554ms step_avg:68.63ms
+[2025-07-07 17:45:29] [Rank 0] step:5361/10000 train_time:367934ms step_avg:68.63ms
+[2025-07-07 17:45:31] [Rank 0] step:5381/10000 train_time:369316ms step_avg:68.63ms
+[2025-07-07 17:45:32] [Rank 0] step:5401/10000 train_time:370762ms step_avg:68.65ms
+[2025-07-07 17:45:33] [Rank 0] step:5421/10000 train_time:372136ms step_avg:68.65ms
+[2025-07-07 17:45:35] [Rank 0] step:5441/10000 train_time:373518ms step_avg:68.65ms
+[2025-07-07 17:45:36] [Rank 0] step:5461/10000 train_time:374900ms step_avg:68.65ms
+[2025-07-07 17:45:37] [Rank 0] step:5481/10000 train_time:376283ms step_avg:68.65ms
+[2025-07-07 17:45:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:45:40] [Rank 0] PRINT: step:5500/10000 train_loss:2.8640 val_loss:2.7720 train_time:378295ms step_avg:68.78ms
+[2025-07-07 17:45:40] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:45:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:45:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:51:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:51:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:51:09] [Rank 0] Total Loss: 4.9915
+[2025-07-07 17:51:09] [Rank 0] Total FTA: 0.0912
+[2025-07-07 17:51:09] [Rank 0] Group 0 Loss: 5.2167
+[2025-07-07 17:51:09] [Rank 0] Group 1 Loss: 4.7828
+[2025-07-07 17:51:09] [Rank 0] Group 2 Loss: 5.0435
+[2025-07-07 17:51:09] [Rank 0] Group 3 Loss: 4.8474
+[2025-07-07 17:51:09] [Rank 0] Group 4 Loss: 5.0158
+[2025-07-07 17:51:09] [Rank 0] Group 5 Loss: 4.9683
+[2025-07-07 17:51:09] [Rank 0] Group 6 Loss: 5.0406
+[2025-07-07 17:51:09] [Rank 0] Group 7 Loss: 4.9803
+[2025-07-07 17:51:09] [Rank 0] Group 8 Loss: 4.9402
+[2025-07-07 17:51:09] [Rank 0] Group 9 Loss: 4.9526
+[2025-07-07 17:51:09] [Rank 0] Group 10 Loss: 4.9484
+[2025-07-07 17:51:09] [Rank 0] Group 11 Loss: 4.9713
+[2025-07-07 17:51:09] [Rank 0] Group 0 FTA: 0.1912
+[2025-07-07 17:51:09] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:51:09] [Rank 0] Group 2 FTA: 0.1068
+[2025-07-07 17:51:09] [Rank 0] Group 3 FTA: 0.0807
+[2025-07-07 17:51:09] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 17:51:09] [Rank 0] Group 5 FTA: 0.0599
+[2025-07-07 17:51:09] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 17:51:10] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-07 17:51:10] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 17:51:10] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 17:51:10] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-07 17:51:10] [Rank 0] Group 11 FTA: 0.0850
+[2025-07-07 17:51:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:51:10] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:51:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:51:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:51:11] [Rank 0] step:5501/10000 train_time:378307ms step_avg:68.77ms
+[2025-07-07 17:51:12] [Rank 0] step:5521/10000 train_time:379078ms step_avg:68.66ms
+[2025-07-07 17:51:14] [Rank 0] step:5541/10000 train_time:380450ms step_avg:68.66ms
+[2025-07-07 17:51:15] [Rank 0] step:5561/10000 train_time:381824ms step_avg:68.66ms
+[2025-07-07 17:51:16] [Rank 0] step:5581/10000 train_time:383456ms step_avg:68.71ms
+[2025-07-07 17:51:18] [Rank 0] step:5601/10000 train_time:384601ms step_avg:68.67ms
+[2025-07-07 17:51:19] [Rank 0] step:5621/10000 train_time:385979ms step_avg:68.67ms
+[2025-07-07 17:51:21] [Rank 0] step:5641/10000 train_time:387357ms step_avg:68.67ms
+[2025-07-07 17:51:22] [Rank 0] step:5661/10000 train_time:388736ms step_avg:68.67ms
+[2025-07-07 17:51:23] [Rank 0] step:5681/10000 train_time:390115ms step_avg:68.67ms
+[2025-07-07 17:51:25] [Rank 0] step:5701/10000 train_time:391494ms step_avg:68.67ms
+[2025-07-07 17:51:26] [Rank 0] step:5721/10000 train_time:392873ms step_avg:68.67ms
+[2025-07-07 17:51:27] [Rank 0] step:5741/10000 train_time:394252ms step_avg:68.67ms
+[2025-07-07 17:51:29] [Rank 0] step:5761/10000 train_time:395883ms step_avg:68.72ms
+[2025-07-07 17:51:30] [Rank 0] step:5781/10000 train_time:397052ms step_avg:68.68ms
+[2025-07-07 17:51:32] [Rank 0] step:5801/10000 train_time:398431ms step_avg:68.68ms
+[2025-07-07 17:51:33] [Rank 0] step:5821/10000 train_time:399812ms step_avg:68.68ms
+[2025-07-07 17:51:34] [Rank 0] step:5841/10000 train_time:401193ms step_avg:68.69ms
+[2025-07-07 17:51:36] [Rank 0] step:5861/10000 train_time:402573ms step_avg:68.69ms
+[2025-07-07 17:51:37] [Rank 0] step:5881/10000 train_time:403953ms step_avg:68.69ms
+[2025-07-07 17:51:39] [Rank 0] step:5901/10000 train_time:405333ms step_avg:68.69ms
+[2025-07-07 17:51:40] [Rank 0] step:5921/10000 train_time:406715ms step_avg:68.69ms
+[2025-07-07 17:51:41] [Rank 0] step:5941/10000 train_time:408143ms step_avg:68.70ms
+[2025-07-07 17:51:43] [Rank 0] step:5961/10000 train_time:409500ms step_avg:68.70ms
+[2025-07-07 17:51:44] [Rank 0] step:5981/10000 train_time:410880ms step_avg:68.70ms
+[2025-07-07 17:51:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:51:46] [Rank 0] PRINT: step:6000/10000 train_loss:2.6880 val_loss:2.6133 train_time:412888ms step_avg:68.81ms
+[2025-07-07 17:51:46] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:51:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:51:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:57:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:57:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:57:17] [Rank 0] Total Loss: 4.8978
+[2025-07-07 17:57:17] [Rank 0] Total FTA: 0.0877
+[2025-07-07 17:57:17] [Rank 0] Group 0 Loss: 5.1955
+[2025-07-07 17:57:17] [Rank 0] Group 1 Loss: 4.6558
+[2025-07-07 17:57:17] [Rank 0] Group 2 Loss: 4.9675
+[2025-07-07 17:57:17] [Rank 0] Group 3 Loss: 4.7667
+[2025-07-07 17:57:17] [Rank 0] Group 4 Loss: 4.9323
+[2025-07-07 17:57:17] [Rank 0] Group 5 Loss: 4.8160
+[2025-07-07 17:57:17] [Rank 0] Group 6 Loss: 4.8889
+[2025-07-07 17:57:17] [Rank 0] Group 7 Loss: 4.8830
+[2025-07-07 17:57:17] [Rank 0] Group 8 Loss: 4.8700
+[2025-07-07 17:57:17] [Rank 0] Group 9 Loss: 4.8182
+[2025-07-07 17:57:17] [Rank 0] Group 10 Loss: 4.8617
+[2025-07-07 17:57:17] [Rank 0] Group 11 Loss: 4.8632
+[2025-07-07 17:57:17] [Rank 0] Group 0 FTA: 0.1808
+[2025-07-07 17:57:17] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 17:57:17] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 17:57:17] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-07 17:57:17] [Rank 0] Group 4 FTA: 0.0260
+[2025-07-07 17:57:17] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-07 17:57:17] [Rank 0] Group 6 FTA: 0.0859
+[2025-07-07 17:57:17] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 17:57:17] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 17:57:17] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 17:57:17] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-07 17:57:17] [Rank 0] Group 11 FTA: 0.0879
+[2025-07-07 17:57:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 17:57:17] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 17:57:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 17:57:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 17:57:18] [Rank 0] step:6001/10000 train_time:412898ms step_avg:68.80ms
+[2025-07-07 17:57:19] [Rank 0] step:6021/10000 train_time:413653ms step_avg:68.70ms
+[2025-07-07 17:57:21] [Rank 0] step:6041/10000 train_time:415022ms step_avg:68.70ms
+[2025-07-07 17:57:22] [Rank 0] step:6061/10000 train_time:416393ms step_avg:68.70ms
+[2025-07-07 17:57:24] [Rank 0] step:6081/10000 train_time:417765ms step_avg:68.70ms
+[2025-07-07 17:57:25] [Rank 0] step:6101/10000 train_time:419139ms step_avg:68.70ms
+[2025-07-07 17:57:26] [Rank 0] step:6121/10000 train_time:420513ms step_avg:68.70ms
+[2025-07-07 17:57:28] [Rank 0] step:6141/10000 train_time:421914ms step_avg:68.70ms
+[2025-07-07 17:57:29] [Rank 0] step:6161/10000 train_time:423292ms step_avg:68.71ms
+[2025-07-07 17:57:30] [Rank 0] step:6181/10000 train_time:424669ms step_avg:68.71ms
+[2025-07-07 17:57:32] [Rank 0] step:6201/10000 train_time:426047ms step_avg:68.71ms
+[2025-07-07 17:57:33] [Rank 0] step:6221/10000 train_time:427425ms step_avg:68.71ms
+[2025-07-07 17:57:35] [Rank 0] step:6241/10000 train_time:428802ms step_avg:68.71ms
+[2025-07-07 17:57:36] [Rank 0] step:6261/10000 train_time:430180ms step_avg:68.71ms
+[2025-07-07 17:57:37] [Rank 0] step:6281/10000 train_time:431560ms step_avg:68.71ms
+[2025-07-07 17:57:39] [Rank 0] step:6301/10000 train_time:432939ms step_avg:68.71ms
0] step:6301/10000 train_time:432939ms step_avg:68.71ms +[2025-07-07 17:57:40] [Rank 0] step:6321/10000 train_time:434365ms step_avg:68.72ms +[2025-07-07 17:57:40] [Rank 0] step:6321/10000 train_time:434365ms step_avg:68.72ms +[2025-07-07 17:57:42] [Rank 0] step:6341/10000 train_time:435746ms step_avg:68.72ms +[2025-07-07 17:57:42] [Rank 0] step:6341/10000 train_time:435746ms step_avg:68.72ms +[2025-07-07 17:57:43] [Rank 0] step:6361/10000 train_time:437126ms step_avg:68.72ms +[2025-07-07 17:57:43] [Rank 0] step:6361/10000 train_time:437126ms step_avg:68.72ms +[2025-07-07 17:57:44] [Rank 0] step:6381/10000 train_time:438507ms step_avg:68.72ms +[2025-07-07 17:57:44] [Rank 0] step:6381/10000 train_time:438507ms step_avg:68.72ms +[2025-07-07 17:57:46] [Rank 0] step:6401/10000 train_time:439887ms step_avg:68.72ms +[2025-07-07 17:57:46] [Rank 0] step:6401/10000 train_time:439887ms step_avg:68.72ms +[2025-07-07 17:57:47] [Rank 0] step:6421/10000 train_time:441269ms step_avg:68.72ms +[2025-07-07 17:57:47] [Rank 0] step:6421/10000 train_time:441269ms step_avg:68.72ms +[2025-07-07 17:57:48] [Rank 0] step:6441/10000 train_time:442651ms step_avg:68.72ms +[2025-07-07 17:57:48] [Rank 0] step:6441/10000 train_time:442651ms step_avg:68.72ms +[2025-07-07 17:57:50] [Rank 0] step:6461/10000 train_time:444033ms step_avg:68.73ms +[2025-07-07 17:57:50] [Rank 0] step:6461/10000 train_time:444033ms step_avg:68.73ms +[2025-07-07 17:57:51] [Rank 0] step:6481/10000 train_time:445416ms step_avg:68.73ms +[2025-07-07 17:57:51] [Rank 0] step:6481/10000 train_time:445416ms step_avg:68.73ms +[2025-07-07 17:57:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:57:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:57:54] [Rank 0] PRINT: step:6500/10000 train_loss:2.5438 val_loss:2.4846 train_time:447474ms step_avg:68.84ms +[2025-07-07 17:57:54] [Rank 0] PRINT: step:6500/10000 train_loss:2.5438 val_loss:2.4846 train_time:447474ms step_avg:68.84ms +[2025-07-07 17:57:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:57:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:57:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 17:57:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 17:57:54] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:57:54] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 18:03:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 18:03:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 18:03:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 18:03:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 18:03:22] [Rank 0] Total Loss: 4.8637 +[2025-07-07 18:03:22] [Rank 0] Total Loss: 4.8637 +[2025-07-07 18:03:22] [Rank 0] Total FTA: 0.0953 +[2025-07-07 18:03:22] [Rank 0] Total FTA: 0.0953 +[2025-07-07 18:03:22] [Rank 0] Group 0 Loss: 5.1488 +[2025-07-07 18:03:22] [Rank 0] Group 0 Loss: 5.1488 +[2025-07-07 18:03:22] [Rank 0] Group 1 Loss: 4.6581 +[2025-07-07 18:03:22] [Rank 0] Group 1 Loss: 4.6581 +[2025-07-07 18:03:22] [Rank 0] Group 2 Loss: 4.9071 +[2025-07-07 18:03:22] [Rank 0] Group 2 Loss: 4.9071 +[2025-07-07 18:03:22] [Rank 0] Group 3 Loss: 4.7937 +[2025-07-07 18:03:22] [Rank 0] Group 3 Loss: 4.7937 +[2025-07-07 18:03:22] [Rank 0] Group 4 Loss: 4.8712 +[2025-07-07 18:03:22] [Rank 0] Group 4 Loss: 4.8712 +[2025-07-07 18:03:22] [Rank 0] Group 5 Loss: 4.8085 +[2025-07-07 18:03:22] [Rank 0] Group 5 Loss: 4.8085 +[2025-07-07 18:03:22] [Rank 0] Group 6 Loss: 4.8959 +[2025-07-07 18:03:22] [Rank 0] Group 6 Loss: 4.8959 +[2025-07-07 18:03:22] [Rank 0] Group 7 Loss: 4.8290 +[2025-07-07 18:03:22] [Rank 0] Group 7 Loss: 4.8290 +[2025-07-07 18:03:22] [Rank 0] Group 8 Loss: 4.7985 +[2025-07-07 18:03:22] [Rank 0] Group 8 Loss: 4.7985 +[2025-07-07 18:03:22] [Rank 0] Group 9 Loss: 4.8121 +[2025-07-07 18:03:22] [Rank 0] Group 9 Loss: 4.8121 +[2025-07-07 18:03:22] [Rank 0] Group 10 Loss: 4.8219 +[2025-07-07 18:03:22] [Rank 0] Group 10 Loss: 4.8219 +[2025-07-07 18:03:22] [Rank 0] Group 11 Loss: 4.8137 +[2025-07-07 18:03:22] [Rank 0] Group 11 Loss: 4.8137 +[2025-07-07 18:03:22] [Rank 0] Group 0 FTA: 0.1469 +[2025-07-07 18:03:22] [Rank 0] Group 0 FTA: 0.1469 +[2025-07-07 18:03:23] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 18:03:23] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 18:03:23] [Rank 0] Group 2 FTA: 0.1771 +[2025-07-07 18:03:23] [Rank 0] Group 2 FTA: 0.1771 +[2025-07-07 18:03:23] [Rank 0] Group 3 FTA: 0.0365 +[2025-07-07 18:03:23] [Rank 0] Group 3 FTA: 0.0365 +[2025-07-07 18:03:23] [Rank 0] Group 4 FTA: 0.0339 +[2025-07-07 18:03:23] [Rank 0] Group 4 FTA: 0.0339 +[2025-07-07 18:03:23] [Rank 0] Group 5 FTA: 0.0781 +[2025-07-07 18:03:23] [Rank 0] Group 5 FTA: 0.0781 +[2025-07-07 18:03:23] [Rank 0] Group 6 FTA: 0.1094 +[2025-07-07 18:03:23] [Rank 0] Group 6 FTA: 0.1094 +[2025-07-07 18:03:23] [Rank 0] Group 7 FTA: 0.1250 +[2025-07-07 18:03:23] [Rank 0] Group 7 FTA: 0.1250 +[2025-07-07 18:03:23] [Rank 0] Group 8 FTA: 0.0964 +[2025-07-07 18:03:23] [Rank 0] Group 8 FTA: 0.0964 +[2025-07-07 18:03:23] [Rank 0] Group 9 FTA: 0.1133 +[2025-07-07 18:03:23] [Rank 0] Group 9 FTA: 0.1133 +[2025-07-07 18:03:23] [Rank 0] Group 10 FTA: 0.0801 +[2025-07-07 18:03:23] [Rank 0] Group 10 FTA: 0.0801 +[2025-07-07 18:03:23] [Rank 0] Group 11 FTA: 0.0996 +[2025-07-07 18:03:23] [Rank 0] Group 11 FTA: 0.0996 +[2025-07-07 18:03:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-07 18:03:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-07 18:03:23] [Rank 0] [✓] Per-Class FTA curve 
+[2025-07-07 18:03:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 18:03:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 18:03:24] [Rank 0] step:6501/10000 train_time:447484ms step_avg:68.83ms
+[2025-07-07 18:03:25] [Rank 0] step:6521/10000 train_time:448265ms step_avg:68.74ms
+[2025-07-07 18:03:27] [Rank 0] step:6541/10000 train_time:449660ms step_avg:68.74ms
+[2025-07-07 18:03:28] [Rank 0] step:6561/10000 train_time:451031ms step_avg:68.74ms
+[2025-07-07 18:03:29] [Rank 0] step:6581/10000 train_time:452403ms step_avg:68.74ms
+[2025-07-07 18:03:31] [Rank 0] step:6601/10000 train_time:453777ms step_avg:68.74ms
+[2025-07-07 18:03:32] [Rank 0] step:6621/10000 train_time:455153ms step_avg:68.74ms
+[2025-07-07 18:03:34] [Rank 0] step:6641/10000 train_time:456530ms step_avg:68.74ms
+[2025-07-07 18:03:35] [Rank 0] step:6661/10000 train_time:457954ms step_avg:68.75ms
+[2025-07-07 18:03:36] [Rank 0] step:6681/10000 train_time:459331ms step_avg:68.75ms
+[2025-07-07 18:03:38] [Rank 0] step:6701/10000 train_time:460708ms step_avg:68.75ms
+[2025-07-07 18:03:39] [Rank 0] step:6721/10000 train_time:462087ms step_avg:68.75ms
+[2025-07-07 18:03:41] [Rank 0] step:6741/10000 train_time:463465ms step_avg:68.75ms
+[2025-07-07 18:03:42] [Rank 0] step:6761/10000 train_time:464844ms step_avg:68.75ms
+[2025-07-07 18:03:43] [Rank 0] step:6781/10000 train_time:466223ms step_avg:68.75ms
+[2025-07-07 18:03:45] [Rank 0] step:6801/10000 train_time:467602ms step_avg:68.75ms
+[2025-07-07 18:03:46] [Rank 0] step:6821/10000 train_time:468983ms step_avg:68.76ms
+[2025-07-07 18:03:47] [Rank 0] step:6841/10000 train_time:470410ms step_avg:68.76ms
+[2025-07-07 18:03:49] [Rank 0] step:6861/10000 train_time:471781ms step_avg:68.76ms
+[2025-07-07 18:03:50] [Rank 0] step:6881/10000 train_time:473160ms step_avg:68.76ms
+[2025-07-07 18:03:52] [Rank 0] step:6901/10000 train_time:474541ms step_avg:68.76ms
+[2025-07-07 18:03:53] [Rank 0] step:6921/10000 train_time:475922ms step_avg:68.76ms
+[2025-07-07 18:03:54] [Rank 0] step:6941/10000 train_time:477303ms step_avg:68.77ms
+[2025-07-07 18:03:56] [Rank 0] step:6961/10000 train_time:478685ms step_avg:68.77ms
+[2025-07-07 18:03:57] [Rank 0] step:6981/10000 train_time:480066ms step_avg:68.77ms
+[2025-07-07 18:03:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:03:59] [Rank 0] PRINT: step:7000/10000 train_loss:2.4268 val_loss:2.3784 train_time:482078ms step_avg:68.87ms
+[2025-07-07 18:03:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:04:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:04:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:09:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:09:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:09:31] [Rank 0] Total Loss: 4.8138
+[2025-07-07 18:09:31] [Rank 0] Total FTA: 0.0987
+[2025-07-07 18:09:31] [Rank 0] Group 0 Loss: 5.2037
+[2025-07-07 18:09:31] [Rank 0] Group 1 Loss: 4.4702
+[2025-07-07 18:09:31] [Rank 0] Group 2 Loss: 4.9134
+[2025-07-07 18:09:31] [Rank 0] Group 3 Loss: 4.6946
+[2025-07-07 18:09:31] [Rank 0] Group 4 Loss: 4.8637
+[2025-07-07 18:09:31] [Rank 0] Group 5 Loss: 4.7059
+[2025-07-07 18:09:31] [Rank 0] Group 6 Loss: 4.7965
+[2025-07-07 18:09:31] [Rank 0] Group 7 Loss: 4.7929
+[2025-07-07 18:09:31] [Rank 0] Group 8 Loss: 4.7436
+[2025-07-07 18:09:31] [Rank 0] Group 9 Loss: 4.7786
+[2025-07-07 18:09:31] [Rank 0] Group 10 Loss: 4.7525
+[2025-07-07 18:09:31] [Rank 0] Group 11 Loss: 4.7589
+[2025-07-07 18:09:31] [Rank 0] Group 0 FTA: 0.1808
+[2025-07-07 18:09:31] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 18:09:31] [Rank 0] Group 2 FTA: 0.1693
+[2025-07-07 18:09:31] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-07 18:09:31] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 18:09:31] [Rank 0] Group 5 FTA: 0.0703
+[2025-07-07 18:09:31] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-07 18:09:31] [Rank 0] Group 7 FTA: 0.1250
+[2025-07-07 18:09:31] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-07 18:09:31] [Rank 0] Group 9 FTA: 0.1172
+[2025-07-07 18:09:31] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-07 18:09:31] [Rank 0] Group 11 FTA: 0.0957
+[2025-07-07 18:09:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 18:09:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 18:09:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 18:09:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 18:09:32] [Rank 0] step:7001/10000 train_time:482089ms step_avg:68.86ms
+[2025-07-07 18:09:34] [Rank 0] step:7021/10000 train_time:483101ms step_avg:68.81ms
+[2025-07-07 18:09:35] [Rank 0] step:7041/10000 train_time:484255ms step_avg:68.78ms
+[2025-07-07 18:09:36] [Rank 0] step:7061/10000 train_time:485626ms step_avg:68.78ms
+[2025-07-07 18:09:38] [Rank 0] step:7081/10000 train_time:486999ms step_avg:68.78ms
+[2025-07-07 18:09:39] [Rank 0] step:7101/10000 train_time:488372ms step_avg:68.78ms
+[2025-07-07 18:09:41] [Rank 0] step:7121/10000 train_time:489748ms step_avg:68.78ms
+[2025-07-07 18:09:42] [Rank 0] step:7141/10000 train_time:491124ms step_avg:68.78ms
+[2025-07-07 18:09:43] [Rank 0] step:7161/10000 train_time:492499ms step_avg:68.78ms
+[2025-07-07 18:09:45] [Rank 0] step:7181/10000 train_time:493878ms step_avg:68.78ms
+[2025-07-07 18:09:46] [Rank 0] step:7201/10000 train_time:495549ms step_avg:68.82ms
+[2025-07-07 18:09:48] [Rank 0] step:7221/10000 train_time:496710ms step_avg:68.79ms
+[2025-07-07 18:09:49] [Rank 0] step:7241/10000 train_time:498089ms step_avg:68.79ms
+[2025-07-07 18:09:50] [Rank 0] step:7261/10000 train_time:499467ms step_avg:68.79ms
+[2025-07-07 18:09:52] [Rank 0] step:7281/10000 train_time:500844ms step_avg:68.79ms
+[2025-07-07 18:09:53] [Rank 0] step:7301/10000 train_time:502225ms step_avg:68.79ms
+[2025-07-07 18:09:54] [Rank 0] step:7321/10000 train_time:503606ms step_avg:68.79ms
+[2025-07-07 18:09:56] [Rank 0] step:7341/10000 train_time:504987ms step_avg:68.79ms
+[2025-07-07 18:09:57] [Rank 0] step:7361/10000 train_time:506368ms step_avg:68.79ms
+[2025-07-07 18:09:59] [Rank 0] step:7381/10000 train_time:507751ms step_avg:68.79ms
+[2025-07-07 18:10:00] [Rank 0] step:7401/10000 train_time:509160ms step_avg:68.80ms
+[2025-07-07 18:10:01] [Rank 0] step:7421/10000 train_time:510601ms step_avg:68.80ms
+[2025-07-07 18:10:03] [Rank 0] step:7441/10000 train_time:511923ms step_avg:68.80ms
+[2025-07-07 18:10:04] [Rank 0] step:7461/10000 train_time:513306ms step_avg:68.80ms
+[2025-07-07 18:10:06] [Rank 0] step:7481/10000 train_time:514690ms step_avg:68.80ms
+[2025-07-07 18:10:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:10:08] [Rank 0] PRINT: step:7500/10000 train_loss:2.3323 val_loss:2.2953 train_time:516701ms step_avg:68.89ms
+[2025-07-07 18:10:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:10:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:10:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:15:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:15:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:15:36] [Rank 0] Total Loss: 4.7965
+[2025-07-07 18:15:36] [Rank 0] Total FTA: 0.0955
+[2025-07-07 18:15:36] [Rank 0] Group 0 Loss: 5.1432
+[2025-07-07 18:15:36] [Rank 0] Group 1 Loss: 4.4567
+[2025-07-07 18:15:36] [Rank 0] Group 2 Loss: 4.9312
+[2025-07-07 18:15:36] [Rank 0] Group 3 Loss: 4.6845
+[2025-07-07 18:15:36] [Rank 0] Group 4 Loss: 4.8443
+[2025-07-07 18:15:36] [Rank 0] Group 5 Loss: 4.7173
+[2025-07-07 18:15:36] [Rank 0] Group 6 Loss: 4.8062
+[2025-07-07 18:15:36] [Rank 0] Group 7 Loss: 4.7112
+[2025-07-07 18:15:36] [Rank 0] Group 8 Loss: 4.7346
+[2025-07-07 18:15:36] [Rank 0] Group 9 Loss: 4.7614
+[2025-07-07 18:15:36] [Rank 0] Group 10 Loss: 4.7623
+[2025-07-07 18:15:36] [Rank 0] Group 11 Loss: 4.7442
+[2025-07-07 18:15:36] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-07 18:15:36] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 18:15:36] [Rank 0] Group 2 FTA: 0.1719
+[2025-07-07 18:15:36] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 18:15:36] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 18:15:36] [Rank 0] Group 5 FTA: 0.0938
+[2025-07-07 18:15:36] [Rank 0] Group 6 FTA: 0.0755
+[2025-07-07 18:15:36] [Rank 0] Group 7 FTA: 0.1094
+[2025-07-07 18:15:36] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-07 18:15:36] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-07 18:15:36] [Rank 0] Group 10 FTA: 0.1074
+[2025-07-07 18:15:36] [Rank 0] Group 11 FTA: 0.0947
+[2025-07-07 18:15:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 18:15:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 18:15:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 18:15:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 18:15:38] [Rank 0] step:7501/10000 train_time:516711ms step_avg:68.89ms
+[2025-07-07 18:15:39] [Rank 0] step:7521/10000 train_time:517478ms step_avg:68.80ms
+[2025-07-07 18:15:41] [Rank 0] step:7541/10000 train_time:518849ms step_avg:68.80ms
+[2025-07-07 18:15:42] [Rank 0] step:7561/10000 train_time:520900ms step_avg:68.89ms
+[2025-07-07 18:15:43] [Rank 0] step:7581/10000 train_time:521641ms step_avg:68.81ms
+[2025-07-07 18:15:45] [Rank 0] step:7601/10000 train_time:523017ms step_avg:68.81ms
+[2025-07-07 18:15:46] [Rank 0] step:7621/10000 train_time:524392ms step_avg:68.81ms
+[2025-07-07 18:15:48] [Rank 0] step:7641/10000 train_time:525768ms step_avg:68.81ms
+[2025-07-07 18:15:49] [Rank 0] step:7661/10000 train_time:527146ms step_avg:68.81ms
+[2025-07-07 18:15:50] [Rank 0] step:7681/10000 train_time:528524ms step_avg:68.81ms
+[2025-07-07 18:15:52] [Rank 0] step:7701/10000 train_time:529903ms step_avg:68.81ms
+[2025-07-07 18:15:53] [Rank 0] step:7721/10000 train_time:531282ms step_avg:68.81ms
+[2025-07-07 18:15:54] [Rank 0] step:7741/10000 train_time:532662ms step_avg:68.81ms
+[2025-07-07 18:15:56] [Rank 0] step:7761/10000 train_time:534063ms step_avg:68.81ms
+[2025-07-07 18:15:57] [Rank 0] step:7781/10000 train_time:535442ms step_avg:68.81ms
+[2025-07-07 18:15:59] [Rank 0] step:7801/10000 train_time:536825ms step_avg:68.81ms
+[2025-07-07 18:16:00] [Rank 0] step:7821/10000 train_time:538206ms step_avg:68.82ms
+[2025-07-07 18:16:01] [Rank 0] step:7841/10000 train_time:539587ms step_avg:68.82ms
+[2025-07-07 18:16:03] [Rank 0] step:7861/10000 train_time:540969ms step_avg:68.82ms
+[2025-07-07 18:16:04] [Rank 0] step:7881/10000 train_time:542350ms step_avg:68.82ms
+[2025-07-07 18:16:06] [Rank 0] step:7901/10000 train_time:543814ms step_avg:68.83ms
+[2025-07-07 18:16:07] [Rank 0] step:7921/10000 train_time:545196ms step_avg:68.83ms
+[2025-07-07 18:16:08] [Rank 0] step:7941/10000 train_time:546579ms step_avg:68.83ms
+[2025-07-07 18:16:10] [Rank 0] step:7961/10000 train_time:547962ms step_avg:68.83ms
+[2025-07-07 18:16:11] [Rank 0] step:7981/10000 train_time:549344ms step_avg:68.83ms
+[2025-07-07 18:16:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:16:13] [Rank 0] PRINT: step:8000/10000 train_loss:2.2565 val_loss:2.2263 train_time:551356ms step_avg:68.92ms
+[2025-07-07 18:16:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:16:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:16:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:21:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:21:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:21:44] [Rank 0] Total Loss: 4.7730
+[2025-07-07 18:21:44] [Rank 0] Total FTA: 0.0955
+[2025-07-07 18:21:44] [Rank 0] Group 0 Loss: 5.0464
+[2025-07-07 18:21:44] [Rank 0] Group 1 Loss: 4.4543
+[2025-07-07 18:21:44] [Rank 0] Group 2 Loss: 4.9305
+[2025-07-07 18:21:44] [Rank 0] Group 3 Loss: 4.6982
+[2025-07-07 18:21:44] [Rank 0] Group 4 Loss: 4.7214
+[2025-07-07 18:21:44] [Rank 0] Group 5 Loss: 4.7360
+[2025-07-07 18:21:44] [Rank 0] Group 6 Loss: 4.7722
+[2025-07-07 18:21:44] [Rank 0] Group 7 Loss: 4.7387
+[2025-07-07 18:21:44] [Rank 0] Group 8 Loss: 4.7328
+[2025-07-07 18:21:44] [Rank 0] Group 9 Loss: 4.7457
+[2025-07-07 18:21:44] [Rank 0] Group 10 Loss: 4.7640
+[2025-07-07 18:21:44] [Rank 0] Group 11 Loss: 4.7288
+[2025-07-07 18:21:44] [Rank 0] Group 0 FTA: 0.1508
+[2025-07-07 18:21:44] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 18:21:44] [Rank 0] Group 2 FTA: 0.1849
+[2025-07-07 18:21:44] [Rank 0] Group 3 FTA: 0.0443
+[2025-07-07 18:21:44] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 18:21:44] [Rank 0] Group 5 FTA: 0.0859
+[2025-07-07 18:21:44] [Rank 0] Group 6 FTA: 0.1016
+[2025-07-07 18:21:44] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-07 18:21:44] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-07 18:21:44] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 18:21:44] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 18:21:44] [Rank 0] Group 11 FTA: 0.0977
+[2025-07-07 18:21:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 18:21:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 18:21:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 18:21:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 18:21:45] [Rank 0] step:8001/10000 train_time:551367ms step_avg:68.91ms
+[2025-07-07 18:21:47] [Rank 0] step:8021/10000 train_time:552142ms step_avg:68.84ms
+[2025-07-07 18:21:48] [Rank 0] step:8041/10000 train_time:553516ms step_avg:68.84ms
+[2025-07-07 18:21:50] [Rank 0] step:8061/10000 train_time:554888ms step_avg:68.84ms
+[2025-07-07 18:21:51] [Rank 0] step:8081/10000 train_time:556262ms step_avg:68.84ms
+[2025-07-07 18:21:52] [Rank 0] step:8101/10000 train_time:558300ms step_avg:68.92ms
+[2025-07-07 18:21:54] [Rank 0] step:8121/10000 train_time:559040ms step_avg:68.84ms
+[2025-07-07 18:21:55] [Rank 0] step:8141/10000 train_time:560413ms step_avg:68.84ms
+[2025-07-07 18:21:56] [Rank 0] step:8161/10000 train_time:561789ms step_avg:68.84ms
+[2025-07-07 18:21:58] [Rank 0] step:8181/10000 train_time:563166ms step_avg:68.84ms
+[2025-07-07 18:21:59] [Rank 0] step:8201/10000 train_time:564546ms step_avg:68.84ms
+[2025-07-07 18:22:01] [Rank 0] step:8221/10000 train_time:565927ms step_avg:68.84ms
+[2025-07-07 18:22:02] [Rank 0] step:8241/10000 train_time:567308ms step_avg:68.84ms
+[2025-07-07 18:22:03] [Rank 0] step:8261/10000 train_time:568689ms step_avg:68.84ms
+[2025-07-07 18:22:05] [Rank 0] step:8281/10000 train_time:570069ms step_avg:68.84ms
+[2025-07-07 18:22:06] [Rank 0] step:8301/10000 train_time:571491ms step_avg:68.85ms
+[2025-07-07 18:22:08] [Rank 0] step:8321/10000 train_time:572871ms step_avg:68.85ms
+[2025-07-07 18:22:09] [Rank 0] step:8341/10000 train_time:574251ms step_avg:68.85ms
+[2025-07-07 18:22:10] [Rank 0] step:8361/10000 train_time:575632ms step_avg:68.85ms
+[2025-07-07 18:22:12] [Rank 0] step:8381/10000 train_time:577014ms step_avg:68.85ms
+[2025-07-07 18:22:13] [Rank 0] step:8401/10000 train_time:578396ms step_avg:68.85ms
+[2025-07-07 18:22:14] [Rank 0] step:8421/10000 train_time:579778ms step_avg:68.85ms
+[2025-07-07 18:22:16] [Rank 0] step:8441/10000 train_time:581163ms step_avg:68.85ms
+[2025-07-07 18:22:17] [Rank 0] step:8461/10000 train_time:583227ms step_avg:68.93ms
+[2025-07-07 18:22:19] [Rank 0] step:8481/10000 train_time:583973ms step_avg:68.86ms
+[2025-07-07 18:22:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:22:21] [Rank 0] PRINT: step:8500/10000 train_loss:2.1960 val_loss:2.1734 train_time:585981ms step_avg:68.94ms
+[2025-07-07 18:22:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:22:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:22:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:27:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:27:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:27:51] [Rank 0] Total Loss: 4.7608
+[2025-07-07 18:27:51] [Rank 0] Total FTA: 0.1047
+[2025-07-07 18:27:51] [Rank 0] Group 0 Loss: 5.0424
+[2025-07-07 18:27:51] [Rank 0] Group 1 Loss: 4.4316
+[2025-07-07 18:27:51] [Rank 0] Group 2 Loss: 4.9255
+[2025-07-07 18:27:51] [Rank 0] Group 3 Loss: 4.7030
+[2025-07-07 18:27:51] [Rank 0] Group 4 Loss: 4.8018
+[2025-07-07 18:27:51] [Rank 0] Group 5 Loss: 4.7374
+[2025-07-07 18:27:51] [Rank 0] Group 6 Loss: 4.7583
+[2025-07-07 18:27:51] [Rank 0] Group 7 Loss: 4.7126
+[2025-07-07 18:27:51] [Rank 0] Group 8 Loss: 4.7096
+[2025-07-07 18:27:51] [Rank 0] Group 9 Loss: 4.7014
+[2025-07-07 18:27:51] [Rank 0] Group 10 Loss: 4.7048
+[2025-07-07 18:27:51] [Rank 0] Group 11 Loss: 4.7072
+[2025-07-07 18:27:51] [Rank 0] Group 0 FTA: 0.1834
+[2025-07-07 18:27:51] [Rank 0] Group 1 FTA: 0.1615
+[2025-07-07 18:27:51] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 18:27:51] [Rank 0] Group 3 FTA: 0.0755
+[2025-07-07 18:27:51] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-07 18:27:51] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 18:27:51] [Rank 0] Group 6 FTA: 0.0625
+[2025-07-07 18:27:51] [Rank 0] Group 7 FTA: 0.1406
+[2025-07-07 18:27:51] [Rank 0] Group 8 FTA: 0.1016
+[2025-07-07 18:27:51] [Rank 0] Group 9 FTA: 0.1133
+[2025-07-07 18:27:51] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-07 18:27:51] [Rank 0] Group 11 FTA: 0.0879
+[2025-07-07 18:27:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 18:27:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 18:27:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 18:27:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 18:27:53] [Rank 0] step:8501/10000 train_time:585991ms step_avg:68.93ms
+[2025-07-07 18:27:54] [Rank 0] step:8521/10000 train_time:586768ms step_avg:68.86ms
+[2025-07-07 18:27:55] [Rank 0] step:8541/10000 train_time:588138ms step_avg:68.86ms
+[2025-07-07 18:27:57] [Rank 0] step:8561/10000 train_time:589514ms step_avg:68.86ms
+[2025-07-07 18:27:58] [Rank 0] step:8581/10000 train_time:590893ms step_avg:68.86ms
+[2025-07-07 18:28:00] [Rank 0] step:8601/10000 train_time:592273ms step_avg:68.86ms
+[2025-07-07 18:28:01] [Rank 0] step:8621/10000 train_time:593651ms step_avg:68.86ms
+[2025-07-07 18:28:02] [Rank 0] step:8641/10000 train_time:595281ms step_avg:68.89ms
+[2025-07-07 18:28:04] [Rank 0] step:8661/10000 train_time:596443ms step_avg:68.87ms
+[2025-07-07 18:28:05] [Rank 0] step:8681/10000 train_time:597822ms step_avg:68.87ms
+[2025-07-07 18:28:06] [Rank 0] step:8701/10000 train_time:599201ms step_avg:68.87ms
+[2025-07-07 18:28:08] [Rank 0] step:8721/10000 train_time:600579ms step_avg:68.87ms
+[2025-07-07 18:28:09] [Rank 0] step:8741/10000 train_time:601960ms step_avg:68.87ms
+[2025-07-07 18:28:11] [Rank 0] step:8761/10000 train_time:603341ms step_avg:68.87ms
+[2025-07-07 18:28:12] [Rank 0] step:8781/10000 train_time:604722ms step_avg:68.87ms
+[2025-07-07 18:28:13] [Rank 0] step:8801/10000 train_time:606105ms step_avg:68.87ms
+[2025-07-07 18:28:15] [Rank 0] step:8821/10000 train_time:607738ms step_avg:68.90ms
+[2025-07-07 18:28:16] [Rank 0] step:8841/10000 train_time:608891ms step_avg:68.87ms
+[2025-07-07 18:28:18] [Rank 0] step:8861/10000 train_time:610273ms step_avg:68.87ms
+[2025-07-07 18:28:19] [Rank 0] step:8881/10000 train_time:611654ms step_avg:68.87ms
+[2025-07-07 18:28:20] [Rank 0] step:8901/10000 train_time:613037ms step_avg:68.87ms
+[2025-07-07 18:28:22] [Rank 0] step:8921/10000 train_time:614420ms step_avg:68.87ms
+[2025-07-07 18:28:23] [Rank 0] step:8941/10000 train_time:615802ms step_avg:68.87ms
+[2025-07-07 18:28:24] [Rank 0] step:8961/10000 train_time:617185ms step_avg:68.87ms
+[2025-07-07 18:28:26] [Rank 0] step:8981/10000 train_time:618573ms step_avg:68.88ms
+[2025-07-07 18:28:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:28:28] [Rank 0] PRINT: step:9000/10000 train_loss:2.1486 val_loss:2.1313 train_time:620584ms step_avg:68.95ms
+[2025-07-07 18:28:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:28:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:28:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:33:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:33:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:33:57] [Rank 0] Total Loss: 4.7722
+[2025-07-07 18:33:57] [Rank 0] Total FTA: 0.1008
+[2025-07-07 18:33:57] [Rank 0] Group 0 Loss: 5.1318
+[2025-07-07 18:33:57] [Rank 0] Group 1 Loss: 4.4668
+[2025-07-07 18:33:57] [Rank 0] Group 2 Loss: 4.8484
+[2025-07-07 18:33:57] [Rank 0] Group 3 Loss: 4.7898
+[2025-07-07 18:33:57] [Rank 0] Group 4 Loss: 4.7952
+[2025-07-07 18:33:57] [Rank 0] Group 5 Loss: 4.6918
+[2025-07-07 18:33:57] [Rank 0] Group 6 Loss: 4.7291
+[2025-07-07 18:33:57] [Rank 0] Group 7 Loss: 4.6419
+[2025-07-07 18:33:57] [Rank 0] Group 8 Loss: 4.7255
+[2025-07-07 18:33:57] [Rank 0] Group 9 Loss: 4.6803
+[2025-07-07 18:33:57] [Rank 0] Group 10 Loss: 4.7243
+[2025-07-07 18:33:57] [Rank 0] Group 11 Loss: 4.7326
+[2025-07-07 18:33:57] [Rank 0] Group 0 FTA: 0.1560
+[2025-07-07 18:33:57] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 18:33:57] [Rank 0] Group 2 FTA: 0.0938
+[2025-07-07 18:33:57] [Rank 0] Group 3 FTA: 0.0443
+[2025-07-07 18:33:57] [Rank 0] Group 4 FTA: 0.0521
+[2025-07-07 18:33:57] [Rank 0] Group 5 FTA: 0.0807
+[2025-07-07 18:33:57] [Rank 0] Group 6 FTA: 0.1016
+[2025-07-07 18:33:57] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 18:33:57] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 18:33:57] [Rank 0] Group 9 FTA: 0.1094
+[2025-07-07 18:33:57] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-07 18:33:57] [Rank 0] Group 11 FTA: 0.0918
+[2025-07-07 18:33:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 18:33:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png
+[2025-07-07 18:33:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png
+[2025-07-07 18:33:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png
+[2025-07-07 18:33:59] [Rank 0] step:9001/10000 train_time:620601ms step_avg:68.95ms
+[2025-07-07 18:34:00] [Rank 0] step:9021/10000 train_time:622059ms step_avg:68.96ms
+[2025-07-07 18:34:01] [Rank 0] step:9041/10000 train_time:623430ms step_avg:68.96ms
+[2025-07-07 18:34:03] [Rank 0] step:9061/10000 train_time:624801ms step_avg:68.96ms
+[2025-07-07 18:34:04] [Rank 0] step:9081/10000 train_time:626175ms step_avg:68.95ms
+[2025-07-07 18:34:06] [Rank 0] step:9101/10000 train_time:627551ms step_avg:68.95ms
+[2025-07-07 18:34:07] [Rank 0] step:9121/10000 train_time:628929ms step_avg:68.95ms
+[2025-07-07 18:34:08] [Rank 0] step:9141/10000 train_time:630309ms step_avg:68.95ms
+[2025-07-07 18:34:10] [Rank 0] step:9161/10000 train_time:631690ms step_avg:68.95ms
+[2025-07-07 18:34:11] [Rank 0] step:9181/10000 train_time:633070ms step_avg:68.95ms
+[2025-07-07 18:34:13] [Rank 0] step:9201/10000 train_time:634478ms step_avg:68.96ms
+[2025-07-07 18:34:14] [Rank 0] step:9221/10000 train_time:635857ms step_avg:68.96ms
+[2025-07-07 18:34:15] [Rank 0] step:9241/10000 train_time:637236ms step_avg:68.96ms
+[2025-07-07 18:34:17] [Rank 0] step:9261/10000 train_time:638616ms step_avg:68.96ms
+[2025-07-07 18:34:18] [Rank 0] step:9281/10000 train_time:639996ms step_avg:68.96ms
+[2025-07-07 18:34:19] [Rank 0] step:9301/10000 train_time:641376ms step_avg:68.96ms
+[2025-07-07 18:34:21] [Rank 0] step:9321/10000 train_time:642756ms step_avg:68.96ms
+[2025-07-07 18:34:22] [Rank 0] step:9341/10000 train_time:644135ms step_avg:68.96ms
+[2025-07-07 18:34:24] [Rank 0] step:9361/10000 train_time:645516ms step_avg:68.96ms
+[2025-07-07 18:34:25] [Rank 0] step:9381/10000 train_time:646926ms step_avg:68.96ms
+[2025-07-07 18:34:26] [Rank 0] step:9401/10000 train_time:648306ms step_avg:68.96ms
+[2025-07-07 18:34:28] [Rank 0] step:9421/10000 train_time:649684ms step_avg:68.96ms
+[2025-07-07 18:34:29] [Rank 0] step:9441/10000 train_time:651065ms step_avg:68.96ms
+[2025-07-07 18:34:30] [Rank 0] step:9461/10000 train_time:652447ms step_avg:68.96ms
+[2025-07-07 18:34:32] [Rank 0] step:9481/10000 train_time:653828ms step_avg:68.96ms
+[2025-07-07 18:34:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:34:34] [Rank 0] PRINT: step:9500/10000 train_loss:2.1123 val_loss:2.1012 train_time:655837ms step_avg:69.04ms
+[2025-07-07 18:34:34] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:34:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:34:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:40:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:40:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:40:05] [Rank 0] Total Loss: 4.7504
+[2025-07-07 18:40:05] [Rank 0] Total FTA: 0.1058
+[2025-07-07 18:40:05] [Rank 0] Group 0 Loss: 5.0670
+[2025-07-07 18:40:05] [Rank 0] Group 1 Loss: 4.3933
+[2025-07-07 18:40:05] [Rank 0] Group 2 Loss: 4.8496
+[2025-07-07 18:40:05] [Rank 0] Group 3 Loss: 4.6855
+[2025-07-07 18:40:05] [Rank 0] Group 4 Loss: 4.7917
+[2025-07-07 18:40:05] [Rank 0] Group 5 Loss: 4.6807
+[2025-07-07 18:40:05] [Rank 0] Group 6 Loss: 4.7786
+[2025-07-07 18:40:05] [Rank 0] Group 7 Loss: 4.6824
+[2025-07-07 18:40:05] [Rank 0] Group 8 Loss: 4.7236
+[2025-07-07 18:40:05] [Rank 0] Group 9 Loss: 4.7395
+[2025-07-07 18:40:05] [Rank 0] Group 10 Loss: 4.6709
+[2025-07-07 18:40:05] [Rank 0] Group 11 Loss: 4.7116
+[2025-07-07 18:40:05] [Rank 0] Group 0 FTA: 0.1873
+[2025-07-07 18:40:05] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-07 18:40:05] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-07 18:40:05] [Rank 0] Group 3 FTA: 0.0260
+[2025-07-07 18:40:05] [Rank 0] Group 4 FTA: 0.0547
+[2025-07-07 18:40:05] [Rank 0] Group 5 FTA: 0.0677
+[2025-07-07 18:40:05] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-07 18:40:05] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-07 18:40:05] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 18:40:05] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 18:40:05] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 18:40:05] [Rank 0] Group 11 FTA: 0.0762
+[2025-07-07 18:40:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png
+[2025-07-07 18:40:06] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-07 18:40:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-07 18:40:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-07 18:40:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-07 18:40:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-07 18:40:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-07 18:40:06] [Rank 0] step:9501/10000 train_time:655847ms step_avg:69.03ms +[2025-07-07 18:40:06] [Rank 0] step:9501/10000 train_time:655847ms step_avg:69.03ms +[2025-07-07 18:40:08] [Rank 0] step:9521/10000 train_time:656613ms step_avg:68.96ms +[2025-07-07 18:40:08] [Rank 0] step:9521/10000 train_time:656613ms step_avg:68.96ms +[2025-07-07 18:40:09] [Rank 0] step:9541/10000 train_time:658036ms step_avg:68.97ms +[2025-07-07 18:40:09] [Rank 0] step:9541/10000 train_time:658036ms step_avg:68.97ms +[2025-07-07 18:40:11] [Rank 0] step:9561/10000 train_time:659408ms step_avg:68.97ms +[2025-07-07 18:40:11] [Rank 0] step:9561/10000 train_time:659408ms step_avg:68.97ms +[2025-07-07 18:40:12] [Rank 0] step:9581/10000 train_time:660779ms step_avg:68.97ms +[2025-07-07 18:40:12] [Rank 0] step:9581/10000 train_time:660779ms step_avg:68.97ms +[2025-07-07 18:40:13] [Rank 0] step:9601/10000 train_time:662150ms step_avg:68.97ms +[2025-07-07 18:40:13] [Rank 0] step:9601/10000 train_time:662150ms step_avg:68.97ms +[2025-07-07 18:40:15] [Rank 0] step:9621/10000 train_time:663524ms step_avg:68.97ms +[2025-07-07 18:40:15] [Rank 0] step:9621/10000 train_time:663524ms step_avg:68.97ms +[2025-07-07 18:40:16] [Rank 0] step:9641/10000 train_time:664898ms step_avg:68.97ms +[2025-07-07 18:40:16] [Rank 0] step:9641/10000 train_time:664898ms step_avg:68.97ms +[2025-07-07 18:40:17] [Rank 0] step:9661/10000 train_time:666273ms step_avg:68.97ms +[2025-07-07 18:40:17] [Rank 0] step:9661/10000 train_time:666273ms step_avg:68.97ms +[2025-07-07 18:40:19] [Rank 0] step:9681/10000 train_time:667800ms step_avg:68.98ms +[2025-07-07 18:40:19] [Rank 0] step:9681/10000 train_time:667800ms step_avg:68.98ms +[2025-07-07 18:40:20] [Rank 0] step:9701/10000 train_time:669115ms step_avg:68.97ms +[2025-07-07 18:40:20] [Rank 0] step:9701/10000 train_time:669115ms step_avg:68.97ms +[2025-07-07 18:40:22] [Rank 0] step:9721/10000 train_time:671181ms step_avg:69.04ms +[2025-07-07 18:40:22] [Rank 0] step:9721/10000 train_time:671181ms step_avg:69.04ms +[2025-07-07 18:40:23] [Rank 0] step:9741/10000 train_time:671922ms step_avg:68.98ms +[2025-07-07 18:40:23] [Rank 0] step:9741/10000 train_time:671922ms step_avg:68.98ms +[2025-07-07 18:40:24] [Rank 0] step:9761/10000 train_time:673301ms step_avg:68.98ms +[2025-07-07 18:40:24] [Rank 0] step:9761/10000 train_time:673301ms step_avg:68.98ms +[2025-07-07 18:40:26] [Rank 0] step:9781/10000 train_time:674680ms step_avg:68.98ms +[2025-07-07 18:40:26] [Rank 0] step:9781/10000 train_time:674680ms step_avg:68.98ms +[2025-07-07 18:40:27] [Rank 0] step:9801/10000 train_time:676060ms step_avg:68.98ms +[2025-07-07 18:40:27] [Rank 
0] step:9801/10000 train_time:676060ms step_avg:68.98ms +[2025-07-07 18:40:29] [Rank 0] step:9821/10000 train_time:677440ms step_avg:68.98ms +[2025-07-07 18:40:29] [Rank 0] step:9821/10000 train_time:677440ms step_avg:68.98ms +[2025-07-07 18:40:30] [Rank 0] step:9841/10000 train_time:678820ms step_avg:68.98ms +[2025-07-07 18:40:30] [Rank 0] step:9841/10000 train_time:678820ms step_avg:68.98ms +[2025-07-07 18:40:31] [Rank 0] step:9861/10000 train_time:680201ms step_avg:68.98ms +[2025-07-07 18:40:31] [Rank 0] step:9861/10000 train_time:680201ms step_avg:68.98ms +[2025-07-07 18:40:33] [Rank 0] step:9881/10000 train_time:681582ms step_avg:68.98ms +[2025-07-07 18:40:33] [Rank 0] step:9881/10000 train_time:681582ms step_avg:68.98ms +[2025-07-07 18:40:34] [Rank 0] step:9901/10000 train_time:682961ms step_avg:68.98ms +[2025-07-07 18:40:34] [Rank 0] step:9901/10000 train_time:682961ms step_avg:68.98ms +[2025-07-07 18:40:35] [Rank 0] step:9921/10000 train_time:684372ms step_avg:68.98ms +[2025-07-07 18:40:35] [Rank 0] step:9921/10000 train_time:684372ms step_avg:68.98ms +[2025-07-07 18:40:37] [Rank 0] step:9941/10000 train_time:685751ms step_avg:68.98ms +[2025-07-07 18:40:37] [Rank 0] step:9941/10000 train_time:685751ms step_avg:68.98ms +[2025-07-07 18:40:38] [Rank 0] step:9961/10000 train_time:687132ms step_avg:68.98ms +[2025-07-07 18:40:38] [Rank 0] step:9961/10000 train_time:687132ms step_avg:68.98ms +[2025-07-07 18:40:40] [Rank 0] step:9981/10000 train_time:688513ms step_avg:68.98ms +[2025-07-07 18:40:40] [Rank 0] step:9981/10000 train_time:688513ms step_avg:68.98ms +[2025-07-07 18:40:41] [Rank 0] step:10000/10000 train_time:689827ms step_avg:68.98ms +[2025-07-07 18:40:41] [Rank 0] step:10000/10000 train_time:689827ms step_avg:68.98ms +[2025-07-07 18:40:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:40:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:40:42] [Rank 0] PRINT: step:10000/10000 train_loss:2.0861 val_loss:2.0799 train_time:690530ms step_avg:69.05ms +[2025-07-07 18:40:42] [Rank 0] PRINT: step:10000/10000 train_loss:2.0861 val_loss:2.0799 train_time:690530ms step_avg:69.05ms +[2025-07-07 18:40:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 18:40:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 18:40:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 18:40:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 18:40:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 18:40:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 18:46:14] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 18:46:14] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 18:46:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 18:46:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 18:46:15] [Rank 0] Total Loss: 4.7568 +[2025-07-07 18:46:15] [Rank 0] Total Loss: 4.7568 +[2025-07-07 18:46:15] [Rank 0] Total FTA: 0.0975 +[2025-07-07 18:46:15] [Rank 0] Total FTA: 0.0975 +[2025-07-07 18:46:15] [Rank 0] Group 0 Loss: 5.1125 +[2025-07-07 18:46:15] [Rank 0] Group 0 Loss: 5.1125 +[2025-07-07 18:46:15] [Rank 0] Group 1 Loss: 4.4489 +[2025-07-07 18:46:15] [Rank 0] Group 1 Loss: 4.4489 +[2025-07-07 18:46:15] [Rank 0] Group 2 Loss: 4.7838 +[2025-07-07 18:46:15] [Rank 0] Group 2 Loss: 4.7838 +[2025-07-07 18:46:15] [Rank 0] Group 3 Loss: 4.7198 +[2025-07-07 18:46:15] [Rank 0] Group 3 Loss: 4.7198 +[2025-07-07 18:46:15] [Rank 0] Group 4 Loss: 4.7927 +[2025-07-07 18:46:15] [Rank 0] Group 4 Loss: 4.7927 +[2025-07-07 18:46:15] [Rank 0] Group 5 Loss: 4.6694 +[2025-07-07 18:46:15] [Rank 0] Group 5 Loss: 4.6694 +[2025-07-07 18:46:15] [Rank 0] Group 6 Loss: 4.7263 +[2025-07-07 18:46:15] [Rank 0] Group 6 Loss: 4.7263 +[2025-07-07 18:46:15] [Rank 0] Group 7 Loss: 4.7038 +[2025-07-07 18:46:15] [Rank 0] Group 7 Loss: 4.7038 +[2025-07-07 18:46:15] [Rank 0] Group 8 Loss: 4.7246 +[2025-07-07 18:46:15] [Rank 0] Group 8 Loss: 4.7246 +[2025-07-07 18:46:15] [Rank 0] Group 9 Loss: 4.7038 +[2025-07-07 18:46:15] [Rank 0] Group 9 Loss: 4.7038 +[2025-07-07 18:46:15] [Rank 0] Group 10 Loss: 4.6943 +[2025-07-07 18:46:15] [Rank 0] Group 10 Loss: 4.6943 +[2025-07-07 18:46:15] [Rank 0] Group 11 Loss: 4.7159 +[2025-07-07 18:46:15] [Rank 0] Group 11 Loss: 4.7159 +[2025-07-07 18:46:15] [Rank 0] Group 0 FTA: 0.1326 +[2025-07-07 18:46:15] [Rank 0] Group 0 FTA: 0.1326 +[2025-07-07 18:46:15] [Rank 0] Group 1 FTA: 0.1302 +[2025-07-07 18:46:15] [Rank 0] Group 1 FTA: 0.1302 +[2025-07-07 18:46:15] [Rank 0] Group 2 FTA: 0.1484 +[2025-07-07 18:46:15] [Rank 0] Group 2 FTA: 0.1484 +[2025-07-07 18:46:15] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-07 18:46:15] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-07 18:46:15] [Rank 0] Group 4 FTA: 0.0339 +[2025-07-07 18:46:15] [Rank 0] Group 4 FTA: 0.0339 +[2025-07-07 18:46:15] [Rank 0] Group 5 FTA: 0.0729 +[2025-07-07 18:46:15] [Rank 0] Group 5 FTA: 0.0729 +[2025-07-07 18:46:15] [Rank 0] Group 6 FTA: 0.1120 +[2025-07-07 18:46:15] [Rank 0] Group 6 FTA: 0.1120 +[2025-07-07 18:46:15] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-07 18:46:15] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-07 18:46:15] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 18:46:15] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 18:46:15] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-07 18:46:15] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-07 18:46:15] [Rank 0] Group 10 FTA: 0.1035 +[2025-07-07 18:46:15] [Rank 0] Group 10 FTA: 0.1035 +[2025-07-07 18:46:15] [Rank 0] Group 11 FTA: 0.0869 +[2025-07-07 18:46:15] [Rank 0] Group 11 FTA: 0.0869 +[2025-07-07 18:46:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-07 18:46:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_loss_curves.png +[2025-07-07 18:46:15] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-07 18:46:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/per_class_acc_curves.png +[2025-07-07 18:46:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-07 18:46:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_loss_curve.png +[2025-07-07 18:46:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-07 18:46:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_42/total_acc_curve.png +[2025-07-07 18:46:16] [Rank 0] step:10001/10000 train_time:690540ms step_avg:69.05ms +[2025-07-07 18:46:16] [Rank 0] step:10001/10000 train_time:690540ms step_avg:69.05ms +[2025-07-07 18:46:16] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 18:46:16 2025 --- +[2025-07-07 18:46:16] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 18:46:16 2025 --- +[2025-07-07 18:46:16] [Rank 0] PRINT: Peak memory allocated: 9138 MiB reserved: 10596 MiB +[2025-07-07 18:46:16] [Rank 0] PRINT: Peak memory allocated: 9138 MiB reserved: 10596 MiB diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e4a86a696f267e044cffa5ba197f8911e89f198c --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "720970ad-bdbe-41ca-b391-24ce92bd183b", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..09fa567545c84b37c547359c6133e4b185ed7a72 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:757285af84e39c6b2ff35151377a296e53fdcb76b383216c94efe1f201733228 +size 247441 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..6aa473f08aec9ecfeec6f28f5f0e61cc5b9cbe02 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3db803a9348f2593a5ef52313d28939db2fe2d0870f7e1e0815cbe3734f555d1 +size 254351 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png 
b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..d79e21837c68890db4743fe9f13ef9c8cec49130
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:023191681e05c27d091de4eb3cf8cc5bb682352ea2787775b008a2d884217141
+size 81620
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..c0f0df6bc824a39f31962e99ef67ff8d842429c6
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b5e8fdbe073ca8976591707ce415996826b2c7dedc888550d394f735a1cd3f6
+size 101025
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/training_log_720970ad-bdbe-41ca-b391-24ce92bd183b.txt b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/training_log_720970ad-bdbe-41ca-b391-24ce92bd183b.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8fbb202a41e93e2dae6077c99fca7af86619509b
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/training_log_720970ad-bdbe-41ca-b391-24ce92bd183b.txt
@@ -0,0 +1,5144 @@
+[2025-07-07 22:45:35] [Rank 0] PRINT: --- Script Start: Mon Jul 7 22:45:35 2025 ---
+[2025-07-07 22:45:35] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001)
+[2025-07-07 22:45:35] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 22:45:35] [Rank 0] PRINT: Using fixed seed: 45
+[2025-07-07 22:45:35] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45
+[2025-07-07 22:45:35] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op,
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
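+# Note: the absolute run_dir_path_str above is superseded just below, where the
+# same folder is rebuilt relative to base_log_dir; for the CLI args of this run
+# (optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001, seed=45)
+# both resolve to logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45, the
+# run directory printed at the top of this log.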
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the run's logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... (other initial logs)
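+    # Logging convention used throughout this script: a message whose text
+    # starts with "PRINT:" is echoed to stdout (prefix stripped) even when
+    # console=False, and every message is appended once to the per-run
+    # logfile by rank 0.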
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # use full cached set
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens +
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
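+            # Sanity check of the forward contract relied on by the evaluation
+            # helpers defined above: when called with target_seq=None the model
+            # may return bare logits or a (loss, logits) tuple, so the shape of
+            # the result is verified explicitly here before training starts.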
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
+        # Adam optimizer setup (the original hand-tuned group LRs were head=0.22,
+        # embed=0.6, scalar=0.04; here all three groups use the CLI-provided adam_lr)
+        adam_param_groups_config = [
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list
+                                  for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p])
+                                  if p is not None]
+            if flat_adam_matrices:  # Only add the group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 here if Adam should decay
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    elif exp_args.model_parameterization == "whole":
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+        scalar_params = [p for p in model.parameters() if p.ndim < 2]
+        head_params = [model.lm_head.weight]
+
+        # init the optimizer(s)
+        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+        optimizers = [optimizer1, optimizer2]
+
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["initial_lr"] = group["lr"]
+
+    # learning rate schedule: stable then decay
+    def get_lr(step: int):
+        x = step / args.num_iterations  # progress in training
+        # The original `assert 0 <= x < 1` would fail on the final step
+        # (step == num_iterations), so clamp x into [0, 1] instead.
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0)
+
+        if x < 1 - args.cooldown_frac:
+            return 1.0
+        else:
+            # Guard against cooldown_frac == 0 to avoid division by zero
+            w = (1 - x) / max(args.cooldown_frac, 1e-9)
+            return w * 1.0 + (1 - w) * 0.1
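+    # For intuition, a tiny demo of the schedule shape (hypothetical helper, not part
+    # of the original script): with num_iterations=10000 and cooldown_frac=0.8 the
+    # multiplier holds at 1.0 for the first 2000 steps, then decays linearly to 0.1.
+    def _demo_lr_schedule():
+        # get_lr(1000)  == 1.0     (x = 0.1, stable phase)
+        # get_lr(5000)  == 0.6625  (x = 0.5 -> w = 0.625 -> 0.625*1.0 + 0.375*0.1)
+        # get_lr(10000) == 0.1     (x = 1.0, fully cooled down)
+        return [get_lr(s) for s in (1000, 5000, 10000)]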
+    # attention window size schedule
+    def next_multiple_of_n(v: float | int, *, n: int):
+        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+    @lru_cache(1)
+    def get_window_size_blocks_helper(window_size: int):
+        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+    def get_window_size_blocks(step: int):
+        x = step / args.num_iterations  # progress in training
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0)  # clamp, as in get_lr above
+
+        # Ensure window_size is at least 128
+        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+        return get_window_size_blocks_helper(window_size)
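+    # Two schedule illustrations (hypothetical helpers, not part of the original
+    # script). First, the sliding attention window above grows from 1 block of
+    # 128 tokens at step 0 to 14 blocks (1792 tokens) at the end of training:
+    def _demo_window_schedule():
+        # step 0     -> max(128, 128)  = 128 tokens  -> 1 block
+        # step 5000  -> max(128, 896)  = 896 tokens  -> 7 blocks   (1728*0.5 = 864 -> 896)
+        # step 10000 -> max(128, 1792) = 1792 tokens -> 14 blocks  (next multiple of 128 >= 1728)
+        return [get_window_size_blocks(s).item() for s in (0, 5000, 10000)]
+
+    # Second, the Muon momentum warm-up applied in the training loop further below
+    # anneals linearly over the first 300 steps:
+    def _muon_momentum_at(step: int) -> float:
+        frac = min(step / 300, 1)  # 0.85 at step 0, 0.90 at step 150, 0.95 from step 300 on
+        return (1 - frac) * 0.85 + frac * 0.95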
+    # Compile the eager model; keep the original `model` reference around for
+    # inference and state_dict round-trips
+    print0("PRINT: Compiling model with TorchInductor...", console=True)
+    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+    print0("PRINT: Model compilation complete.", console=True)
+
+    ########################################
+    #            Warmup kernels            #
+    ########################################
+    print0("PRINT: Starting warmup...", console=True)
+    warmup_steps = 10
+    # Snapshot model/optimizer state so warmup does not perturb the real run
+    initial_state = dict(
+        model=copy.deepcopy(model_compiled.state_dict()),
+        optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+    )
+
+    for i in range(warmup_steps):
+        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+        loss.backward()
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+        for opt in optimizers:
+            opt.step()
+        model_compiled.zero_grad(set_to_none=True)
+
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+    del initial_state
+    print0("PRINT: Warmup complete.", console=True)
+    torch.cuda.synchronize()
+
+    ########################################
+    #       Training and validation        #
+    ########################################
+    print0("PRINT: Starting training...", console=True)
+    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+    train_loss_sum = torch.zeros(1, device=device)
+    train_step_count = torch.zeros(1, device=device)
+    training_time_ms = 0
+    torch.cuda.synchronize()
+    t0 = time.perf_counter()
+    train_steps = args.num_iterations
+
+    if master_process:
+        tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+        history = {
+            'per_class_loss': defaultdict(dict),
+            'per_class_acc': defaultdict(dict),
+            'total_loss': {},
+            'total_acc': {}
+        }
+        # ------------------------------------
+        QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+        M_FOR_POWERLAW = 11
+        NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+    for step in range(train_steps + 1):
+        last_step = (step == train_steps)
+
+        # --------- VALIDATION SECTION ---------
+        if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+            torch.cuda.synchronize()
+            if step > 0:
+                current_run_time = 1000 * (time.perf_counter() - t0)
+                training_time_ms += current_run_time
+
+            model_compiled.eval()
+            val_batch_size = world_size * args.val_seq_len
+            if args.val_tokens % val_batch_size != 0:
+                print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+            val_num_steps = args.val_tokens // val_batch_size
+            val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+            val_loss_sum = torch.zeros(1, device=device)
+            actual_val_steps = 0
+
+            with torch.no_grad():
+                for val_i in range(val_num_steps):
+                    try:
+                        inputs, targets = next(val_loader)
+                        loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                        val_loss_sum += loss_val
+                        actual_val_steps += 1
+                    except StopIteration:
+                        print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                        break
+
+            if actual_val_steps > 0:
+                val_loss_avg = val_loss_sum / actual_val_steps
+            else:
+                val_loss_avg = torch.tensor(float('nan'), device=device)
+                print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+            del val_loader
+            dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+            if train_step_count > 0:
+                avg_train_loss = train_loss_sum / train_step_count
+                dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+                avg_train_loss = avg_train_loss.item()
+            else:
+                avg_train_loss = float('nan')
+
+            avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+            avg_train_loss = float(avg_train_loss)
+            if step == 0:
+                print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+            else:
+                print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+            if master_process and step > 0:
+                selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+                class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+                model_for_inference.load_state_dict(model.state_dict())
+
+                eval_results = run_detailed_evaluation(
+                    model=model_for_inference,
+                    tokenizer=tokenizer_for_eval,
+                    qa_data_path=QA_JSONL_PATH,
+                    device=device,
+                    m_val=M_FOR_POWERLAW,
+                    class_to_group_map=class_to_group_map,
+                    num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                )
+
+                print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+                print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+                print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+                for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+                for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+                current_step_str = str(step)
+                history['total_loss'][current_step_str] = eval_results['total_loss']
+                history['total_acc'][current_step_str] = eval_results['total_acc']
+                for group_id, loss in eval_results['per_class_loss'].items():
+                    history['per_class_loss'][group_id][current_step_str] = loss
+                for group_id, acc in eval_results['per_class_acc'].items():
+                    history['per_class_acc'][group_id][current_step_str] = acc
+
+                plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+                plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+                plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+                plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+            if world_size > 1:
+                dist.barrier()
+
+            if master_process and args.save_checkpoint and step > 0:
+                if run_dir_path_str:
+                    checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                    checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+                    checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+                    log_checkpoint = dict(
+                        step=step,
+                        code=code,
+                        model=model_compiled.state_dict(),
+                        optimizers=[opt.state_dict() for opt in optimizers]
+                    )
+                    torch.save(log_checkpoint, str(checkpoint_path))
+                    print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+                else:
+                    print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+            # Reset the rolling train-loss window and the step timer
+            train_loss_sum = torch.zeros(1, device=device)
+            train_step_count = torch.zeros(1, device=device)
+            model_compiled.train()
+            torch.cuda.synchronize()
+            t0 = time.perf_counter()
+
+        # --------- TRAINING SECTION ---------
+        try:
+            inputs, targets = next(train_loader)
+        except StopIteration:
+            print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+            break
+
+        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+        loss_train.backward()
+        train_loss_sum += loss_train.detach() / args.train_seq_len
+        train_step_count += 1
+
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+        current_lr_val = get_lr(step)
+        for opt in optimizers:
+            for group in opt.param_groups:
+                group["lr"] = group["initial_lr"] * current_lr_val
+
+        if optimizer2 is not None:
+            # Muon momentum warm-up: 0.85 -> 0.95 over the first 300 steps
+            for group in optimizer2.param_groups:
+                frac = min(step / 300, 1)
+                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+        for opt in optimizers:
+            opt.step()
+
+        model_compiled.zero_grad(set_to_none=True)
+
+        if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+            approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+    print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+           f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+    if dist.is_initialized():
+        dist.destroy_process_group()
+
+[2025-07-07 22:45:35] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
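+# For reference: the train/val .bin shards read by _load_data_shard above carry a
+# 256-int32 header (magic 20240520, version 1, token count) followed by uint16
+# tokens. A standalone reader (hypothetical helper, mirroring the assertions above):
+def _read_shard_np(path):
+    header = np.fromfile(path, dtype=np.int32, count=256)
+    assert header[0] == 20240520 and header[1] == 1
+    return np.fromfile(path, dtype=np.uint16, offset=256 * 4, count=int(header[2]))
+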
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Append to the per-run logfile exactly once, and only when it exists
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code)  # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence
+# discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+optimizers = [optimizer1, optimizer2]
+
+# remember each group's base LR so the schedule below can rescale it every step
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # the original `assert 0 <= x < 1` would fail on the final step (step == num_iterations),
+    # so clamp x into [0, 1] instead
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # guard against cooldown_frac == 0 to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+# attention window size schedule
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # clamp, same reasoning as in get_lr
+    # ensure window_size is at least 128 tokens
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# compile the raw 'model'; the compiled handle is what the loops below call
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+# snapshot model/optimizer state so the warmup steps leave no trace
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+# ------------------------------------
+QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+M_FOR_POWERLAW = 11
+NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        # first-token accuracy hook, currently disabled:
+        # first_token_acc = 0.0
+        # ft_correct = 0
+        # ft_total = 0
+        # if master_process and ft_tokenizer is not None:
+        #     try:
+        #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #             model_for_inference, ft_tokenizer, device, num_samples=1000
+        #         )
+        #     except Exception as e:
+        #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+        # if world_size > 1:
+        #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #     ft_total_tensor = torch.tensor(ft_total, device=device)
+        #     dist.broadcast(ft_acc_tensor, 0)
+        #     dist.broadcast(ft_correct_tensor, 0)
+        #     dist.broadcast(ft_total_tensor, 0)
+        #     first_token_acc = ft_acc_tensor.item()
+        #     ft_correct = int(ft_correct_tensor.item())
+        #     ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+            model_for_inference.load_state_dict(model.state_dict())
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        # reset the running train-loss window and restart the step timer
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # earlier final-checkpoint logic, currently disabled:
+    # if last_step:
+    #     if master_process and args.save_checkpoint:
+    #         if run_dir_path_str:
+    #             checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #             checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #             checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #             log_checkpoint = dict(
+    #                 step=step,
+    #                 code=code,
+    #                 model=model_compiled.state_dict(),
+    #                 optimizers=[opt.state_dict() for opt in optimizers]
+    #             )
+    #             torch.save(log_checkpoint, str(checkpoint_path))
+    #             print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #         else:
+    #             print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #     break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        # Muon momentum warmup: ramp from 0.85 to 0.95 over the first 300 steps
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # note: the per-token loss is computed here but not included in the log line below
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-07 22:45:36] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-07 22:45:36] [Rank 0] PRINT: Constructing model...
+[2025-07-07 22:45:38] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 22:45:38] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 22:45:38] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 22:45:38] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 22:45:39] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 22:45:39] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 22:45:39] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 22:45:39] [Rank 0] PRINT: Model returns:
+[2025-07-07 22:45:39] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 22:45:39] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-07 22:45:39] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001).
+[2025-07-07 22:45:39] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-07 22:45:39] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-07 22:45:39] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 22:45:39] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 22:45:39] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 22:46:45] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 22:46:45] [Rank 0] PRINT: Starting training...
+[2025-07-07 22:46:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:46:54] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 22:46:55] [Rank 0] step:21/10000 train_time:818ms step_avg:38.94ms
+[2025-07-07 22:46:56] [Rank 0] step:41/10000 train_time:2176ms step_avg:53.07ms
+[2025-07-07 22:46:58] [Rank 0] step:61/10000 train_time:3499ms step_avg:57.36ms
+[2025-07-07 22:46:59] [Rank 0] step:81/10000 train_time:4823ms step_avg:59.55ms
+[2025-07-07 22:47:00] [Rank 0] step:101/10000 train_time:6148ms step_avg:60.87ms
+[2025-07-07 22:47:02] [Rank 0] step:121/10000 train_time:7471ms step_avg:61.74ms
+[2025-07-07 22:47:03] [Rank 0] step:141/10000 train_time:8798ms step_avg:62.40ms
+[2025-07-07 22:47:04] [Rank 0] step:161/10000 train_time:10122ms step_avg:62.87ms
+[2025-07-07 22:47:06] [Rank 0] step:181/10000 train_time:11446ms step_avg:63.24ms
+[2025-07-07 22:47:07] [Rank 0] step:201/10000 train_time:12838ms step_avg:63.87ms
+[2025-07-07 22:47:08] [Rank 0] step:221/10000 train_time:14164ms step_avg:64.09ms
+[2025-07-07 22:47:10] [Rank 0] step:241/10000 train_time:15491ms step_avg:64.28ms
+[2025-07-07 22:47:11] [Rank 0] step:261/10000 train_time:16819ms step_avg:64.44ms
+[2025-07-07 22:47:12] [Rank 0] step:281/10000 train_time:18145ms step_avg:64.57ms
+[2025-07-07 22:47:14] [Rank 0] step:301/10000 train_time:19472ms step_avg:64.69ms
+[2025-07-07 22:47:15] [Rank 0] step:321/10000 train_time:20800ms step_avg:64.80ms
+[2025-07-07 22:47:16] [Rank 0] step:341/10000 train_time:22126ms step_avg:64.89ms
+[2025-07-07 22:47:18] [Rank 0] step:361/10000 train_time:23504ms step_avg:65.11ms
+[2025-07-07 22:47:19] [Rank 0] step:381/10000 train_time:24839ms step_avg:65.19ms
+[2025-07-07 22:47:20] [Rank 0] step:401/10000 train_time:26164ms step_avg:65.25ms
+[2025-07-07 22:47:22] [Rank 0] step:421/10000 train_time:27492ms step_avg:65.30ms
+[2025-07-07 22:47:23] [Rank 0] step:441/10000 train_time:28820ms step_avg:65.35ms
+[2025-07-07 22:47:24] [Rank 0] step:461/10000 train_time:30147ms step_avg:65.39ms
+[2025-07-07 22:47:26] [Rank 0] step:481/10000 train_time:31475ms step_avg:65.44ms
+[2025-07-07 22:47:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:47:28] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5998 train_time:33407ms step_avg:66.81ms
+[2025-07-07 22:47:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:47:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:47:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:52:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:52:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:52:53] [Rank 0] Total Loss: 8.9186
+[2025-07-07 22:52:53] [Rank 0] Total FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 0 Loss: 8.9369
+[2025-07-07 22:52:53] [Rank 0] Group 1 Loss: 8.9177
+[2025-07-07 22:52:53] [Rank 0] Group 2 Loss: 8.9623
+[2025-07-07 22:52:53] [Rank 0] Group 3 Loss: 8.8955
+[2025-07-07 22:52:53] [Rank 0] Group 4 Loss: 8.9171
+[2025-07-07 22:52:53] [Rank 0] Group 5 Loss: 8.8997
+[2025-07-07 22:52:53] [Rank 0] Group 6 Loss: 8.9208
+[2025-07-07 22:52:53] [Rank 0] Group 7 Loss: 8.9136
+[2025-07-07 22:52:53] [Rank 0] Group 8 Loss: 8.9076
+[2025-07-07 22:52:53] [Rank 0] Group 9 Loss: 8.9120
+[2025-07-07 22:52:53] [Rank 0] Group 10 Loss: 8.9161
+[2025-07-07 22:52:53] [Rank 0] Group 11 Loss: 8.9131
+[2025-07-07 22:52:53] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 22:52:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 22:52:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 22:52:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 22:52:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 22:52:54] [Rank 0] step:501/10000 train_time:33417ms step_avg:66.70ms
+[2025-07-07 22:52:56] [Rank 0] step:521/10000 train_time:34145ms step_avg:65.54ms
+[2025-07-07 22:52:57] [Rank 0] step:541/10000 train_time:35464ms step_avg:65.55ms
+[2025-07-07 22:52:58] [Rank 0] step:561/10000 train_time:36862ms step_avg:65.71ms
+[2025-07-07 22:53:00] [Rank 0] step:581/10000 train_time:38183ms step_avg:65.72ms
+[2025-07-07 22:53:01] [Rank 0] step:601/10000 train_time:39504ms step_avg:65.73ms
+[2025-07-07 22:53:02] [Rank 0] step:621/10000 train_time:40825ms step_avg:65.74ms
+[2025-07-07 22:53:04] [Rank 0] step:641/10000 train_time:42148ms step_avg:65.75ms
+[2025-07-07 22:53:05] [Rank 0] step:661/10000 train_time:43470ms step_avg:65.76ms
+[2025-07-07 22:53:06] [Rank 0] step:681/10000 train_time:44793ms step_avg:65.78ms
+[2025-07-07 22:53:08] [Rank 0] step:701/10000 train_time:46115ms step_avg:65.78ms
+[2025-07-07 22:53:09] [Rank 0] step:721/10000 train_time:47440ms step_avg:65.80ms
+[2025-07-07 22:53:10] [Rank 0] step:741/10000 train_time:48765ms step_avg:65.81ms
+[2025-07-07 22:53:12] [Rank 0] step:761/10000 train_time:50095ms step_avg:65.83ms
+[2025-07-07 22:53:13] [Rank 0] step:781/10000 train_time:51431ms step_avg:65.85ms
+[2025-07-07 22:53:14] [Rank 0] step:801/10000 train_time:52765ms step_avg:65.87ms
+[2025-07-07 22:53:16] [Rank 0] step:821/10000 train_time:54133ms step_avg:65.94ms
+[2025-07-07 22:53:17] [Rank 0] step:841/10000 train_time:55469ms step_avg:65.96ms
+[2025-07-07 22:53:18] [Rank 0] step:861/10000 train_time:56806ms step_avg:65.98ms
+[2025-07-07 22:53:20] [Rank 0] step:881/10000 train_time:58143ms step_avg:66.00ms
+[2025-07-07 22:53:21] [Rank 0] step:901/10000 train_time:60148ms step_avg:66.76ms
+[2025-07-07 22:53:22] [Rank 0] step:921/10000 train_time:60868ms step_avg:66.09ms
+[2025-07-07 22:53:24] [Rank 0] step:941/10000 train_time:62205ms step_avg:66.11ms
+[2025-07-07 22:53:25] [Rank 0] step:961/10000 train_time:63544ms step_avg:66.12ms
+[2025-07-07 22:53:26] [Rank 0] step:981/10000 train_time:64883ms step_avg:66.14ms
+[2025-07-07 22:53:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:53:29] [Rank 0] PRINT: step:1000/10000 train_loss:7.8089 val_loss:7.1249 train_time:66831ms step_avg:66.83ms
+[2025-07-07 22:53:29] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:53:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:53:29] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:58:55] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:58:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:58:55] [Rank 0] Total Loss: 7.6884
+[2025-07-07 22:58:55] [Rank 0] Total FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 0 Loss: 7.6875
+[2025-07-07 22:58:55] [Rank 0] Group 1 Loss: 7.6369
+[2025-07-07 22:58:55] [Rank 0] Group 2 Loss: 7.8150
+[2025-07-07 22:58:55] [Rank 0] Group 3 Loss: 7.6755
+[2025-07-07 22:58:55] [Rank 0] Group 4 Loss: 7.6994
+[2025-07-07 22:58:55] [Rank 0] Group 5 Loss: 7.6556
+[2025-07-07 22:58:55] [Rank 0] Group 6 Loss: 7.7006
+[2025-07-07 22:58:55] [Rank 0] Group 7 Loss: 7.6930
+[2025-07-07 22:58:55] [Rank 0] Group 8 Loss: 7.6562
+[2025-07-07 22:58:55] [Rank 0] Group 9 Loss: 7.6824
+[2025-07-07 22:58:55] [Rank 0] Group 10 Loss: 7.6826
+[2025-07-07 22:58:55] [Rank 0] Group 11 Loss: 7.6839
+[2025-07-07 22:58:55] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 22:58:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 22:58:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 22:58:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 22:58:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 22:58:56] [Rank 0] step:1001/10000 train_time:66841ms step_avg:66.77ms
+[2025-07-07 22:58:57] [Rank 0] step:1021/10000 train_time:67582ms step_avg:66.19ms
+[2025-07-07 22:58:59] [Rank 0] step:1041/10000 train_time:68915ms step_avg:66.20ms
+[2025-07-07 22:59:00] [Rank 0] step:1061/10000 train_time:70246ms step_avg:66.21ms
+[2025-07-07 22:59:02] [Rank 0] step:1081/10000 train_time:71836ms step_avg:66.45ms
+[2025-07-07 22:59:03] [Rank 0] step:1101/10000 train_time:72989ms step_avg:66.29ms
+[2025-07-07 22:59:04] [Rank 0] step:1121/10000 train_time:74324ms step_avg:66.30ms
+[2025-07-07 22:59:06] [Rank 0] step:1141/10000 train_time:75658ms step_avg:66.31ms
+[2025-07-07 22:59:07] [Rank 0] step:1161/10000 train_time:76993ms step_avg:66.32ms
+[2025-07-07 22:59:08] [Rank 0] step:1181/10000 train_time:78327ms step_avg:66.32ms
+[2025-07-07 22:59:10] [Rank 0] step:1201/10000 train_time:79664ms step_avg:66.33ms
+[2025-07-07 22:59:11] [Rank 0] step:1221/10000 train_time:81000ms step_avg:66.34ms
+[2025-07-07 22:59:12] [Rank 0] step:1241/10000 train_time:82339ms step_avg:66.35ms
+[2025-07-07 22:59:14] [Rank 0] step:1261/10000 train_time:83932ms step_avg:66.56ms
+[2025-07-07 22:59:15] [Rank 0] step:1281/10000 train_time:85087ms step_avg:66.42ms
+[2025-07-07 22:59:16] [Rank 0] step:1301/10000 train_time:86427ms step_avg:66.43ms
+[2025-07-07 22:59:18] [Rank 0] step:1321/10000 train_time:87769ms step_avg:66.44ms
+[2025-07-07 22:59:19] [Rank 0] step:1341/10000 train_time:89112ms step_avg:66.45ms
+[2025-07-07 22:59:20] [Rank 0] step:1361/10000 train_time:90456ms step_avg:66.46ms
+[2025-07-07 22:59:22] [Rank 0] step:1381/10000 train_time:91801ms step_avg:66.47ms
+[2025-07-07 22:59:23] [Rank 0] step:1401/10000 train_time:93143ms step_avg:66.48ms
+[2025-07-07 22:59:24] [Rank 0] step:1421/10000 train_time:94487ms step_avg:66.49ms
+[2025-07-07 22:59:26] [Rank 0] step:1441/10000 train_time:96084ms step_avg:66.68ms
+[2025-07-07 22:59:27] [Rank 0] step:1461/10000 train_time:97176ms step_avg:66.51ms
+[2025-07-07 22:59:28] [Rank 0] step:1481/10000 train_time:98520ms step_avg:66.52ms
+[2025-07-07 22:59:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:59:31] [Rank 0] PRINT: step:1500/10000 train_loss:6.5865 val_loss:6.1094 train_time:100479ms step_avg:66.99ms
+[2025-07-07 22:59:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:59:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:59:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:05:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:05:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:05:00] [Rank 0] Total Loss: 6.8729
+[2025-07-07 23:05:00] [Rank 0] Total FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 0 Loss: 6.8062
+[2025-07-07 23:05:00] [Rank 0] Group 1 Loss: 6.8635
+[2025-07-07 23:05:00] [Rank 0] Group 2 Loss: 7.0280
+[2025-07-07 23:05:00] [Rank 0] Group 3 Loss: 6.8154
+[2025-07-07 23:05:00] [Rank 0] Group 4 Loss: 6.9118
+[2025-07-07 23:05:00] [Rank 0] Group 5 Loss: 6.8545
+[2025-07-07 23:05:00] [Rank 0] Group 6 Loss: 6.8998
+[2025-07-07 23:05:00] [Rank 0] Group 7 Loss: 6.8912
+[2025-07-07 23:05:00] [Rank 0] Group 8 Loss: 6.8512
+[2025-07-07 23:05:00] [Rank 0] Group 9 Loss: 6.8656
+[2025-07-07 23:05:00] [Rank 0] Group 10 Loss: 6.8729
+[2025-07-07 23:05:00] [Rank 0] Group 11 Loss: 6.8752
+[2025-07-07 23:05:00] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 23:05:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 23:05:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 23:05:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 23:05:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 23:05:01] [Rank 0] step:1501/10000 train_time:100489ms step_avg:66.95ms
+[2025-07-07 23:05:03] [Rank 0] step:1521/10000 train_time:101228ms step_avg:66.55ms
+[2025-07-07 23:05:04] [Rank 0] step:1541/10000 train_time:102566ms step_avg:66.56ms
+[2025-07-07 23:05:05] [Rank 0] step:1561/10000 train_time:103904ms step_avg:66.56ms
+[2025-07-07 23:05:07] [Rank 0] step:1581/10000 train_time:105243ms step_avg:66.57ms
+[2025-07-07 23:05:08] [Rank 0] step:1601/10000 train_time:106582ms step_avg:66.57ms
+[2025-07-07 23:05:09] [Rank 0] step:1621/10000 train_time:108174ms step_avg:66.73ms
+[2025-07-07 23:05:11] [Rank 0] step:1641/10000 train_time:109332ms step_avg:66.63ms
+[2025-07-07 23:05:12] [Rank 0] step:1661/10000 train_time:110672ms step_avg:66.63ms
+[2025-07-07 23:05:13] [Rank 0] step:1681/10000 train_time:112013ms step_avg:66.63ms
+[2025-07-07 23:05:15] [Rank 0] step:1701/10000 train_time:113355ms step_avg:66.64ms
+[2025-07-07 23:05:16] [Rank 0] step:1721/10000 train_time:114697ms step_avg:66.65ms
+[2025-07-07 23:05:18] [Rank 0] step:1741/10000 train_time:116041ms step_avg:66.65ms
+[2025-07-07 23:05:19] [Rank 0] step:1761/10000 train_time:117387ms step_avg:66.66ms
+[2025-07-07 23:05:20] [Rank 0] step:1781/10000 train_time:118734ms step_avg:66.67ms
+[2025-07-07 23:05:22] [Rank 0] step:1801/10000 train_time:120334ms step_avg:66.81ms
+[2025-07-07 23:05:23] [Rank 0] step:1821/10000 train_time:121483ms step_avg:66.71ms
+[2025-07-07 23:05:24] [Rank 0] step:1841/10000 train_time:122830ms step_avg:66.72ms
+[2025-07-07 23:05:26] [Rank 0] step:1861/10000 train_time:124178ms step_avg:66.73ms
+[2025-07-07 23:05:27] [Rank 0] step:1881/10000 train_time:125527ms step_avg:66.73ms
+[2025-07-07 23:05:28] [Rank 0] step:1901/10000 train_time:126874ms step_avg:66.74ms
+[2025-07-07 23:05:30] [Rank 0] step:1921/10000 train_time:128222ms step_avg:66.75ms
+[2025-07-07 23:05:31] [Rank 0] step:1941/10000 train_time:129571ms step_avg:66.75ms
+[2025-07-07 23:05:32] [Rank 0] step:1961/10000 train_time:130922ms step_avg:66.76ms
+[2025-07-07 23:05:34] [Rank 0] step:1981/10000 train_time:132525ms step_avg:66.90ms
+[2025-07-07 23:05:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:05:36] [Rank 0] PRINT: step:2000/10000 train_loss:5.6859 val_loss:5.3048 train_time:134292ms step_avg:67.15ms
+[2025-07-07 23:05:36] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:05:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:05:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:11:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:11:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:11:03] [Rank 0] Total Loss: 6.2582
+[2025-07-07 23:11:03] [Rank 0] Total FTA: 0.0007
+[2025-07-07 23:11:03] [Rank 0] Group 0 Loss: 6.1728
+[2025-07-07 23:11:03] [Rank 0] Group 1 Loss: 6.2321
+[2025-07-07 23:11:03] [Rank 0] Group 2 Loss: 6.3861
+[2025-07-07 23:11:03] [Rank 0] Group 3 Loss: 6.2058
+[2025-07-07 23:11:03] [Rank 0] Group 4 Loss: 6.3016
+[2025-07-07 23:11:03] [Rank 0] Group 5 Loss: 6.2517
+[2025-07-07 23:11:03] [Rank 0] Group 6 Loss: 6.3117
+[2025-07-07 23:11:03] [Rank 0] Group 7 Loss: 6.2597
+[2025-07-07 23:11:03] [Rank 0] Group 8 Loss: 6.2376
+[2025-07-07 23:11:03] [Rank 0] Group 9 Loss: 6.2882
+[2025-07-07 23:11:03] [Rank 0] Group 10 Loss: 6.2737
+[2025-07-07 23:11:03] [Rank 0] Group 11 Loss: 6.2617
+[2025-07-07 23:11:03] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 7 FTA: 0.0026
+[2025-07-07 23:11:03] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 9 FTA: 0.0039
+[2025-07-07 23:11:03] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 23:11:03] [Rank 0] Group 11 FTA: 0.0020
+[2025-07-07 23:11:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 23:11:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 23:11:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 23:11:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 23:11:05] [Rank 0] step:2001/10000 train_time:134302ms step_avg:67.12ms
+[2025-07-07 23:11:06] [Rank 0] step:2021/10000 train_time:135051ms step_avg:66.82ms
+[2025-07-07 23:11:08] [Rank 0] step:2041/10000 train_time:136391ms step_avg:66.83ms
+[2025-07-07 23:11:09] [Rank 0] step:2061/10000 train_time:137732ms step_avg:66.83ms
+[2025-07-07 23:11:10] [Rank 0] step:2081/10000 train_time:139101ms step_avg:66.84ms
+[2025-07-07 23:11:12] [Rank 0] step:2101/10000 train_time:140442ms step_avg:66.85ms
+[2025-07-07 23:11:13] [Rank 0] step:2121/10000 train_time:141784ms step_avg:66.85ms
+[2025-07-07 23:11:14] [Rank 0] step:2141/10000 train_time:143127ms step_avg:66.85ms
+[2025-07-07 23:11:16] [Rank 0] step:2161/10000 train_time:144723ms step_avg:66.97ms
+[2025-07-07 23:11:17] [Rank 0] step:2181/10000 train_time:145862ms step_avg:66.88ms
+[2025-07-07 23:11:18] [Rank 0] step:2201/10000 train_time:147208ms step_avg:66.88ms
+[2025-07-07 23:11:20] [Rank 0] step:2221/10000 train_time:148554ms step_avg:66.89ms
+[2025-07-07 23:11:21] [Rank 0] step:2241/10000 train_time:149911ms step_avg:66.89ms
+[2025-07-07 23:11:22] [Rank 0] step:2261/10000 train_time:151282ms step_avg:66.91ms
+[2025-07-07 23:11:24] [Rank 0] step:2281/10000 train_time:152651ms step_avg:66.92ms
+[2025-07-07 23:11:25] [Rank 0] step:2301/10000 train_time:154022ms step_avg:66.94ms
+[2025-07-07 23:11:27] [Rank 0] step:2321/10000 train_time:155394ms step_avg:66.95ms
+[2025-07-07 23:11:28] [Rank 0] step:2341/10000 train_time:157014ms step_avg:67.07ms
+[2025-07-07 23:11:29] [Rank 0] step:2361/10000 train_time:158166ms step_avg:66.99ms
+[2025-07-07 23:11:31] [Rank 0] step:2381/10000 train_time:159539ms step_avg:67.01ms
+[2025-07-07 23:11:32] [Rank 0] step:2401/10000 train_time:160913ms step_avg:67.02ms
+[2025-07-07 23:11:34] [Rank 0] step:2421/10000 train_time:162329ms step_avg:67.05ms
+[2025-07-07 23:11:35] [Rank 0] step:2441/10000 train_time:163704ms step_avg:67.06ms
+[2025-07-07 23:11:36] [Rank 0] step:2461/10000 train_time:165079ms step_avg:67.08ms
+[2025-07-07 23:11:38] [Rank 0] step:2481/10000 train_time:166453ms step_avg:67.09ms
+[2025-07-07 23:11:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:11:40] [Rank 0] PRINT: step:2500/10000 train_loss:4.9985 val_loss:4.7121 train_time:168452ms step_avg:67.38ms
+[2025-07-07 23:11:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:11:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:11:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:17:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:17:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:17:09] [Rank 0] Total Loss: 5.8612
+[2025-07-07 23:17:09] [Rank 0] Total FTA: 0.0692
+[2025-07-07 23:17:09] [Rank 0] Group 0 Loss: 5.7814
+[2025-07-07 23:17:09] [Rank 0] Group 1 Loss: 5.8075
+[2025-07-07 23:17:09] [Rank 0] Group 2 Loss: 6.0199
+[2025-07-07 23:17:09] [Rank 0] Group 3 Loss: 5.7533
+[2025-07-07 23:17:09] [Rank 0] Group 4 Loss: 5.8969
+[2025-07-07 23:17:09] [Rank 0] Group 5 Loss: 5.8612
+[2025-07-07 23:17:09] [Rank 0] Group 6 Loss: 5.8988
+[2025-07-07 23:17:09] [Rank 0] Group 7 Loss: 5.8734
+[2025-07-07 23:17:09] [Rank 0] Group 8 Loss: 5.8465
+[2025-07-07 23:17:09] [Rank 0] Group 9 Loss: 5.8680
+[2025-07-07 23:17:09] [Rank 0] Group 10 Loss: 5.8932
+[2025-07-07 23:17:09] [Rank 0] Group 11 Loss: 5.8778
+[2025-07-07 23:17:09] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-07 23:17:09] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 23:17:09] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-07 23:17:09] [Rank 0] Group 3 FTA: 0.0286
+[2025-07-07 23:17:09] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 23:17:10] [Rank 0] Group 5 FTA: 0.0312
+[2025-07-07 23:17:10] [Rank 0] Group 6 FTA: 0.0599
+[2025-07-07 23:17:10] [Rank 0] Group 7 FTA: 0.0703
+[2025-07-07 23:17:10] [Rank 0] Group 8 FTA: 0.0547
+[2025-07-07 23:17:10] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 23:17:10] [Rank 0] Group 10 FTA: 0.0762
+[2025-07-07 23:17:10] [Rank 0] Group 11 FTA: 0.0635
+[2025-07-07 23:17:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 23:17:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 23:17:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 23:17:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 23:17:11] [Rank 0] step:2501/10000 train_time:168461ms step_avg:67.36ms
+[2025-07-07 23:17:12] [Rank 0] step:2521/10000 train_time:169234ms step_avg:67.13ms
+[2025-07-07 23:17:14] [Rank 0] step:2541/10000 train_time:170653ms step_avg:67.16ms
+[2025-07-07 23:17:15] [Rank 0] step:2561/10000 train_time:172019ms step_avg:67.17ms
+[2025-07-07 23:17:17] [Rank 0] step:2581/10000 train_time:173384ms step_avg:67.18ms
+[2025-07-07 23:17:18] [Rank 0] step:2601/10000 train_time:174750ms step_avg:67.19ms
+[2025-07-07 23:17:19] [Rank 0] step:2621/10000 train_time:176117ms step_avg:67.19ms
+[2025-07-07 23:17:21] [Rank 0] step:2641/10000 train_time:177484ms step_avg:67.20ms
+[2025-07-07 23:17:22] [Rank 0] step:2661/10000 train_time:178852ms step_avg:67.21ms
+[2025-07-07 23:17:23] [Rank 0] step:2681/10000 train_time:180218ms step_avg:67.22ms
+[2025-07-07 23:17:25] [Rank 0] step:2701/10000 train_time:182270ms step_avg:67.48ms
+[2025-07-07 23:17:26] [Rank 0] step:2721/10000 train_time:183020ms step_avg:67.26ms
+[2025-07-07 23:17:28] [Rank 0] step:2741/10000 train_time:184378ms step_avg:67.27ms
+[2025-07-07 23:17:29] [Rank 0] step:2761/10000 train_time:185748ms step_avg:67.28ms
+[2025-07-07 23:17:30] [Rank 0] step:2781/10000 train_time:187121ms step_avg:67.29ms
+[2025-07-07 23:17:32] [Rank 0] step:2801/10000 train_time:188492ms step_avg:67.29ms
0] step:2801/10000 train_time:188492ms step_avg:67.29ms +[2025-07-07 23:17:33] [Rank 0] step:2821/10000 train_time:189865ms step_avg:67.30ms +[2025-07-07 23:17:33] [Rank 0] step:2821/10000 train_time:189865ms step_avg:67.30ms +[2025-07-07 23:17:34] [Rank 0] step:2841/10000 train_time:191237ms step_avg:67.31ms +[2025-07-07 23:17:34] [Rank 0] step:2841/10000 train_time:191237ms step_avg:67.31ms +[2025-07-07 23:17:36] [Rank 0] step:2861/10000 train_time:192612ms step_avg:67.32ms +[2025-07-07 23:17:36] [Rank 0] step:2861/10000 train_time:192612ms step_avg:67.32ms +[2025-07-07 23:17:37] [Rank 0] step:2881/10000 train_time:194032ms step_avg:67.35ms +[2025-07-07 23:17:37] [Rank 0] step:2881/10000 train_time:194032ms step_avg:67.35ms +[2025-07-07 23:17:39] [Rank 0] step:2901/10000 train_time:195395ms step_avg:67.35ms +[2025-07-07 23:17:39] [Rank 0] step:2901/10000 train_time:195395ms step_avg:67.35ms +[2025-07-07 23:17:40] [Rank 0] step:2921/10000 train_time:196769ms step_avg:67.36ms +[2025-07-07 23:17:40] [Rank 0] step:2921/10000 train_time:196769ms step_avg:67.36ms +[2025-07-07 23:17:41] [Rank 0] step:2941/10000 train_time:198144ms step_avg:67.37ms +[2025-07-07 23:17:41] [Rank 0] step:2941/10000 train_time:198144ms step_avg:67.37ms +[2025-07-07 23:17:43] [Rank 0] step:2961/10000 train_time:199519ms step_avg:67.38ms +[2025-07-07 23:17:43] [Rank 0] step:2961/10000 train_time:199519ms step_avg:67.38ms +[2025-07-07 23:17:44] [Rank 0] step:2981/10000 train_time:200894ms step_avg:67.39ms +[2025-07-07 23:17:44] [Rank 0] step:2981/10000 train_time:200894ms step_avg:67.39ms +[2025-07-07 23:17:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:17:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:17:46] [Rank 0] PRINT: step:3000/10000 train_loss:4.4502 val_loss:4.2046 train_time:202893ms step_avg:67.63ms +[2025-07-07 23:17:46] [Rank 0] PRINT: step:3000/10000 train_loss:4.4502 val_loss:4.2046 train_time:202893ms step_avg:67.63ms +[2025-07-07 23:17:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:17:46] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:17:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 23:17:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 23:17:47] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:17:47] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:23:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:23:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:23:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:23:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:23:15] [Rank 0] Total Loss: 5.5592 +[2025-07-07 23:23:15] [Rank 0] Total Loss: 5.5592 +[2025-07-07 23:23:15] [Rank 0] Total FTA: 0.0714 +[2025-07-07 23:23:15] [Rank 0] Total FTA: 0.0714 +[2025-07-07 23:23:15] [Rank 0] Group 0 Loss: 5.5954 +[2025-07-07 23:23:15] [Rank 0] Group 0 Loss: 5.5954 +[2025-07-07 23:23:15] [Rank 0] Group 1 Loss: 5.5270 +[2025-07-07 23:23:15] [Rank 0] Group 1 Loss: 5.5270 +[2025-07-07 23:23:15] [Rank 0] Group 2 Loss: 5.6664 +[2025-07-07 23:23:15] [Rank 0] Group 2 Loss: 5.6664 +[2025-07-07 23:23:15] [Rank 0] Group 3 Loss: 5.4372 +[2025-07-07 23:23:15] [Rank 0] Group 3 Loss: 5.4372 +[2025-07-07 23:23:15] [Rank 0] Group 4 Loss: 5.5871 +[2025-07-07 23:23:15] [Rank 0] Group 4 Loss: 5.5871 +[2025-07-07 23:23:15] [Rank 0] Group 5 Loss: 5.5442 +[2025-07-07 23:23:15] [Rank 0] Group 5 Loss: 5.5442 +[2025-07-07 23:23:15] [Rank 0] Group 6 Loss: 5.5894 +[2025-07-07 23:23:15] [Rank 0] Group 6 Loss: 5.5894 +[2025-07-07 23:23:15] [Rank 0] Group 7 Loss: 5.5395 +[2025-07-07 23:23:15] [Rank 0] Group 7 Loss: 5.5395 +[2025-07-07 23:23:15] [Rank 0] Group 8 Loss: 5.5482 +[2025-07-07 23:23:15] [Rank 0] Group 8 Loss: 5.5482 +[2025-07-07 23:23:15] [Rank 0] Group 9 Loss: 5.5781 +[2025-07-07 23:23:15] [Rank 0] Group 9 Loss: 5.5781 +[2025-07-07 23:23:15] [Rank 0] Group 10 Loss: 5.5597 +[2025-07-07 23:23:15] [Rank 0] Group 10 Loss: 5.5597 +[2025-07-07 23:23:15] [Rank 0] Group 11 Loss: 5.5401 +[2025-07-07 23:23:15] [Rank 0] Group 11 Loss: 5.5401 +[2025-07-07 23:23:15] [Rank 0] Group 0 FTA: 0.1638 +[2025-07-07 23:23:15] [Rank 0] Group 0 FTA: 0.1638 +[2025-07-07 23:23:15] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:23:15] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:23:15] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 23:23:15] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 23:23:15] [Rank 0] Group 3 FTA: 0.0417 +[2025-07-07 23:23:15] [Rank 0] Group 3 FTA: 0.0417 +[2025-07-07 23:23:15] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 23:23:15] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 23:23:15] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 23:23:15] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 23:23:15] [Rank 0] Group 6 FTA: 0.0677 +[2025-07-07 23:23:15] [Rank 0] Group 6 FTA: 0.0677 +[2025-07-07 23:23:15] [Rank 0] Group 7 FTA: 0.0833 +[2025-07-07 23:23:15] [Rank 0] Group 7 FTA: 0.0833 +[2025-07-07 23:23:15] [Rank 0] Group 8 FTA: 0.0755 +[2025-07-07 23:23:15] [Rank 0] Group 8 FTA: 0.0755 +[2025-07-07 23:23:15] [Rank 0] Group 9 FTA: 0.0586 +[2025-07-07 23:23:15] [Rank 0] Group 9 FTA: 0.0586 +[2025-07-07 23:23:15] [Rank 0] Group 10 FTA: 0.0742 +[2025-07-07 23:23:15] [Rank 0] Group 10 FTA: 0.0742 +[2025-07-07 23:23:15] [Rank 0] Group 11 FTA: 0.0645 +[2025-07-07 23:23:15] [Rank 0] Group 11 FTA: 0.0645 +[2025-07-07 23:23:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:23:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:23:16] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:23:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:23:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:23:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:23:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:23:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:23:17] [Rank 0] step:3001/10000 train_time:202902ms step_avg:67.61ms +[2025-07-07 23:23:17] [Rank 0] step:3001/10000 train_time:202902ms step_avg:67.61ms +[2025-07-07 23:23:18] [Rank 0] step:3021/10000 train_time:203677ms step_avg:67.42ms +[2025-07-07 23:23:18] [Rank 0] step:3021/10000 train_time:203677ms step_avg:67.42ms +[2025-07-07 23:23:20] [Rank 0] step:3041/10000 train_time:205041ms step_avg:67.43ms +[2025-07-07 23:23:20] [Rank 0] step:3041/10000 train_time:205041ms step_avg:67.43ms +[2025-07-07 23:23:21] [Rank 0] step:3061/10000 train_time:206408ms step_avg:67.43ms +[2025-07-07 23:23:21] [Rank 0] step:3061/10000 train_time:206408ms step_avg:67.43ms +[2025-07-07 23:23:22] [Rank 0] step:3081/10000 train_time:207831ms step_avg:67.46ms +[2025-07-07 23:23:22] [Rank 0] step:3081/10000 train_time:207831ms step_avg:67.46ms +[2025-07-07 23:23:24] [Rank 0] step:3101/10000 train_time:209197ms step_avg:67.46ms +[2025-07-07 23:23:24] [Rank 0] step:3101/10000 train_time:209197ms step_avg:67.46ms +[2025-07-07 23:23:25] [Rank 0] step:3121/10000 train_time:210566ms step_avg:67.47ms +[2025-07-07 23:23:25] [Rank 0] step:3121/10000 train_time:210566ms step_avg:67.47ms +[2025-07-07 23:23:26] [Rank 0] step:3141/10000 train_time:211935ms step_avg:67.47ms +[2025-07-07 23:23:26] [Rank 0] step:3141/10000 train_time:211935ms step_avg:67.47ms +[2025-07-07 23:23:28] [Rank 0] step:3161/10000 train_time:213305ms step_avg:67.48ms +[2025-07-07 23:23:28] [Rank 0] step:3161/10000 train_time:213305ms step_avg:67.48ms +[2025-07-07 23:23:29] [Rank 0] step:3181/10000 train_time:214676ms step_avg:67.49ms +[2025-07-07 23:23:29] [Rank 0] step:3181/10000 train_time:214676ms step_avg:67.49ms +[2025-07-07 23:23:31] [Rank 0] step:3201/10000 train_time:216049ms step_avg:67.49ms +[2025-07-07 23:23:31] [Rank 0] step:3201/10000 train_time:216049ms step_avg:67.49ms +[2025-07-07 23:23:32] [Rank 0] step:3221/10000 train_time:217419ms step_avg:67.50ms +[2025-07-07 23:23:32] [Rank 0] step:3221/10000 train_time:217419ms step_avg:67.50ms +[2025-07-07 23:23:33] [Rank 0] step:3241/10000 train_time:219041ms step_avg:67.58ms +[2025-07-07 23:23:33] [Rank 0] step:3241/10000 train_time:219041ms step_avg:67.58ms +[2025-07-07 23:23:35] [Rank 0] step:3261/10000 train_time:220187ms step_avg:67.52ms +[2025-07-07 23:23:35] [Rank 0] step:3261/10000 train_time:220187ms step_avg:67.52ms +[2025-07-07 23:23:36] [Rank 0] step:3281/10000 train_time:221558ms step_avg:67.53ms +[2025-07-07 23:23:36] [Rank 0] step:3281/10000 train_time:221558ms step_avg:67.53ms +[2025-07-07 23:23:37] [Rank 0] step:3301/10000 train_time:222930ms step_avg:67.53ms +[2025-07-07 23:23:37] [Rank 
0] step:3301/10000 train_time:222930ms step_avg:67.53ms +[2025-07-07 23:23:39] [Rank 0] step:3321/10000 train_time:224302ms step_avg:67.54ms +[2025-07-07 23:23:39] [Rank 0] step:3321/10000 train_time:224302ms step_avg:67.54ms +[2025-07-07 23:23:40] [Rank 0] step:3341/10000 train_time:225677ms step_avg:67.55ms +[2025-07-07 23:23:40] [Rank 0] step:3341/10000 train_time:225677ms step_avg:67.55ms +[2025-07-07 23:23:42] [Rank 0] step:3361/10000 train_time:227052ms step_avg:67.55ms +[2025-07-07 23:23:42] [Rank 0] step:3361/10000 train_time:227052ms step_avg:67.55ms +[2025-07-07 23:23:43] [Rank 0] step:3381/10000 train_time:228426ms step_avg:67.56ms +[2025-07-07 23:23:43] [Rank 0] step:3381/10000 train_time:228426ms step_avg:67.56ms +[2025-07-07 23:23:44] [Rank 0] step:3401/10000 train_time:229800ms step_avg:67.57ms +[2025-07-07 23:23:44] [Rank 0] step:3401/10000 train_time:229800ms step_avg:67.57ms +[2025-07-07 23:23:46] [Rank 0] step:3421/10000 train_time:231175ms step_avg:67.58ms +[2025-07-07 23:23:46] [Rank 0] step:3421/10000 train_time:231175ms step_avg:67.58ms +[2025-07-07 23:23:47] [Rank 0] step:3441/10000 train_time:232571ms step_avg:67.59ms +[2025-07-07 23:23:47] [Rank 0] step:3441/10000 train_time:232571ms step_avg:67.59ms +[2025-07-07 23:23:48] [Rank 0] step:3461/10000 train_time:233947ms step_avg:67.60ms +[2025-07-07 23:23:48] [Rank 0] step:3461/10000 train_time:233947ms step_avg:67.60ms +[2025-07-07 23:23:50] [Rank 0] step:3481/10000 train_time:235323ms step_avg:67.60ms +[2025-07-07 23:23:50] [Rank 0] step:3481/10000 train_time:235323ms step_avg:67.60ms +[2025-07-07 23:23:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:23:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:23:52] [Rank 0] PRINT: step:3500/10000 train_loss:3.9856 val_loss:3.7848 train_time:237324ms step_avg:67.81ms +[2025-07-07 23:23:52] [Rank 0] PRINT: step:3500/10000 train_loss:3.9856 val_loss:3.7848 train_time:237324ms step_avg:67.81ms +[2025-07-07 23:23:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:23:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:23:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 23:23:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 23:23:52] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:23:52] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:29:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:29:20] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:29:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:29:20] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:29:20] [Rank 0] Total Loss: 5.3194 +[2025-07-07 23:29:20] [Rank 0] Total Loss: 5.3194 +[2025-07-07 23:29:20] [Rank 0] Total FTA: 0.0683 +[2025-07-07 23:29:20] [Rank 0] Total FTA: 0.0683 +[2025-07-07 23:29:20] [Rank 0] Group 0 Loss: 5.3876 +[2025-07-07 23:29:20] [Rank 0] Group 0 Loss: 5.3876 +[2025-07-07 23:29:20] [Rank 0] Group 1 Loss: 5.2017 +[2025-07-07 23:29:20] [Rank 0] Group 1 Loss: 5.2017 +[2025-07-07 23:29:20] [Rank 0] Group 2 Loss: 5.4360 +[2025-07-07 23:29:20] [Rank 0] Group 2 Loss: 5.4360 +[2025-07-07 23:29:20] [Rank 0] Group 3 Loss: 5.2870 +[2025-07-07 23:29:20] [Rank 0] Group 3 Loss: 5.2870 +[2025-07-07 23:29:20] [Rank 0] Group 4 Loss: 5.3572 +[2025-07-07 23:29:20] [Rank 0] Group 4 Loss: 5.3572 +[2025-07-07 23:29:20] [Rank 0] Group 5 Loss: 5.2744 +[2025-07-07 23:29:20] [Rank 0] Group 5 Loss: 5.2744 +[2025-07-07 23:29:20] [Rank 0] Group 6 Loss: 5.3871 +[2025-07-07 23:29:20] [Rank 0] Group 6 Loss: 5.3871 +[2025-07-07 23:29:20] [Rank 0] Group 7 Loss: 5.2777 +[2025-07-07 23:29:20] [Rank 0] Group 7 Loss: 5.2777 +[2025-07-07 23:29:20] [Rank 0] Group 8 Loss: 5.2451 +[2025-07-07 23:29:20] [Rank 0] Group 8 Loss: 5.2451 +[2025-07-07 23:29:20] [Rank 0] Group 9 Loss: 5.3193 +[2025-07-07 23:29:20] [Rank 0] Group 9 Loss: 5.3193 +[2025-07-07 23:29:20] [Rank 0] Group 10 Loss: 5.2915 +[2025-07-07 23:29:20] [Rank 0] Group 10 Loss: 5.2915 +[2025-07-07 23:29:20] [Rank 0] Group 11 Loss: 5.3154 +[2025-07-07 23:29:20] [Rank 0] Group 11 Loss: 5.3154 +[2025-07-07 23:29:20] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 23:29:20] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 23:29:20] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:29:20] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:29:20] [Rank 0] Group 2 FTA: 0.0729 +[2025-07-07 23:29:20] [Rank 0] Group 2 FTA: 0.0729 +[2025-07-07 23:29:20] [Rank 0] Group 3 FTA: 0.0443 +[2025-07-07 23:29:20] [Rank 0] Group 3 FTA: 0.0443 +[2025-07-07 23:29:20] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 23:29:20] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 23:29:20] [Rank 0] Group 5 FTA: 0.0391 +[2025-07-07 23:29:20] [Rank 0] Group 5 FTA: 0.0391 +[2025-07-07 23:29:20] [Rank 0] Group 6 FTA: 0.0469 +[2025-07-07 23:29:20] [Rank 0] Group 6 FTA: 0.0469 +[2025-07-07 23:29:20] [Rank 0] Group 7 FTA: 0.0625 +[2025-07-07 23:29:20] [Rank 0] Group 7 FTA: 0.0625 +[2025-07-07 23:29:20] [Rank 0] Group 8 FTA: 0.0964 +[2025-07-07 23:29:20] [Rank 0] Group 8 FTA: 0.0964 +[2025-07-07 23:29:20] [Rank 0] Group 9 FTA: 0.0664 +[2025-07-07 23:29:20] [Rank 0] Group 9 FTA: 0.0664 +[2025-07-07 23:29:20] [Rank 0] Group 10 FTA: 0.0586 +[2025-07-07 23:29:20] [Rank 0] Group 10 FTA: 0.0586 +[2025-07-07 23:29:20] [Rank 0] Group 11 FTA: 0.0645 +[2025-07-07 23:29:20] [Rank 0] Group 11 FTA: 0.0645 +[2025-07-07 23:29:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:29:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:29:21] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:29:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:29:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:29:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:29:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:29:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:29:22] [Rank 0] step:3501/10000 train_time:237333ms step_avg:67.79ms +[2025-07-07 23:29:22] [Rank 0] step:3501/10000 train_time:237333ms step_avg:67.79ms +[2025-07-07 23:29:23] [Rank 0] step:3521/10000 train_time:238095ms step_avg:67.62ms +[2025-07-07 23:29:23] [Rank 0] step:3521/10000 train_time:238095ms step_avg:67.62ms +[2025-07-07 23:29:25] [Rank 0] step:3541/10000 train_time:239461ms step_avg:67.63ms +[2025-07-07 23:29:25] [Rank 0] step:3541/10000 train_time:239461ms step_avg:67.63ms +[2025-07-07 23:29:26] [Rank 0] step:3561/10000 train_time:240825ms step_avg:67.63ms +[2025-07-07 23:29:26] [Rank 0] step:3561/10000 train_time:240825ms step_avg:67.63ms +[2025-07-07 23:29:27] [Rank 0] step:3581/10000 train_time:242190ms step_avg:67.63ms +[2025-07-07 23:29:27] [Rank 0] step:3581/10000 train_time:242190ms step_avg:67.63ms +[2025-07-07 23:29:29] [Rank 0] step:3601/10000 train_time:244216ms step_avg:67.82ms +[2025-07-07 23:29:29] [Rank 0] step:3601/10000 train_time:244216ms step_avg:67.82ms +[2025-07-07 23:29:30] [Rank 0] step:3621/10000 train_time:244956ms step_avg:67.65ms +[2025-07-07 23:29:30] [Rank 0] step:3621/10000 train_time:244956ms step_avg:67.65ms +[2025-07-07 23:29:32] [Rank 0] step:3641/10000 train_time:246323ms step_avg:67.65ms +[2025-07-07 23:29:32] [Rank 0] step:3641/10000 train_time:246323ms step_avg:67.65ms +[2025-07-07 23:29:33] [Rank 0] step:3661/10000 train_time:247690ms step_avg:67.66ms +[2025-07-07 23:29:33] [Rank 0] step:3661/10000 train_time:247690ms step_avg:67.66ms +[2025-07-07 23:29:34] [Rank 0] step:3681/10000 train_time:249058ms step_avg:67.66ms +[2025-07-07 23:29:34] [Rank 0] step:3681/10000 train_time:249058ms step_avg:67.66ms +[2025-07-07 23:29:36] [Rank 0] step:3701/10000 train_time:250428ms step_avg:67.66ms +[2025-07-07 23:29:36] [Rank 0] step:3701/10000 train_time:250428ms step_avg:67.66ms +[2025-07-07 23:29:37] [Rank 0] step:3721/10000 train_time:251800ms step_avg:67.67ms +[2025-07-07 23:29:37] [Rank 0] step:3721/10000 train_time:251800ms step_avg:67.67ms +[2025-07-07 23:29:38] [Rank 0] step:3741/10000 train_time:253170ms step_avg:67.67ms +[2025-07-07 23:29:38] [Rank 0] step:3741/10000 train_time:253170ms step_avg:67.67ms +[2025-07-07 23:29:40] [Rank 0] step:3761/10000 train_time:254543ms step_avg:67.68ms +[2025-07-07 23:29:40] [Rank 0] step:3761/10000 train_time:254543ms step_avg:67.68ms +[2025-07-07 23:29:41] [Rank 0] step:3781/10000 train_time:256599ms step_avg:67.87ms +[2025-07-07 23:29:41] [Rank 0] step:3781/10000 train_time:256599ms step_avg:67.87ms +[2025-07-07 23:29:43] [Rank 0] step:3801/10000 train_time:257339ms step_avg:67.70ms +[2025-07-07 23:29:43] [Rank 
0] step:3801/10000 train_time:257339ms step_avg:67.70ms +[2025-07-07 23:29:44] [Rank 0] step:3821/10000 train_time:258713ms step_avg:67.71ms +[2025-07-07 23:29:44] [Rank 0] step:3821/10000 train_time:258713ms step_avg:67.71ms +[2025-07-07 23:29:45] [Rank 0] step:3841/10000 train_time:260087ms step_avg:67.71ms +[2025-07-07 23:29:45] [Rank 0] step:3841/10000 train_time:260087ms step_avg:67.71ms +[2025-07-07 23:29:47] [Rank 0] step:3861/10000 train_time:261459ms step_avg:67.72ms +[2025-07-07 23:29:47] [Rank 0] step:3861/10000 train_time:261459ms step_avg:67.72ms +[2025-07-07 23:29:48] [Rank 0] step:3881/10000 train_time:262834ms step_avg:67.72ms +[2025-07-07 23:29:48] [Rank 0] step:3881/10000 train_time:262834ms step_avg:67.72ms +[2025-07-07 23:29:49] [Rank 0] step:3901/10000 train_time:264210ms step_avg:67.73ms +[2025-07-07 23:29:49] [Rank 0] step:3901/10000 train_time:264210ms step_avg:67.73ms +[2025-07-07 23:29:51] [Rank 0] step:3921/10000 train_time:265586ms step_avg:67.73ms +[2025-07-07 23:29:51] [Rank 0] step:3921/10000 train_time:265586ms step_avg:67.73ms +[2025-07-07 23:29:52] [Rank 0] step:3941/10000 train_time:266962ms step_avg:67.74ms +[2025-07-07 23:29:52] [Rank 0] step:3941/10000 train_time:266962ms step_avg:67.74ms +[2025-07-07 23:29:54] [Rank 0] step:3961/10000 train_time:269015ms step_avg:67.92ms +[2025-07-07 23:29:54] [Rank 0] step:3961/10000 train_time:269015ms step_avg:67.92ms +[2025-07-07 23:29:55] [Rank 0] step:3981/10000 train_time:269756ms step_avg:67.76ms +[2025-07-07 23:29:55] [Rank 0] step:3981/10000 train_time:269756ms step_avg:67.76ms +[2025-07-07 23:29:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:29:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:29:57] [Rank 0] PRINT: step:4000/10000 train_loss:3.6089 val_loss:3.4463 train_time:271757ms step_avg:67.94ms +[2025-07-07 23:29:57] [Rank 0] PRINT: step:4000/10000 train_loss:3.6089 val_loss:3.4463 train_time:271757ms step_avg:67.94ms +[2025-07-07 23:29:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:29:57] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:29:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 23:29:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 23:29:57] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:29:57] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:35:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:35:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:35:27] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:35:27] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:35:27] [Rank 0] Total Loss: 5.1298 +[2025-07-07 23:35:27] [Rank 0] Total Loss: 5.1298 +[2025-07-07 23:35:27] [Rank 0] Total FTA: 0.0779 +[2025-07-07 23:35:27] [Rank 0] Total FTA: 0.0779 +[2025-07-07 23:35:27] [Rank 0] Group 0 Loss: 5.2475 +[2025-07-07 23:35:27] [Rank 0] Group 0 Loss: 5.2475 +[2025-07-07 23:35:27] [Rank 0] Group 1 Loss: 4.9602 +[2025-07-07 23:35:27] [Rank 0] Group 1 Loss: 4.9602 +[2025-07-07 23:35:27] [Rank 0] Group 2 Loss: 5.1132 +[2025-07-07 23:35:27] [Rank 0] Group 2 Loss: 5.1132 +[2025-07-07 23:35:27] [Rank 0] Group 3 Loss: 5.0449 +[2025-07-07 23:35:27] [Rank 0] Group 3 Loss: 5.0449 +[2025-07-07 23:35:27] [Rank 0] Group 4 Loss: 5.1537 +[2025-07-07 23:35:27] [Rank 0] Group 4 Loss: 5.1537 +[2025-07-07 23:35:27] [Rank 0] Group 5 Loss: 5.0827 +[2025-07-07 23:35:27] [Rank 0] Group 5 Loss: 5.0827 +[2025-07-07 23:35:27] [Rank 0] Group 6 Loss: 5.2047 +[2025-07-07 23:35:27] [Rank 0] Group 6 Loss: 5.2047 +[2025-07-07 23:35:27] [Rank 0] Group 7 Loss: 5.1073 +[2025-07-07 23:35:27] [Rank 0] Group 7 Loss: 5.1073 +[2025-07-07 23:35:27] [Rank 0] Group 8 Loss: 5.0932 +[2025-07-07 23:35:27] [Rank 0] Group 8 Loss: 5.0932 +[2025-07-07 23:35:27] [Rank 0] Group 9 Loss: 5.1520 +[2025-07-07 23:35:27] [Rank 0] Group 9 Loss: 5.1520 +[2025-07-07 23:35:27] [Rank 0] Group 10 Loss: 5.1430 +[2025-07-07 23:35:27] [Rank 0] Group 10 Loss: 5.1430 +[2025-07-07 23:35:27] [Rank 0] Group 11 Loss: 5.1335 +[2025-07-07 23:35:27] [Rank 0] Group 11 Loss: 5.1335 +[2025-07-07 23:35:27] [Rank 0] Group 0 FTA: 0.1899 +[2025-07-07 23:35:27] [Rank 0] Group 0 FTA: 0.1899 +[2025-07-07 23:35:27] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:35:27] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:35:27] [Rank 0] Group 2 FTA: 0.0911 +[2025-07-07 23:35:27] [Rank 0] Group 2 FTA: 0.0911 +[2025-07-07 23:35:27] [Rank 0] Group 3 FTA: 0.0365 +[2025-07-07 23:35:27] [Rank 0] Group 3 FTA: 0.0365 +[2025-07-07 23:35:27] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-07 23:35:27] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-07 23:35:27] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 23:35:27] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 23:35:27] [Rank 0] Group 6 FTA: 0.0651 +[2025-07-07 23:35:27] [Rank 0] Group 6 FTA: 0.0651 +[2025-07-07 23:35:27] [Rank 0] Group 7 FTA: 0.0833 +[2025-07-07 23:35:27] [Rank 0] Group 7 FTA: 0.0833 +[2025-07-07 23:35:27] [Rank 0] Group 8 FTA: 0.0781 +[2025-07-07 23:35:27] [Rank 0] Group 8 FTA: 0.0781 +[2025-07-07 23:35:27] [Rank 0] Group 9 FTA: 0.0469 +[2025-07-07 23:35:27] [Rank 0] Group 9 FTA: 0.0469 +[2025-07-07 23:35:27] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 23:35:27] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 23:35:27] [Rank 0] Group 11 FTA: 0.0703 +[2025-07-07 23:35:27] [Rank 0] Group 11 FTA: 0.0703 +[2025-07-07 23:35:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:35:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:35:28] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:35:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:35:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:35:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:35:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:35:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:35:29] [Rank 0] step:4001/10000 train_time:271766ms step_avg:67.92ms +[2025-07-07 23:35:29] [Rank 0] step:4001/10000 train_time:271766ms step_avg:67.92ms +[2025-07-07 23:35:30] [Rank 0] step:4021/10000 train_time:272536ms step_avg:67.78ms +[2025-07-07 23:35:30] [Rank 0] step:4021/10000 train_time:272536ms step_avg:67.78ms +[2025-07-07 23:35:31] [Rank 0] step:4041/10000 train_time:273901ms step_avg:67.78ms +[2025-07-07 23:35:31] [Rank 0] step:4041/10000 train_time:273901ms step_avg:67.78ms +[2025-07-07 23:35:33] [Rank 0] step:4061/10000 train_time:275266ms step_avg:67.78ms +[2025-07-07 23:35:33] [Rank 0] step:4061/10000 train_time:275266ms step_avg:67.78ms +[2025-07-07 23:35:34] [Rank 0] step:4081/10000 train_time:276633ms step_avg:67.79ms +[2025-07-07 23:35:34] [Rank 0] step:4081/10000 train_time:276633ms step_avg:67.79ms +[2025-07-07 23:35:36] [Rank 0] step:4101/10000 train_time:277999ms step_avg:67.79ms +[2025-07-07 23:35:36] [Rank 0] step:4101/10000 train_time:277999ms step_avg:67.79ms +[2025-07-07 23:35:37] [Rank 0] step:4121/10000 train_time:279366ms step_avg:67.79ms +[2025-07-07 23:35:37] [Rank 0] step:4121/10000 train_time:279366ms step_avg:67.79ms +[2025-07-07 23:35:38] [Rank 0] step:4141/10000 train_time:280779ms step_avg:67.80ms +[2025-07-07 23:35:38] [Rank 0] step:4141/10000 train_time:280779ms step_avg:67.80ms +[2025-07-07 23:35:40] [Rank 0] step:4161/10000 train_time:282126ms step_avg:67.80ms +[2025-07-07 23:35:40] [Rank 0] step:4161/10000 train_time:282126ms step_avg:67.80ms +[2025-07-07 23:35:41] [Rank 0] step:4181/10000 train_time:283495ms step_avg:67.81ms +[2025-07-07 23:35:41] [Rank 0] step:4181/10000 train_time:283495ms step_avg:67.81ms +[2025-07-07 23:35:42] [Rank 0] step:4201/10000 train_time:284864ms step_avg:67.81ms +[2025-07-07 23:35:42] [Rank 0] step:4201/10000 train_time:284864ms step_avg:67.81ms +[2025-07-07 23:35:44] [Rank 0] step:4221/10000 train_time:286237ms step_avg:67.81ms +[2025-07-07 23:35:44] [Rank 0] step:4221/10000 train_time:286237ms step_avg:67.81ms +[2025-07-07 23:35:45] [Rank 0] step:4241/10000 train_time:287609ms step_avg:67.82ms +[2025-07-07 23:35:45] [Rank 0] step:4241/10000 train_time:287609ms step_avg:67.82ms +[2025-07-07 23:35:47] [Rank 0] step:4261/10000 train_time:288981ms step_avg:67.82ms +[2025-07-07 23:35:47] [Rank 0] step:4261/10000 train_time:288981ms step_avg:67.82ms +[2025-07-07 23:35:48] [Rank 0] step:4281/10000 train_time:290353ms step_avg:67.82ms +[2025-07-07 23:35:48] [Rank 0] step:4281/10000 train_time:290353ms step_avg:67.82ms +[2025-07-07 23:35:49] [Rank 0] step:4301/10000 train_time:291725ms step_avg:67.83ms +[2025-07-07 23:35:49] [Rank 
0] step:4301/10000 train_time:291725ms step_avg:67.83ms +[2025-07-07 23:35:51] [Rank 0] step:4321/10000 train_time:293772ms step_avg:67.99ms +[2025-07-07 23:35:51] [Rank 0] step:4321/10000 train_time:293772ms step_avg:67.99ms +[2025-07-07 23:35:52] [Rank 0] step:4341/10000 train_time:294513ms step_avg:67.84ms +[2025-07-07 23:35:52] [Rank 0] step:4341/10000 train_time:294513ms step_avg:67.84ms +[2025-07-07 23:35:53] [Rank 0] step:4361/10000 train_time:295887ms step_avg:67.85ms +[2025-07-07 23:35:53] [Rank 0] step:4361/10000 train_time:295887ms step_avg:67.85ms +[2025-07-07 23:35:55] [Rank 0] step:4381/10000 train_time:297261ms step_avg:67.85ms +[2025-07-07 23:35:55] [Rank 0] step:4381/10000 train_time:297261ms step_avg:67.85ms +[2025-07-07 23:35:56] [Rank 0] step:4401/10000 train_time:298636ms step_avg:67.86ms +[2025-07-07 23:35:56] [Rank 0] step:4401/10000 train_time:298636ms step_avg:67.86ms +[2025-07-07 23:35:58] [Rank 0] step:4421/10000 train_time:300008ms step_avg:67.86ms +[2025-07-07 23:35:58] [Rank 0] step:4421/10000 train_time:300008ms step_avg:67.86ms +[2025-07-07 23:35:59] [Rank 0] step:4441/10000 train_time:301381ms step_avg:67.86ms +[2025-07-07 23:35:59] [Rank 0] step:4441/10000 train_time:301381ms step_avg:67.86ms +[2025-07-07 23:36:00] [Rank 0] step:4461/10000 train_time:302755ms step_avg:67.87ms +[2025-07-07 23:36:00] [Rank 0] step:4461/10000 train_time:302755ms step_avg:67.87ms +[2025-07-07 23:36:02] [Rank 0] step:4481/10000 train_time:304129ms step_avg:67.87ms +[2025-07-07 23:36:02] [Rank 0] step:4481/10000 train_time:304129ms step_avg:67.87ms +[2025-07-07 23:36:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:36:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:36:04] [Rank 0] PRINT: step:4500/10000 train_loss:3.2993 val_loss:3.1647 train_time:306127ms step_avg:68.03ms +[2025-07-07 23:36:04] [Rank 0] PRINT: step:4500/10000 train_loss:3.2993 val_loss:3.1647 train_time:306127ms step_avg:68.03ms +[2025-07-07 23:36:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:36:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:36:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 23:36:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 23:36:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:36:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:41:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:41:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:41:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:41:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:41:35] [Rank 0] Total Loss: 4.9886 +[2025-07-07 23:41:35] [Rank 0] Total Loss: 4.9886 +[2025-07-07 23:41:35] [Rank 0] Total FTA: 0.0747 +[2025-07-07 23:41:35] [Rank 0] Total FTA: 0.0747 +[2025-07-07 23:41:35] [Rank 0] Group 0 Loss: 5.0726 +[2025-07-07 23:41:35] [Rank 0] Group 0 Loss: 5.0726 +[2025-07-07 23:41:35] [Rank 0] Group 1 Loss: 4.7746 +[2025-07-07 23:41:35] [Rank 0] Group 1 Loss: 4.7746 +[2025-07-07 23:41:35] [Rank 0] Group 2 Loss: 5.0542 +[2025-07-07 23:41:35] [Rank 0] Group 2 Loss: 5.0542 +[2025-07-07 23:41:36] [Rank 0] Group 3 Loss: 4.8830 +[2025-07-07 23:41:36] [Rank 0] Group 3 Loss: 4.8830 +[2025-07-07 23:41:36] [Rank 0] Group 4 Loss: 5.0309 +[2025-07-07 23:41:36] [Rank 0] Group 4 Loss: 5.0309 +[2025-07-07 23:41:36] [Rank 0] Group 5 Loss: 4.9498 +[2025-07-07 23:41:36] [Rank 0] Group 5 Loss: 4.9498 +[2025-07-07 23:41:36] [Rank 0] Group 6 Loss: 5.1018 +[2025-07-07 23:41:36] [Rank 0] Group 6 Loss: 5.1018 +[2025-07-07 23:41:36] [Rank 0] Group 7 Loss: 4.9733 +[2025-07-07 23:41:36] [Rank 0] Group 7 Loss: 4.9733 +[2025-07-07 23:41:36] [Rank 0] Group 8 Loss: 4.9621 +[2025-07-07 23:41:36] [Rank 0] Group 8 Loss: 4.9621 +[2025-07-07 23:41:36] [Rank 0] Group 9 Loss: 5.0004 +[2025-07-07 23:41:36] [Rank 0] Group 9 Loss: 5.0004 +[2025-07-07 23:41:36] [Rank 0] Group 10 Loss: 4.9807 +[2025-07-07 23:41:36] [Rank 0] Group 10 Loss: 4.9807 +[2025-07-07 23:41:36] [Rank 0] Group 11 Loss: 4.9936 +[2025-07-07 23:41:36] [Rank 0] Group 11 Loss: 4.9936 +[2025-07-07 23:41:36] [Rank 0] Group 0 FTA: 0.1782 +[2025-07-07 23:41:36] [Rank 0] Group 0 FTA: 0.1782 +[2025-07-07 23:41:36] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:41:36] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:41:36] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 23:41:36] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 23:41:36] [Rank 0] Group 3 FTA: 0.0312 +[2025-07-07 23:41:36] [Rank 0] Group 3 FTA: 0.0312 +[2025-07-07 23:41:36] [Rank 0] Group 4 FTA: 0.0286 +[2025-07-07 23:41:36] [Rank 0] Group 4 FTA: 0.0286 +[2025-07-07 23:41:36] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 23:41:36] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 23:41:36] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-07 23:41:36] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-07 23:41:36] [Rank 0] Group 7 FTA: 0.0703 +[2025-07-07 23:41:36] [Rank 0] Group 7 FTA: 0.0703 +[2025-07-07 23:41:36] [Rank 0] Group 8 FTA: 0.0859 +[2025-07-07 23:41:36] [Rank 0] Group 8 FTA: 0.0859 +[2025-07-07 23:41:36] [Rank 0] Group 9 FTA: 0.0625 +[2025-07-07 23:41:36] [Rank 0] Group 9 FTA: 0.0625 +[2025-07-07 23:41:36] [Rank 0] Group 10 FTA: 0.0742 +[2025-07-07 23:41:36] [Rank 0] Group 10 FTA: 0.0742 +[2025-07-07 23:41:36] [Rank 0] Group 11 FTA: 0.0693 +[2025-07-07 23:41:36] [Rank 0] Group 11 FTA: 0.0693 +[2025-07-07 23:41:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:41:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:41:36] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:41:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:41:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:41:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:41:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:41:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:41:38] [Rank 0] step:4501/10000 train_time:306245ms step_avg:68.04ms +[2025-07-07 23:41:38] [Rank 0] step:4501/10000 train_time:306245ms step_avg:68.04ms +[2025-07-07 23:41:39] [Rank 0] step:4521/10000 train_time:307597ms step_avg:68.04ms +[2025-07-07 23:41:39] [Rank 0] step:4521/10000 train_time:307597ms step_avg:68.04ms +[2025-07-07 23:41:40] [Rank 0] step:4541/10000 train_time:308962ms step_avg:68.04ms +[2025-07-07 23:41:40] [Rank 0] step:4541/10000 train_time:308962ms step_avg:68.04ms +[2025-07-07 23:41:42] [Rank 0] step:4561/10000 train_time:310328ms step_avg:68.04ms +[2025-07-07 23:41:42] [Rank 0] step:4561/10000 train_time:310328ms step_avg:68.04ms +[2025-07-07 23:41:43] [Rank 0] step:4581/10000 train_time:311695ms step_avg:68.04ms +[2025-07-07 23:41:43] [Rank 0] step:4581/10000 train_time:311695ms step_avg:68.04ms +[2025-07-07 23:41:45] [Rank 0] step:4601/10000 train_time:313062ms step_avg:68.04ms +[2025-07-07 23:41:45] [Rank 0] step:4601/10000 train_time:313062ms step_avg:68.04ms +[2025-07-07 23:41:46] [Rank 0] step:4621/10000 train_time:314430ms step_avg:68.04ms +[2025-07-07 23:41:46] [Rank 0] step:4621/10000 train_time:314430ms step_avg:68.04ms +[2025-07-07 23:41:47] [Rank 0] step:4641/10000 train_time:315805ms step_avg:68.05ms +[2025-07-07 23:41:47] [Rank 0] step:4641/10000 train_time:315805ms step_avg:68.05ms +[2025-07-07 23:41:49] [Rank 0] step:4661/10000 train_time:317176ms step_avg:68.05ms +[2025-07-07 23:41:49] [Rank 0] step:4661/10000 train_time:317176ms step_avg:68.05ms +[2025-07-07 23:41:50] [Rank 0] step:4681/10000 train_time:318548ms step_avg:68.05ms +[2025-07-07 23:41:50] [Rank 0] step:4681/10000 train_time:318548ms step_avg:68.05ms +[2025-07-07 23:41:51] [Rank 0] step:4701/10000 train_time:319952ms step_avg:68.06ms +[2025-07-07 23:41:51] [Rank 0] step:4701/10000 train_time:319952ms step_avg:68.06ms +[2025-07-07 23:41:53] [Rank 0] step:4721/10000 train_time:321323ms step_avg:68.06ms +[2025-07-07 23:41:53] [Rank 0] step:4721/10000 train_time:321323ms step_avg:68.06ms +[2025-07-07 23:41:54] [Rank 0] step:4741/10000 train_time:322697ms step_avg:68.07ms +[2025-07-07 23:41:54] [Rank 0] step:4741/10000 train_time:322697ms step_avg:68.07ms +[2025-07-07 23:41:56] [Rank 0] step:4761/10000 train_time:324071ms step_avg:68.07ms +[2025-07-07 23:41:56] [Rank 0] step:4761/10000 train_time:324071ms step_avg:68.07ms +[2025-07-07 23:41:57] [Rank 0] step:4781/10000 train_time:325443ms step_avg:68.07ms +[2025-07-07 23:41:57] [Rank 0] step:4781/10000 train_time:325443ms step_avg:68.07ms +[2025-07-07 23:41:58] [Rank 0] step:4801/10000 train_time:326816ms step_avg:68.07ms +[2025-07-07 23:41:58] [Rank 
0] step:4801/10000 train_time:326816ms step_avg:68.07ms +[2025-07-07 23:42:00] [Rank 0] step:4821/10000 train_time:328190ms step_avg:68.08ms +[2025-07-07 23:42:00] [Rank 0] step:4821/10000 train_time:328190ms step_avg:68.08ms +[2025-07-07 23:42:01] [Rank 0] step:4841/10000 train_time:329565ms step_avg:68.08ms +[2025-07-07 23:42:01] [Rank 0] step:4841/10000 train_time:329565ms step_avg:68.08ms +[2025-07-07 23:42:02] [Rank 0] step:4861/10000 train_time:331597ms step_avg:68.22ms +[2025-07-07 23:42:02] [Rank 0] step:4861/10000 train_time:331597ms step_avg:68.22ms +[2025-07-07 23:42:04] [Rank 0] step:4881/10000 train_time:332340ms step_avg:68.09ms +[2025-07-07 23:42:04] [Rank 0] step:4881/10000 train_time:332340ms step_avg:68.09ms +[2025-07-07 23:42:05] [Rank 0] step:4901/10000 train_time:333716ms step_avg:68.09ms +[2025-07-07 23:42:05] [Rank 0] step:4901/10000 train_time:333716ms step_avg:68.09ms +[2025-07-07 23:42:07] [Rank 0] step:4921/10000 train_time:335091ms step_avg:68.09ms +[2025-07-07 23:42:07] [Rank 0] step:4921/10000 train_time:335091ms step_avg:68.09ms +[2025-07-07 23:42:08] [Rank 0] step:4941/10000 train_time:336466ms step_avg:68.10ms +[2025-07-07 23:42:08] [Rank 0] step:4941/10000 train_time:336466ms step_avg:68.10ms +[2025-07-07 23:42:09] [Rank 0] step:4961/10000 train_time:337840ms step_avg:68.10ms +[2025-07-07 23:42:09] [Rank 0] step:4961/10000 train_time:337840ms step_avg:68.10ms +[2025-07-07 23:42:11] [Rank 0] step:4981/10000 train_time:339215ms step_avg:68.10ms +[2025-07-07 23:42:11] [Rank 0] step:4981/10000 train_time:339215ms step_avg:68.10ms +[2025-07-07 23:42:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:42:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:42:13] [Rank 0] PRINT: step:5000/10000 train_loss:3.0415 val_loss:2.9303 train_time:341214ms step_avg:68.24ms +[2025-07-07 23:42:13] [Rank 0] PRINT: step:5000/10000 train_loss:3.0415 val_loss:2.9303 train_time:341214ms step_avg:68.24ms +[2025-07-07 23:42:13] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:42:13] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:42:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 23:42:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 23:42:13] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:42:13] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 23:47:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:47:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 23:47:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:47:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 23:47:44] [Rank 0] Total Loss: 4.8559 +[2025-07-07 23:47:44] [Rank 0] Total Loss: 4.8559 +[2025-07-07 23:47:44] [Rank 0] Total FTA: 0.0735 +[2025-07-07 23:47:44] [Rank 0] Total FTA: 0.0735 +[2025-07-07 23:47:44] [Rank 0] Group 0 Loss: 4.9278 +[2025-07-07 23:47:44] [Rank 0] Group 0 Loss: 4.9278 +[2025-07-07 23:47:44] [Rank 0] Group 1 Loss: 4.5776 +[2025-07-07 23:47:44] [Rank 0] Group 1 Loss: 4.5776 +[2025-07-07 23:47:44] [Rank 0] Group 2 Loss: 4.8718 +[2025-07-07 23:47:44] [Rank 0] Group 2 Loss: 4.8718 +[2025-07-07 23:47:44] [Rank 0] Group 3 Loss: 4.7309 +[2025-07-07 23:47:44] [Rank 0] Group 3 Loss: 4.7309 +[2025-07-07 23:47:44] [Rank 0] Group 4 Loss: 4.9101 +[2025-07-07 23:47:44] [Rank 0] Group 4 Loss: 4.9101 +[2025-07-07 23:47:44] [Rank 0] Group 5 Loss: 4.8612 +[2025-07-07 23:47:44] [Rank 0] Group 5 Loss: 4.8612 +[2025-07-07 23:47:44] [Rank 0] Group 6 Loss: 4.9385 +[2025-07-07 23:47:44] [Rank 0] Group 6 Loss: 4.9385 +[2025-07-07 23:47:44] [Rank 0] Group 7 Loss: 4.8558 +[2025-07-07 23:47:44] [Rank 0] Group 7 Loss: 4.8558 +[2025-07-07 23:47:44] [Rank 0] Group 8 Loss: 4.8650 +[2025-07-07 23:47:44] [Rank 0] Group 8 Loss: 4.8650 +[2025-07-07 23:47:44] [Rank 0] Group 9 Loss: 4.8816 +[2025-07-07 23:47:44] [Rank 0] Group 9 Loss: 4.8816 +[2025-07-07 23:47:44] [Rank 0] Group 10 Loss: 4.8549 +[2025-07-07 23:47:44] [Rank 0] Group 10 Loss: 4.8549 +[2025-07-07 23:47:44] [Rank 0] Group 11 Loss: 4.8844 +[2025-07-07 23:47:44] [Rank 0] Group 11 Loss: 4.8844 +[2025-07-07 23:47:44] [Rank 0] Group 0 FTA: 0.1756 +[2025-07-07 23:47:44] [Rank 0] Group 0 FTA: 0.1756 +[2025-07-07 23:47:44] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:47:44] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 23:47:44] [Rank 0] Group 2 FTA: 0.0703 +[2025-07-07 23:47:44] [Rank 0] Group 2 FTA: 0.0703 +[2025-07-07 23:47:44] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 23:47:44] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 23:47:44] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 23:47:44] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 23:47:44] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 23:47:44] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 23:47:44] [Rank 0] Group 6 FTA: 0.0938 +[2025-07-07 23:47:44] [Rank 0] Group 6 FTA: 0.0938 +[2025-07-07 23:47:44] [Rank 0] Group 7 FTA: 0.0677 +[2025-07-07 23:47:44] [Rank 0] Group 7 FTA: 0.0677 +[2025-07-07 23:47:44] [Rank 0] Group 8 FTA: 0.0625 +[2025-07-07 23:47:44] [Rank 0] Group 8 FTA: 0.0625 +[2025-07-07 23:47:44] [Rank 0] Group 9 FTA: 0.0312 +[2025-07-07 23:47:44] [Rank 0] Group 9 FTA: 0.0312 +[2025-07-07 23:47:44] [Rank 0] Group 10 FTA: 0.0820 +[2025-07-07 23:47:44] [Rank 0] Group 10 FTA: 0.0820 +[2025-07-07 23:47:44] [Rank 0] Group 11 FTA: 0.0664 +[2025-07-07 23:47:44] [Rank 0] Group 11 FTA: 0.0664 +[2025-07-07 23:47:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:47:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-07 23:47:45] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:47:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-07 23:47:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:47:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-07 23:47:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:47:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-07 23:47:45] [Rank 0] step:5001/10000 train_time:341223ms step_avg:68.23ms +[2025-07-07 23:47:45] [Rank 0] step:5001/10000 train_time:341223ms step_avg:68.23ms +[2025-07-07 23:47:47] [Rank 0] step:5021/10000 train_time:341990ms step_avg:68.11ms +[2025-07-07 23:47:47] [Rank 0] step:5021/10000 train_time:341990ms step_avg:68.11ms +[2025-07-07 23:47:48] [Rank 0] step:5041/10000 train_time:343403ms step_avg:68.12ms +[2025-07-07 23:47:48] [Rank 0] step:5041/10000 train_time:343403ms step_avg:68.12ms +[2025-07-07 23:47:50] [Rank 0] step:5061/10000 train_time:344770ms step_avg:68.12ms +[2025-07-07 23:47:50] [Rank 0] step:5061/10000 train_time:344770ms step_avg:68.12ms +[2025-07-07 23:47:51] [Rank 0] step:5081/10000 train_time:346138ms step_avg:68.12ms +[2025-07-07 23:47:51] [Rank 0] step:5081/10000 train_time:346138ms step_avg:68.12ms +[2025-07-07 23:47:52] [Rank 0] step:5101/10000 train_time:347505ms step_avg:68.12ms +[2025-07-07 23:47:52] [Rank 0] step:5101/10000 train_time:347505ms step_avg:68.12ms +[2025-07-07 23:47:54] [Rank 0] step:5121/10000 train_time:348873ms step_avg:68.13ms +[2025-07-07 23:47:54] [Rank 0] step:5121/10000 train_time:348873ms step_avg:68.13ms +[2025-07-07 23:47:55] [Rank 0] step:5141/10000 train_time:350241ms step_avg:68.13ms +[2025-07-07 23:47:55] [Rank 0] step:5141/10000 train_time:350241ms step_avg:68.13ms +[2025-07-07 23:47:56] [Rank 0] step:5161/10000 train_time:351610ms step_avg:68.13ms +[2025-07-07 23:47:56] [Rank 0] step:5161/10000 train_time:351610ms step_avg:68.13ms +[2025-07-07 23:47:58] [Rank 0] step:5181/10000 train_time:352977ms step_avg:68.13ms +[2025-07-07 23:47:58] [Rank 0] step:5181/10000 train_time:352977ms step_avg:68.13ms +[2025-07-07 23:47:59] [Rank 0] step:5201/10000 train_time:354348ms step_avg:68.13ms +[2025-07-07 23:47:59] [Rank 0] step:5201/10000 train_time:354348ms step_avg:68.13ms +[2025-07-07 23:48:01] [Rank 0] step:5221/10000 train_time:356386ms step_avg:68.26ms +[2025-07-07 23:48:01] [Rank 0] step:5221/10000 train_time:356386ms step_avg:68.26ms +[2025-07-07 23:48:02] [Rank 0] step:5241/10000 train_time:357126ms step_avg:68.14ms +[2025-07-07 23:48:02] [Rank 0] step:5241/10000 train_time:357126ms step_avg:68.14ms +[2025-07-07 23:48:03] [Rank 0] step:5261/10000 train_time:358577ms step_avg:68.16ms +[2025-07-07 23:48:03] [Rank 0] step:5261/10000 train_time:358577ms step_avg:68.16ms +[2025-07-07 23:48:05] [Rank 0] step:5281/10000 train_time:359953ms step_avg:68.16ms +[2025-07-07 23:48:05] [Rank 0] step:5281/10000 train_time:359953ms step_avg:68.16ms +[2025-07-07 23:48:06] [Rank 0] step:5301/10000 train_time:361328ms step_avg:68.16ms +[2025-07-07 23:48:06] [Rank 
0] step:5301/10000 train_time:361328ms step_avg:68.16ms +[2025-07-07 23:48:07] [Rank 0] step:5321/10000 train_time:362701ms step_avg:68.16ms +[2025-07-07 23:48:07] [Rank 0] step:5321/10000 train_time:362701ms step_avg:68.16ms +[2025-07-07 23:48:09] [Rank 0] step:5341/10000 train_time:364075ms step_avg:68.17ms +[2025-07-07 23:48:09] [Rank 0] step:5341/10000 train_time:364075ms step_avg:68.17ms +[2025-07-07 23:48:10] [Rank 0] step:5361/10000 train_time:365449ms step_avg:68.17ms +[2025-07-07 23:48:10] [Rank 0] step:5361/10000 train_time:365449ms step_avg:68.17ms +[2025-07-07 23:48:12] [Rank 0] step:5381/10000 train_time:366825ms step_avg:68.17ms +[2025-07-07 23:48:12] [Rank 0] step:5381/10000 train_time:366825ms step_avg:68.17ms +[2025-07-07 23:48:13] [Rank 0] step:5401/10000 train_time:368204ms step_avg:68.17ms +[2025-07-07 23:48:13] [Rank 0] step:5401/10000 train_time:368204ms step_avg:68.17ms +[2025-07-07 23:48:14] [Rank 0] step:5421/10000 train_time:369606ms step_avg:68.18ms +[2025-07-07 23:48:14] [Rank 0] step:5421/10000 train_time:369606ms step_avg:68.18ms +[2025-07-07 23:48:16] [Rank 0] step:5441/10000 train_time:370986ms step_avg:68.18ms +[2025-07-07 23:48:16] [Rank 0] step:5441/10000 train_time:370986ms step_avg:68.18ms +[2025-07-07 23:48:17] [Rank 0] step:5461/10000 train_time:372363ms step_avg:68.19ms +[2025-07-07 23:48:17] [Rank 0] step:5461/10000 train_time:372363ms step_avg:68.19ms +[2025-07-07 23:48:19] [Rank 0] step:5481/10000 train_time:373739ms step_avg:68.19ms +[2025-07-07 23:48:19] [Rank 0] step:5481/10000 train_time:373739ms step_avg:68.19ms +[2025-07-07 23:48:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:48:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 23:48:21] [Rank 0] PRINT: step:5500/10000 train_loss:2.8272 val_loss:2.7365 train_time:375740ms step_avg:68.32ms +[2025-07-07 23:48:21] [Rank 0] PRINT: step:5500/10000 train_loss:2.8272 val_loss:2.7365 train_time:375740ms step_avg:68.32ms +[2025-07-07 23:48:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:48:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 23:48:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 23:48:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 23:48:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 23:53:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 23:53:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 23:53:52] [Rank 0] Total Loss: 4.7453
+[2025-07-07 23:53:52] [Rank 0] Total FTA: 0.0932
+[2025-07-07 23:53:52] [Rank 0] Group 0 Loss: 4.8103
+[2025-07-07 23:53:52] [Rank 0] Group 1 Loss: 4.4731
+[2025-07-07 23:53:52] [Rank 0] Group 2 Loss: 4.7385
+[2025-07-07 23:53:52] [Rank 0] Group 3 Loss: 4.7402
+[2025-07-07 23:53:52] [Rank 0] Group 4 Loss: 4.8025
+[2025-07-07 23:53:52] [Rank 0] Group 5 Loss: 4.6963
+[2025-07-07 23:53:52] [Rank 0] Group 6 Loss: 4.8313
+[2025-07-07 23:53:52] [Rank 0] Group 7 Loss: 4.7323
+[2025-07-07 23:53:52] [Rank 0] Group 8 Loss: 4.7342
+[2025-07-07 23:53:52] [Rank 0] Group 9 Loss: 4.7392
+[2025-07-07 23:53:52] [Rank 0] Group 10 Loss: 4.7512
+[2025-07-07 23:53:52] [Rank 0] Group 11 Loss: 4.7754
+[2025-07-07 23:53:52] [Rank 0] Group 0 FTA: 0.1678
+[2025-07-07 23:53:52] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 23:53:52] [Rank 0] Group 2 FTA: 0.1979
+[2025-07-07 23:53:52] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 23:53:52] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 23:53:52] [Rank 0] Group 5 FTA: 0.0469
+[2025-07-07 23:53:52] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 23:53:52] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-07 23:53:52] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-07 23:53:52] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 23:53:52] [Rank 0] Group 10 FTA: 0.1055
+[2025-07-07 23:53:52] [Rank 0] Group 11 FTA: 0.0938
+[2025-07-07 23:53:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-07 23:53:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-07 23:53:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-07 23:53:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-07 23:53:54] [Rank 0] step:5501/10000 train_time:375750ms step_avg:68.31ms
+[2025-07-07 23:53:55] [Rank 0] step:5521/10000 train_time:376507ms step_avg:68.20ms
+[2025-07-07 23:53:56] [Rank 0] step:5541/10000 train_time:377872ms step_avg:68.20ms
+[2025-07-07 23:53:58] [Rank 0] step:5561/10000 train_time:379238ms step_avg:68.20ms
+[2025-07-07 23:53:59] [Rank 0] step:5581/10000 train_time:380607ms step_avg:68.20ms
+[2025-07-07 23:54:01] [Rank 0] step:5601/10000 train_time:382019ms step_avg:68.21ms
+[2025-07-07 23:54:02] [Rank 0] step:5621/10000 train_time:383386ms step_avg:68.21ms
+[2025-07-07 23:54:03] [Rank 0] step:5641/10000 train_time:384755ms step_avg:68.21ms
+[2025-07-07 23:54:05] [Rank 0] step:5661/10000 train_time:386122ms step_avg:68.21ms
+[2025-07-07 23:54:06] [Rank 0] step:5681/10000 train_time:387492ms step_avg:68.21ms
+[2025-07-07 23:54:07] [Rank 0] step:5701/10000 train_time:388862ms step_avg:68.21ms
+[2025-07-07 23:54:09] [Rank 0] step:5721/10000 train_time:390232ms step_avg:68.21ms
+[2025-07-07 23:54:10] [Rank 0] step:5741/10000 train_time:391603ms step_avg:68.21ms
+[2025-07-07 23:54:12] [Rank 0] step:5761/10000 train_time:393022ms step_avg:68.22ms
+[2025-07-07 23:54:13] [Rank 0] step:5781/10000 train_time:394374ms step_avg:68.22ms
+[2025-07-07 23:54:14] [Rank 0] step:5801/10000 train_time:395749ms step_avg:68.22ms
+[2025-07-07 23:54:16] [Rank 0] step:5821/10000 train_time:397121ms step_avg:68.22ms
+[2025-07-07 23:54:17] [Rank 0] step:5841/10000 train_time:398496ms step_avg:68.22ms
+[2025-07-07 23:54:18] [Rank 0] step:5861/10000 train_time:399869ms step_avg:68.23ms
+[2025-07-07 23:54:20] [Rank 0] step:5881/10000 train_time:401242ms step_avg:68.23ms
+[2025-07-07 23:54:21] [Rank 0] step:5901/10000 train_time:402616ms step_avg:68.23ms
+[2025-07-07 23:54:23] [Rank 0] step:5921/10000 train_time:404023ms step_avg:68.24ms
+[2025-07-07 23:54:24] [Rank 0] step:5941/10000 train_time:405646ms step_avg:68.28ms
+[2025-07-07 23:54:25] [Rank 0] step:5961/10000 train_time:406820ms step_avg:68.25ms
+[2025-07-07 23:54:27] [Rank 0] step:5981/10000 train_time:408195ms step_avg:68.25ms
+[2025-07-07 23:54:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 23:54:29] [Rank 0] PRINT: step:6000/10000 train_loss:2.6510 val_loss:2.5776 train_time:410195ms step_avg:68.37ms
+[2025-07-07 23:54:29] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 23:54:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 23:54:29] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:00:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:00:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:00:00] [Rank 0] Total Loss: 4.6752
+[2025-07-08 00:00:00] [Rank 0] Total FTA: 0.0849
+[2025-07-08 00:00:00] [Rank 0] Group 0 Loss: 4.8607
+[2025-07-08 00:00:00] [Rank 0] Group 1 Loss: 4.3330
+[2025-07-08 00:00:00] [Rank 0] Group 2 Loss: 4.7101
+[2025-07-08 00:00:00] [Rank 0] Group 3 Loss: 4.5866
+[2025-07-08 00:00:00] [Rank 0] Group 4 Loss: 4.7194
+[2025-07-08 00:00:00] [Rank 0] Group 5 Loss: 4.6210
+[2025-07-08 00:00:00] [Rank 0] Group 6 Loss: 4.7039
+[2025-07-08 00:00:00] [Rank 0] Group 7 Loss: 4.6782
+[2025-07-08 00:00:00] [Rank 0] Group 8 Loss: 4.6498
+[2025-07-08 00:00:00] [Rank 0] Group 9 Loss: 4.6711
+[2025-07-08 00:00:00] [Rank 0] Group 10 Loss: 4.6438
+[2025-07-08 00:00:00] [Rank 0] Group 11 Loss: 4.7025
+[2025-07-08 00:00:00] [Rank 0] Group 0 FTA: 0.1782
+[2025-07-08 00:00:00] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 00:00:00] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-08 00:00:00] [Rank 0] Group 3 FTA: 0.0208
+[2025-07-08 00:00:00] [Rank 0] Group 4 FTA: 0.0130
+[2025-07-08 00:00:00] [Rank 0] Group 5 FTA: 0.0729
+[2025-07-08 00:00:00] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-08 00:00:00] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-08 00:00:00] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-08 00:00:00] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-08 00:00:00] [Rank 0] Group 10 FTA: 0.0723
+[2025-07-08 00:00:00] [Rank 0] Group 11 FTA: 0.0879
+[2025-07-08 00:00:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-08 00:00:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-08 00:00:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-08 00:00:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-08 00:00:02] [Rank 0] step:6001/10000 train_time:410206ms step_avg:68.36ms
+[2025-07-08 00:00:04] [Rank 0] step:6021/10000 train_time:410973ms step_avg:68.26ms
+[2025-07-08 00:00:05] [Rank 0] step:6041/10000 train_time:412338ms step_avg:68.26ms
+[2025-07-08 00:00:07] [Rank 0] step:6061/10000 train_time:413702ms step_avg:68.26ms
+[2025-07-08 00:00:08] [Rank 0] step:6081/10000 train_time:415067ms step_avg:68.26ms
+[2025-07-08 00:00:09] [Rank 0] step:6101/10000 train_time:416432ms step_avg:68.26ms
+[2025-07-08 00:00:11] [Rank 0] step:6121/10000 train_time:417847ms step_avg:68.26ms
+[2025-07-08 00:00:12] [Rank 0] step:6141/10000 train_time:419215ms step_avg:68.26ms
+[2025-07-08 00:00:13] [Rank 0] step:6161/10000 train_time:420582ms step_avg:68.27ms
+[2025-07-08 00:00:15] [Rank 0] step:6181/10000 train_time:421950ms step_avg:68.27ms
+[2025-07-08 00:00:16] [Rank 0] step:6201/10000 train_time:423320ms step_avg:68.27ms
+[2025-07-08 00:00:18] [Rank 0] step:6221/10000 train_time:424690ms step_avg:68.27ms
+[2025-07-08 00:00:19] [Rank 0] step:6241/10000 train_time:426059ms step_avg:68.27ms
+[2025-07-08 00:00:20] [Rank 0] step:6261/10000 train_time:427430ms step_avg:68.27ms
+[2025-07-08 00:00:22] [Rank 0] step:6281/10000 train_time:428801ms step_avg:68.27ms
+[2025-07-08 00:00:23] [Rank 0] step:6301/10000 train_time:430423ms step_avg:68.31ms
+[2025-07-08 00:00:24] [Rank 0] step:6321/10000 train_time:431566ms step_avg:68.27ms
+[2025-07-08 00:00:26] [Rank 0] step:6341/10000 train_time:432938ms step_avg:68.28ms
+[2025-07-08 00:00:27] [Rank 0] step:6361/10000 train_time:434309ms step_avg:68.28ms
+[2025-07-08 00:00:29] [Rank 0] step:6381/10000 train_time:435681ms step_avg:68.28ms
+[2025-07-08 00:00:30] [Rank 0] step:6401/10000 train_time:437055ms step_avg:68.28ms
+[2025-07-08 00:00:31] [Rank 0] step:6421/10000 train_time:438428ms step_avg:68.28ms
+[2025-07-08 00:00:33] [Rank 0] step:6441/10000 train_time:439801ms step_avg:68.28ms
+[2025-07-08 00:00:34] [Rank 0] step:6461/10000 train_time:441175ms step_avg:68.28ms
+[2025-07-08 00:00:35] [Rank 0] step:6481/10000 train_time:442595ms step_avg:68.29ms
+[2025-07-08 00:00:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:00:38] [Rank 0] PRINT: step:6500/10000 train_loss:2.5078 val_loss:2.4478 train_time:444576ms step_avg:68.40ms
+[2025-07-08 00:00:38] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:00:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 00:00:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:06:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:06:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:06:05] [Rank 0] Total Loss: 4.6439
+[2025-07-08 00:06:05] [Rank 0] Total FTA: 0.0838
+[2025-07-08 00:06:05] [Rank 0] Group 0 Loss: 4.8540
+[2025-07-08 00:06:05] [Rank 0] Group 1 Loss: 4.3420
+[2025-07-08 00:06:05] [Rank 0] Group 2 Loss: 4.5877
+[2025-07-08 00:06:05] [Rank 0] Group 3 Loss: 4.5995
+[2025-07-08 00:06:05] [Rank 0] Group 4 Loss: 4.7144
+[2025-07-08 00:06:05] [Rank 0] Group 5 Loss: 4.5862
+[2025-07-08 00:06:05] [Rank 0] Group 6 Loss: 4.6375
+[2025-07-08 00:06:05] [Rank 0] Group 7 Loss: 4.6348
+[2025-07-08 00:06:05] [Rank 0] Group 8 Loss: 4.6219
+[2025-07-08 00:06:05] [Rank 0] Group 9 Loss: 4.6256
+[2025-07-08 00:06:05] [Rank 0] Group 10 Loss: 4.6384
+[2025-07-08 00:06:05] [Rank 0] Group 11 Loss: 4.6536
+[2025-07-08 00:06:05] [Rank 0] Group 0 FTA: 0.1534
+[2025-07-08 00:06:05] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 00:06:05] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-08 00:06:05] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-08 00:06:05] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-08 00:06:05] [Rank 0] Group 5 FTA: 0.0599
+[2025-07-08 00:06:05] [Rank 0] Group 6 FTA: 0.0729
+[2025-07-08 00:06:05] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-08 00:06:05] [Rank 0] Group 8 FTA: 0.0859
+[2025-07-08 00:06:05] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-08 00:06:05] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-08 00:06:05] [Rank 0] Group 11 FTA: 0.0918
+[2025-07-08 00:06:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-08 00:06:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-08 00:06:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-08 00:06:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-08 00:06:06] [Rank 0] step:6501/10000 train_time:444587ms step_avg:68.39ms
+[2025-07-08 00:06:08] [Rank 0] step:6521/10000 train_time:445362ms step_avg:68.30ms
+[2025-07-08 00:06:09] [Rank 0] step:6541/10000 train_time:446725ms step_avg:68.30ms
+[2025-07-08 00:06:10] [Rank 0] step:6561/10000 train_time:448088ms step_avg:68.30ms
+[2025-07-08 00:06:12] [Rank 0] step:6581/10000 train_time:449453ms step_avg:68.30ms
+[2025-07-08 00:06:13] [Rank 0] step:6601/10000 train_time:450818ms step_avg:68.30ms
+[2025-07-08 00:06:14] [Rank 0] step:6621/10000 train_time:452185ms step_avg:68.30ms
+[2025-07-08 00:06:16] [Rank 0] step:6641/10000 train_time:453552ms step_avg:68.30ms
+[2025-07-08 00:06:17] [Rank 0] step:6661/10000 train_time:454967ms step_avg:68.30ms
+[2025-07-08 00:06:19] [Rank 0] step:6681/10000 train_time:456337ms step_avg:68.30ms
+[2025-07-08 00:06:20] [Rank 0] step:6701/10000 train_time:457705ms step_avg:68.30ms
+[2025-07-08 00:06:21] [Rank 0] step:6721/10000 train_time:459073ms step_avg:68.30ms
+[2025-07-08 00:06:23] [Rank 0] step:6741/10000 train_time:460443ms step_avg:68.30ms
+[2025-07-08 00:06:24] [Rank 0] step:6761/10000 train_time:461812ms step_avg:68.31ms
+[2025-07-08 00:06:25] [Rank 0] step:6781/10000 train_time:463185ms step_avg:68.31ms
+[2025-07-08 00:06:27] [Rank 0] step:6801/10000 train_time:464557ms step_avg:68.31ms
+[2025-07-08 00:06:28] [Rank 0] step:6821/10000 train_time:465931ms step_avg:68.31ms
+[2025-07-08 00:06:30] [Rank 0] step:6841/10000 train_time:467307ms step_avg:68.31ms
+[2025-07-08 00:06:31] [Rank 0] step:6861/10000 train_time:468724ms step_avg:68.32ms
+[2025-07-08 00:06:32] [Rank 0] step:6881/10000 train_time:470097ms step_avg:68.32ms
+[2025-07-08 00:06:34] [Rank 0] step:6901/10000 train_time:471471ms step_avg:68.32ms
+[2025-07-08 00:06:35] [Rank 0] step:6921/10000 train_time:472845ms step_avg:68.32ms
+[2025-07-08 00:06:36] [Rank 0] step:6941/10000 train_time:474219ms step_avg:68.32ms
+[2025-07-08 00:06:38] [Rank 0] step:6961/10000 train_time:475595ms step_avg:68.32ms
+[2025-07-08 00:06:39] [Rank 0] step:6981/10000 train_time:476969ms step_avg:68.32ms
+[2025-07-08 00:06:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:06:41] [Rank 0] PRINT: step:7000/10000 train_loss:2.3924 val_loss:2.3442 train_time:478969ms step_avg:68.42ms
+[2025-07-08 00:06:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:06:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 00:06:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:12:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:12:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:12:16] [Rank 0] Total Loss: 4.5989
+[2025-07-08 00:12:16] [Rank 0] Total FTA: 0.0895
+[2025-07-08 00:12:16] [Rank 0] Group 0 Loss: 4.8486
+[2025-07-08 00:12:16] [Rank 0] Group 1 Loss: 4.3166
+[2025-07-08 00:12:16] [Rank 0] Group 2 Loss: 4.5048
+[2025-07-08 00:12:16] [Rank 0] Group 3 Loss: 4.5752
+[2025-07-08 00:12:16] [Rank 0] Group 4 Loss: 4.6346
+[2025-07-08 00:12:16] [Rank 0] Group 5 Loss: 4.5231
+[2025-07-08 00:12:16] [Rank 0] Group 6 Loss: 4.5923
+[2025-07-08 00:12:16] [Rank 0] Group 7 Loss: 4.5805
+[2025-07-08 00:12:16] [Rank 0] Group 8 Loss: 4.5619
+[2025-07-08 00:12:16] [Rank 0] Group 9 Loss: 4.5481
+[2025-07-08 00:12:16] [Rank 0] Group 10 Loss: 4.6172
+[2025-07-08 00:12:16] [Rank 0] Group 11 Loss: 4.6032
+[2025-07-08 00:12:16] [Rank 0] Group 0 FTA: 0.1795
+[2025-07-08 00:12:16] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 00:12:16] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-08 00:12:16] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-08 00:12:16] [Rank 0] Group 4 FTA: 0.0339
+[2025-07-08 00:12:16] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-08 00:12:16] [Rank 0] Group 6 FTA: 0.1172
+[2025-07-08 00:12:16] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-08 00:12:16] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-08 00:12:16] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-08 00:12:16] [Rank 0] Group 10 FTA: 0.0762
+[2025-07-08 00:12:16] [Rank 0] Group 11 FTA: 0.1055
+[2025-07-08 00:12:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-08 00:12:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-08 00:12:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-08 00:12:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-08 00:12:17] [Rank 0] step:7001/10000 train_time:478979ms step_avg:68.42ms
+[2025-07-08 00:12:18] [Rank 0] step:7021/10000 train_time:480411ms step_avg:68.42ms
+[2025-07-08 00:12:20] [Rank 0] step:7041/10000 train_time:481146ms step_avg:68.33ms
+[2025-07-08 00:12:21] [Rank 0] step:7061/10000 train_time:482512ms step_avg:68.33ms
+[2025-07-08 00:12:23] [Rank 0] step:7081/10000 train_time:483879ms step_avg:68.33ms
+[2025-07-08 00:12:24] [Rank 0] step:7101/10000 train_time:485247ms step_avg:68.33ms
+[2025-07-08 00:12:25] [Rank 0] step:7121/10000 train_time:486613ms step_avg:68.33ms
+[2025-07-08 00:12:27] [Rank 0] step:7141/10000 train_time:487982ms step_avg:68.34ms
+[2025-07-08 00:12:28] [Rank 0] step:7161/10000 train_time:489351ms step_avg:68.34ms
+[2025-07-08 00:12:29] [Rank 0] step:7181/10000 train_time:490720ms step_avg:68.34ms
+[2025-07-08 00:12:31] [Rank 0] step:7201/10000 train_time:492339ms step_avg:68.37ms
+[2025-07-08 00:12:32] [Rank 0] step:7221/10000 train_time:493500ms step_avg:68.34ms
+[2025-07-08 00:12:34] [Rank 0] step:7241/10000 train_time:494870ms step_avg:68.34ms
+[2025-07-08 00:12:35] [Rank 0] step:7261/10000 train_time:496240ms step_avg:68.34ms
+[2025-07-08 00:12:36] [Rank 0] step:7281/10000 train_time:497611ms step_avg:68.34ms
+[2025-07-08 00:12:38] [Rank 0] step:7301/10000 train_time:498984ms step_avg:68.34ms
+[2025-07-08 00:12:39] [Rank 0] step:7321/10000 train_time:500358ms step_avg:68.35ms
+[2025-07-08 00:12:40] [Rank 0] step:7341/10000 train_time:501730ms step_avg:68.35ms
+[2025-07-08 00:12:42] [Rank 0] step:7361/10000 train_time:503103ms step_avg:68.35ms
+[2025-07-08 00:12:43] [Rank 0] step:7381/10000 train_time:504727ms step_avg:68.38ms
+[2025-07-08 00:12:45] [Rank 0] step:7401/10000 train_time:505887ms step_avg:68.35ms
+[2025-07-08 00:12:46] [Rank 0] step:7421/10000 train_time:507262ms step_avg:68.35ms
+[2025-07-08 00:12:47] [Rank 0] step:7441/10000 train_time:508637ms step_avg:68.36ms
+[2025-07-08 00:12:49] [Rank 0] step:7461/10000 train_time:510011ms step_avg:68.36ms
+[2025-07-08 00:12:50] [Rank 0] step:7481/10000 train_time:511386ms step_avg:68.36ms
+[2025-07-08 00:12:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:12:52] [Rank 0] PRINT: step:7500/10000 train_loss:2.2996 val_loss:2.2632 train_time:513388ms step_avg:68.45ms
+[2025-07-08 00:12:52] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:12:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 00:12:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:18:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:18:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:18:25] [Rank 0] Total Loss: 4.5774
+[2025-07-08 00:18:25] [Rank 0] Total FTA: 0.0921
+[2025-07-08 00:18:25] [Rank 0] Group 0 Loss: 4.8198
+[2025-07-08 00:18:25] [Rank 0] Group 1 Loss: 4.2993
+[2025-07-08 00:18:25] [Rank 0] Group 2 Loss: 4.5161
+[2025-07-08 00:18:25] [Rank 0] Group 3 Loss: 4.5279
+[2025-07-08 00:18:25] [Rank 0] Group 4 Loss: 4.5687
+[2025-07-08 00:18:25] [Rank 0] Group 5 Loss: 4.5128
+[2025-07-08 00:18:25] [Rank 0] Group 6 Loss: 4.6113
+[2025-07-08 00:18:25] [Rank 0] Group 7 Loss: 4.5470
+[2025-07-08 00:18:25] [Rank 0] Group 8 Loss: 4.5615
+[2025-07-08 00:18:25] [Rank 0] Group 9 Loss: 4.5875
+[2025-07-08 00:18:25] [Rank 0] Group 10 Loss: 4.5796
+[2025-07-08 00:18:25] [Rank 0] Group 11 Loss: 4.5694
+[2025-07-08 00:18:25] [Rank 0] Group 0 FTA: 0.1521
+[2025-07-08 00:18:25] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 00:18:25] [Rank 0] Group 2 FTA: 0.1068
+[2025-07-08 00:18:25] [Rank 0] Group 3 FTA: 0.0443
+[2025-07-08 00:18:25] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-08 00:18:25] [Rank 0] Group 5 FTA: 0.0781
+[2025-07-08 00:18:25] [Rank 0] Group 6 FTA: 0.0807
+[2025-07-08 00:18:25] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-08 00:18:25] [Rank 0] Group 8 FTA: 0.1016
+[2025-07-08 00:18:25] [Rank 0] Group 9 FTA: 0.1133
+[2025-07-08 00:18:25] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-08 00:18:25] [Rank 0] Group 11 FTA: 0.1006
+[2025-07-08 00:18:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-08 00:18:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-08 00:18:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-08 00:18:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-08 00:18:27] [Rank 0] step:7501/10000 train_time:513397ms step_avg:68.44ms
+[2025-07-08 00:18:28] [Rank 0] step:7521/10000 train_time:514171ms step_avg:68.36ms
+[2025-07-08 00:18:30] [Rank 0] step:7541/10000 train_time:515537ms step_avg:68.36ms
+[2025-07-08 00:18:31] [Rank 0] step:7561/10000 train_time:517576ms step_avg:68.45ms
+[2025-07-08 00:18:32] [Rank 0] step:7581/10000 train_time:518314ms step_avg:68.37ms
+[2025-07-08 00:18:34] [Rank 0] step:7601/10000 train_time:519688ms step_avg:68.37ms
+[2025-07-08 00:18:35] [Rank 0] step:7621/10000 train_time:521050ms step_avg:68.37ms
+[2025-07-08 00:18:37] [Rank 0] step:7641/10000 train_time:522422ms step_avg:68.37ms
+[2025-07-08 00:18:38] [Rank 0] step:7661/10000 train_time:523790ms step_avg:68.37ms
+[2025-07-08 00:18:39] [Rank 0] step:7681/10000 train_time:525159ms step_avg:68.37ms
+[2025-07-08 00:18:41] [Rank 0] step:7701/10000 train_time:526529ms step_avg:68.37ms
+[2025-07-08 00:18:42] [Rank 0] step:7721/10000 train_time:527901ms step_avg:68.37ms
+[2025-07-08 00:18:43] [Rank 0] step:7741/10000 train_time:529524ms step_avg:68.41ms
+[2025-07-08 00:18:45] [Rank 0] step:7761/10000 train_time:530668ms step_avg:68.38ms
+[2025-07-08 00:18:46] [Rank 0] step:7781/10000 train_time:532040ms step_avg:68.38ms
+[2025-07-08 00:18:48] [Rank 0] step:7801/10000 train_time:533414ms step_avg:68.38ms
+[2025-07-08 00:18:49] [Rank 0] step:7821/10000 train_time:534789ms step_avg:68.38ms
+[2025-07-08 00:18:50] [Rank 0] step:7841/10000 train_time:536163ms step_avg:68.38ms
+[2025-07-08 00:18:52] [Rank 0] step:7861/10000 train_time:537539ms step_avg:68.38ms
+[2025-07-08 00:18:53] [Rank 0] step:7881/10000 train_time:538913ms step_avg:68.38ms
+[2025-07-08 00:18:54] [Rank 0] step:7901/10000 train_time:540287ms step_avg:68.38ms
+[2025-07-08 00:18:56] [Rank 0] step:7921/10000 train_time:541661ms step_avg:68.38ms
+[2025-07-08 00:18:57] [Rank 0] step:7941/10000 train_time:543085ms step_avg:68.39ms
+[2025-07-08 00:18:59] [Rank 0] step:7961/10000 train_time:544461ms step_avg:68.39ms
+[2025-07-08 00:19:00] [Rank 0] step:7981/10000 train_time:545839ms step_avg:68.39ms
+[2025-07-08 00:19:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:19:02] [Rank 0] PRINT: step:8000/10000 train_loss:2.2253 val_loss:2.1973 train_time:547843ms step_avg:68.48ms
+[2025-07-08 00:19:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:19:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 00:19:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 00:24:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 00:24:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 00:24:35] [Rank 0] Total Loss: 4.5810
+[2025-07-08 00:24:35] [Rank 0] Total FTA: 0.0904
+[2025-07-08 00:24:35] [Rank 0] Group 0 Loss: 4.8467
+[2025-07-08 00:24:35] [Rank 0] Group 1 Loss: 4.1960
+[2025-07-08 00:24:35] [Rank 0] Group 2 Loss: 4.4692
+[2025-07-08 00:24:35] [Rank 0] Group 3 Loss: 4.6068
+[2025-07-08 00:24:35] [Rank 0] Group 4 Loss: 4.6667
+[2025-07-08 00:24:35] [Rank 0] Group 5 Loss: 4.5192
+[2025-07-08 00:24:35] [Rank 0] Group 6 Loss: 4.5884
+[2025-07-08 00:24:35] [Rank 0] Group 7 Loss: 4.5851
+[2025-07-08 00:24:35] [Rank 0] Group 8 Loss: 4.5450
+[2025-07-08 00:24:35] [Rank 0] Group 9 Loss: 4.5801
+[2025-07-08 00:24:35] [Rank 0] Group 10 Loss: 4.5370
+[2025-07-08 00:24:35] [Rank 0] Group 11 Loss: 4.5803
+[2025-07-08 00:24:35] [Rank 0] Group 0 FTA: 0.1586
+[2025-07-08 00:24:35] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 00:24:35] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-08 00:24:35] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-08 00:24:35] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-08 00:24:35] [Rank 0] Group 5 FTA: 0.0859
+[2025-07-08 00:24:35] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-08 00:24:35] [Rank 0] Group 7 FTA: 0.1172
+[2025-07-08 00:24:35] [Rank 0] Group 8 FTA: 0.1380
+[2025-07-08 00:24:35] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-08 00:24:35] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-08 00:24:35] [Rank 0] Group 11 FTA: 0.0908
+[2025-07-08 00:24:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png
+[2025-07-08 00:24:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png
+[2025-07-08 00:24:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png
+[2025-07-08 00:24:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png
+[2025-07-08 00:24:36] [Rank 0] step:8001/10000 train_time:547853ms step_avg:68.47ms
+[2025-07-08 00:24:38] [Rank 0] step:8021/10000 train_time:548616ms step_avg:68.40ms
+[2025-07-08 00:24:39] [Rank 0] step:8041/10000 train_time:549982ms step_avg:68.40ms
+[2025-07-08 00:24:40] [Rank 0] step:8061/10000 train_time:551347ms step_avg:68.40ms
+[2025-07-08 00:24:42] [Rank 0] step:8081/10000 train_time:552711ms step_avg:68.40ms
+[2025-07-08 00:24:43] [Rank 0] step:8101/10000 train_time:554328ms step_avg:68.43ms
+[2025-07-08 00:24:44] [Rank 0] step:8121/10000 train_time:555500ms step_avg:68.40ms
+[2025-07-08 00:24:46] [Rank 0] step:8141/10000 train_time:556870ms step_avg:68.40ms
+[2025-07-08 00:24:47] [Rank 0] step:8161/10000 train_time:558237ms step_avg:68.40ms
+[2025-07-08 00:24:49] [Rank 0] step:8181/10000 train_time:559606ms step_avg:68.40ms
+[2025-07-08 00:24:50] [Rank 0] step:8201/10000 train_time:560977ms step_avg:68.40ms
+[2025-07-08 00:24:51] [Rank 0] step:8221/10000 train_time:562349ms step_avg:68.40ms
+[2025-07-08 00:24:53] [Rank 0] step:8241/10000 train_time:563719ms step_avg:68.40ms
+[2025-07-08 00:24:54] [Rank 0] step:8261/10000 train_time:565090ms step_avg:68.40ms
+[2025-07-08 00:24:55] [Rank 0] step:8281/10000 train_time:566509ms step_avg:68.41ms
+[2025-07-08 00:24:57] [Rank 0] step:8301/10000 train_time:567887ms step_avg:68.41ms
+[2025-07-08 00:24:58] [Rank 0] step:8321/10000 train_time:569261ms step_avg:68.41ms
+[2025-07-08 00:25:00] [Rank 0] step:8341/10000 train_time:570638ms step_avg:68.41ms
+[2025-07-08 00:25:01] [Rank 0] step:8361/10000 train_time:572013ms step_avg:68.41ms
+[2025-07-08 00:25:02] [Rank 0] step:8381/10000 train_time:573391ms step_avg:68.42ms
+[2025-07-08 00:25:04] [Rank 0] step:8401/10000 train_time:574767ms step_avg:68.42ms
+[2025-07-08 00:25:05] [Rank 0] step:8421/10000 train_time:576141ms step_avg:68.42ms
+[2025-07-08 00:25:06] [Rank 0] step:8441/10000 train_time:577518ms step_avg:68.42ms
+[2025-07-08 00:25:08] [Rank 0] step:8461/10000 train_time:578941ms step_avg:68.42ms
+[2025-07-08 00:25:09] [Rank 0] step:8481/10000 train_time:580306ms step_avg:68.42ms
+[2025-07-08 00:25:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 00:25:12] [Rank 0] PRINT: step:8500/10000 train_loss:2.1664 val_loss:2.1443 train_time:582308ms step_avg:68.51ms
+[2025-07-08 00:25:12] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 00:25:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 00:25:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 00:25:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 00:30:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 00:30:44] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 00:30:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 00:30:44] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 00:30:44] [Rank 0] Total Loss: 4.5777 +[2025-07-08 00:30:44] [Rank 0] Total Loss: 4.5777 +[2025-07-08 00:30:44] [Rank 0] Total FTA: 0.0872 +[2025-07-08 00:30:44] [Rank 0] Total FTA: 0.0872 +[2025-07-08 00:30:44] [Rank 0] Group 0 Loss: 4.8190 +[2025-07-08 00:30:44] [Rank 0] Group 0 Loss: 4.8190 +[2025-07-08 00:30:44] [Rank 0] Group 1 Loss: 4.2016 +[2025-07-08 00:30:44] [Rank 0] Group 1 Loss: 4.2016 +[2025-07-08 00:30:44] [Rank 0] Group 2 Loss: 4.4252 +[2025-07-08 00:30:44] [Rank 0] Group 2 Loss: 4.4252 +[2025-07-08 00:30:44] [Rank 0] Group 3 Loss: 4.6486 +[2025-07-08 00:30:44] [Rank 0] Group 3 Loss: 4.6486 +[2025-07-08 00:30:44] [Rank 0] Group 4 Loss: 4.6174 +[2025-07-08 00:30:44] [Rank 0] Group 4 Loss: 4.6174 +[2025-07-08 00:30:44] [Rank 0] Group 5 Loss: 4.5249 +[2025-07-08 00:30:44] [Rank 0] Group 5 Loss: 4.5249 +[2025-07-08 00:30:44] [Rank 0] Group 6 Loss: 4.5583 +[2025-07-08 00:30:44] [Rank 0] Group 6 Loss: 4.5583 +[2025-07-08 00:30:44] [Rank 0] Group 7 Loss: 4.5490 +[2025-07-08 00:30:44] [Rank 0] Group 7 Loss: 4.5490 +[2025-07-08 00:30:44] [Rank 0] Group 8 Loss: 4.5418 +[2025-07-08 00:30:44] [Rank 0] Group 8 Loss: 4.5418 +[2025-07-08 00:30:44] [Rank 0] Group 9 Loss: 4.5417 +[2025-07-08 00:30:44] [Rank 0] Group 9 Loss: 4.5417 +[2025-07-08 00:30:44] [Rank 0] Group 10 Loss: 4.5746 +[2025-07-08 00:30:44] [Rank 0] Group 10 Loss: 4.5746 +[2025-07-08 00:30:44] [Rank 0] Group 11 Loss: 4.6152 +[2025-07-08 00:30:44] [Rank 0] Group 11 Loss: 4.6152 +[2025-07-08 00:30:44] [Rank 0] Group 0 FTA: 0.1599 +[2025-07-08 00:30:44] [Rank 0] Group 0 FTA: 0.1599 +[2025-07-08 00:30:44] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-08 00:30:44] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-08 00:30:44] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-08 00:30:44] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-08 00:30:44] [Rank 0] Group 3 FTA: 0.0312 +[2025-07-08 00:30:44] [Rank 0] Group 3 FTA: 0.0312 +[2025-07-08 00:30:44] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-08 00:30:44] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-08 00:30:44] [Rank 0] Group 5 FTA: 0.0964 +[2025-07-08 00:30:44] [Rank 0] Group 5 FTA: 0.0964 +[2025-07-08 00:30:44] [Rank 0] Group 6 FTA: 0.0990 +[2025-07-08 00:30:44] [Rank 0] Group 6 FTA: 0.0990 +[2025-07-08 00:30:44] [Rank 0] Group 7 FTA: 0.0964 +[2025-07-08 00:30:44] [Rank 0] Group 7 FTA: 0.0964 +[2025-07-08 00:30:44] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-08 00:30:44] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-08 00:30:44] [Rank 0] Group 9 FTA: 0.1055 +[2025-07-08 00:30:44] [Rank 0] Group 9 FTA: 0.1055 +[2025-07-08 00:30:44] [Rank 0] Group 10 FTA: 0.0742 +[2025-07-08 00:30:44] [Rank 0] Group 10 FTA: 0.0742 +[2025-07-08 00:30:44] [Rank 0] Group 11 FTA: 0.1035 +[2025-07-08 00:30:44] [Rank 0] Group 11 FTA: 0.1035 +[2025-07-08 00:30:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-08 00:30:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-08 00:30:45] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-08 00:30:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-08 00:30:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-08 00:30:45] [Rank 0] step:8501/10000 train_time:582317ms step_avg:68.50ms +[2025-07-08 00:30:47] [Rank 0] step:8521/10000 train_time:583071ms step_avg:68.43ms +[2025-07-08 00:30:48] [Rank 0] step:8541/10000 train_time:584437ms step_avg:68.43ms +[2025-07-08 00:30:50] [Rank 0] step:8561/10000 train_time:585804ms step_avg:68.43ms +[2025-07-08 00:30:51] [Rank 0] step:8581/10000 train_time:587170ms step_avg:68.43ms +[2025-07-08 00:30:52] [Rank 0] step:8601/10000 train_time:588536ms step_avg:68.43ms +[2025-07-08 00:30:54] [Rank 0] step:8621/10000 train_time:589905ms step_avg:68.43ms +[2025-07-08 00:30:55] [Rank 0] step:8641/10000 train_time:591934ms step_avg:68.50ms +[2025-07-08 00:30:56] [Rank 0] step:8661/10000 train_time:592671ms step_avg:68.43ms +[2025-07-08 00:30:58] [Rank 0] step:8681/10000 train_time:594040ms step_avg:68.43ms +[2025-07-08 00:30:59] [Rank 0] step:8701/10000 train_time:595408ms step_avg:68.43ms +[2025-07-08 00:31:01] [Rank 0] step:8721/10000 train_time:596779ms step_avg:68.43ms +[2025-07-08 00:31:02] [Rank 0] step:8741/10000 train_time:598150ms step_avg:68.43ms +[2025-07-08 00:31:03] [Rank 0] step:8761/10000 train_time:599522ms step_avg:68.43ms +[2025-07-08 00:31:05] [Rank 0] step:8781/10000 train_time:600892ms step_avg:68.43ms +[2025-07-08 00:31:06] [Rank 0] step:8801/10000 train_time:602264ms step_avg:68.43ms
+[2025-07-08 00:31:07] [Rank 0] step:8821/10000 train_time:603638ms step_avg:68.43ms +[2025-07-08 00:31:09] [Rank 0] step:8841/10000 train_time:605060ms step_avg:68.44ms +[2025-07-08 00:31:10] [Rank 0] step:8861/10000 train_time:606434ms step_avg:68.44ms +[2025-07-08 00:31:12] [Rank 0] step:8881/10000 train_time:607809ms step_avg:68.44ms +[2025-07-08 00:31:13] [Rank 0] step:8901/10000 train_time:609184ms step_avg:68.44ms +[2025-07-08 00:31:14] [Rank 0] step:8921/10000 train_time:610557ms step_avg:68.44ms +[2025-07-08 00:31:16] [Rank 0] step:8941/10000 train_time:611959ms step_avg:68.44ms +[2025-07-08 00:31:17] [Rank 0] step:8961/10000 train_time:613333ms step_avg:68.44ms +[2025-07-08 00:31:18] [Rank 0] step:8981/10000 train_time:614707ms step_avg:68.45ms +[2025-07-08 00:31:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 00:31:21] [Rank 0] PRINT: step:9000/10000 train_loss:2.1201 val_loss:2.1057 train_time:616707ms step_avg:68.52ms +[2025-07-08 00:31:21] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 00:31:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 00:31:21] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 00:36:54] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 00:36:54] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 00:36:54] [Rank 0] Total Loss: 4.6017 +[2025-07-08 00:36:54] [Rank 0] Total FTA: 0.0866 +[2025-07-08 00:36:54] [Rank 0] Group 0 Loss: 4.8137 +[2025-07-08 00:36:54] [Rank 0] Group 1 Loss: 4.2086 +[2025-07-08 00:36:54] [Rank 0] Group 2 Loss: 4.4818 +[2025-07-08 00:36:54] [Rank 0] Group 3 Loss: 4.6701 +[2025-07-08 00:36:54] [Rank 0] Group 4 Loss: 4.6660 +[2025-07-08 00:36:54] [Rank 0] Group 5 Loss: 4.5573 +[2025-07-08 00:36:54] [Rank 0] Group 6 Loss: 4.5327 +[2025-07-08 00:36:54] [Rank 0] Group 7 Loss: 4.6269 +[2025-07-08 00:36:54] [Rank 0] Group 8 Loss: 4.6010 +[2025-07-08 00:36:54] [Rank 0] Group 9 Loss: 4.5732 +[2025-07-08 00:36:54] [Rank 0] Group 10 Loss: 4.6140 +[2025-07-08 00:36:54] [Rank 0] Group 11 Loss: 4.6194 +[2025-07-08 00:36:54] [Rank 0] Group 0 FTA: 0.1730 +[2025-07-08 00:36:54] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-08 00:36:54] [Rank 0] Group 2 FTA: 0.0781 +[2025-07-08 00:36:54] [Rank 0] Group 3 FTA: 0.0312 +[2025-07-08 00:36:54] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-08 00:36:54] [Rank 0] Group 5 FTA: 0.1120 +[2025-07-08 00:36:54] [Rank 0] Group 6 FTA: 0.0964 +[2025-07-08 00:36:54] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-08 00:36:54] [Rank 0] Group 8 FTA: 0.0990 +[2025-07-08 00:36:54] [Rank 0] Group 9 FTA: 0.0508 +[2025-07-08 00:36:54] [Rank 0] Group 10 FTA: 0.0938 +[2025-07-08 00:36:54] [Rank 0] Group 11 FTA: 0.0850 +[2025-07-08 00:36:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-08 00:36:55] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-08 00:36:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-08 00:36:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-08 00:36:56] [Rank 0] step:9001/10000 train_time:617031ms step_avg:68.55ms +[2025-07-08 00:36:58] [Rank 0] step:9021/10000 train_time:618174ms step_avg:68.53ms +[2025-07-08 00:36:59] [Rank 0] step:9041/10000 train_time:619538ms step_avg:68.53ms +[2025-07-08 00:37:00] [Rank 0] step:9061/10000 train_time:620904ms step_avg:68.52ms +[2025-07-08 00:37:02] [Rank 0] step:9081/10000 train_time:622272ms step_avg:68.52ms +[2025-07-08 00:37:03] [Rank 0] step:9101/10000 train_time:623641ms step_avg:68.52ms +[2025-07-08 00:37:04] [Rank 0] step:9121/10000 train_time:625009ms step_avg:68.52ms +[2025-07-08 00:37:06] [Rank 0] step:9141/10000 train_time:626377ms step_avg:68.52ms +[2025-07-08 00:37:07] [Rank 0] step:9161/10000 train_time:627745ms step_avg:68.52ms +[2025-07-08 00:37:09] [Rank 0] step:9181/10000 train_time:629365ms step_avg:68.55ms +[2025-07-08 00:37:10] [Rank 0] step:9201/10000 train_time:630528ms step_avg:68.53ms +[2025-07-08 00:37:11] [Rank 0] step:9221/10000 train_time:631898ms step_avg:68.53ms +[2025-07-08 00:37:13] [Rank 0] step:9241/10000 train_time:633271ms step_avg:68.53ms +[2025-07-08 00:37:14] [Rank 0] step:9261/10000 train_time:634643ms step_avg:68.53ms +[2025-07-08 00:37:15] [Rank 0] step:9281/10000 train_time:636016ms step_avg:68.53ms +[2025-07-08 00:37:17] [Rank 0] step:9301/10000 train_time:637390ms step_avg:68.53ms
+[2025-07-08 00:37:18] [Rank 0] step:9321/10000 train_time:638762ms step_avg:68.53ms +[2025-07-08 00:37:20] [Rank 0] step:9341/10000 train_time:640135ms step_avg:68.53ms +[2025-07-08 00:37:21] [Rank 0] step:9361/10000 train_time:641553ms step_avg:68.53ms +[2025-07-08 00:37:22] [Rank 0] step:9381/10000 train_time:642914ms step_avg:68.53ms +[2025-07-08 00:37:24] [Rank 0] step:9401/10000 train_time:644287ms step_avg:68.53ms +[2025-07-08 00:37:25] [Rank 0] step:9421/10000 train_time:645659ms step_avg:68.53ms +[2025-07-08 00:37:26] [Rank 0] step:9441/10000 train_time:647033ms step_avg:68.53ms +[2025-07-08 00:37:28] [Rank 0] step:9461/10000 train_time:648408ms step_avg:68.53ms +[2025-07-08 00:37:29] [Rank 0] step:9481/10000 train_time:649783ms step_avg:68.54ms +[2025-07-08 00:37:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 00:37:31] [Rank 0] PRINT: step:9500/10000 train_loss:2.0851 val_loss:2.0745 train_time:651782ms step_avg:68.61ms +[2025-07-08 00:37:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 00:37:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 00:37:32] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 00:42:58] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 00:42:58] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 00:42:58] [Rank 0] Total Loss: 4.5845 +[2025-07-08 00:42:58] [Rank 0] Total FTA: 0.0891 +[2025-07-08 00:42:58] [Rank 0] Group 0 Loss: 4.7890 +[2025-07-08 00:42:58] [Rank 0] Group 1 Loss: 4.1581 +[2025-07-08 00:42:58] [Rank 0] Group 2 Loss: 4.4290 +[2025-07-08 00:42:58] [Rank 0] Group 3 Loss: 4.6637 +[2025-07-08 00:42:58] [Rank 0] Group 4 Loss: 4.6528 +[2025-07-08 00:42:58] [Rank 0] Group 5 Loss: 4.5769 +[2025-07-08 00:42:58] [Rank 0] Group 6 Loss: 4.5935 +[2025-07-08 00:42:58] [Rank 0] Group 7 Loss: 4.5988 +[2025-07-08 00:42:58] [Rank 0] Group 8 Loss: 4.5587 +[2025-07-08 00:42:58] [Rank 0] Group 9 Loss: 4.6489 +[2025-07-08 00:42:59] [Rank 0] Group 10 Loss: 4.5890 +[2025-07-08 00:42:59] [Rank 0] Group 11 Loss: 4.5791 +[2025-07-08 00:42:59] [Rank 0] Group 0 FTA: 0.1756 +[2025-07-08 00:42:59] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-08 00:42:59] [Rank 0] Group 2 FTA: 0.1719 +[2025-07-08 00:42:59] [Rank 0] Group 3 FTA: 0.0417 +[2025-07-08 00:42:59] [Rank 0] Group 4 FTA: 0.0234 +[2025-07-08 00:42:59] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-08 00:42:59] [Rank 0] Group 6 FTA: 0.0521 +[2025-07-08 00:42:59] [Rank 0] Group 7 FTA: 0.1068 +[2025-07-08 00:42:59] [Rank 0] Group 8 FTA: 0.0703 +[2025-07-08 00:42:59] [Rank 0] Group 9 FTA: 0.0625 +[2025-07-08 00:42:59] [Rank 0] Group 10 FTA: 0.1113 +[2025-07-08 00:42:59] [Rank 0] Group 11 FTA: 0.0869 +[2025-07-08 00:42:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-08 00:42:59] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-08 00:43:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-08 00:43:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-08 00:43:00] [Rank 0] step:9501/10000 train_time:651792ms step_avg:68.60ms +[2025-07-08 00:43:01] [Rank 0] step:9521/10000 train_time:652543ms step_avg:68.54ms +[2025-07-08 00:43:03] [Rank 0] step:9541/10000 train_time:653960ms step_avg:68.54ms +[2025-07-08 00:43:04] [Rank 0] step:9561/10000 train_time:655332ms step_avg:68.54ms +[2025-07-08 00:43:05] [Rank 0] step:9581/10000 train_time:656698ms step_avg:68.54ms +[2025-07-08 00:43:07] [Rank 0] step:9601/10000 train_time:658066ms step_avg:68.54ms +[2025-07-08 00:43:08] [Rank 0] step:9621/10000 train_time:659434ms step_avg:68.54ms +[2025-07-08 00:43:10] [Rank 0] step:9641/10000 train_time:660803ms step_avg:68.54ms +[2025-07-08 00:43:11] [Rank 0] step:9661/10000 train_time:662171ms step_avg:68.54ms +[2025-07-08 00:43:12] [Rank 0] step:9681/10000 train_time:663540ms step_avg:68.54ms +[2025-07-08 00:43:14] [Rank 0] step:9701/10000 train_time:664909ms step_avg:68.54ms +[2025-07-08 00:43:15] [Rank 0] step:9721/10000 train_time:666947ms step_avg:68.61ms +[2025-07-08 00:43:16] [Rank 0] step:9741/10000 train_time:667687ms step_avg:68.54ms +[2025-07-08 00:43:18] [Rank 0] step:9761/10000 train_time:669059ms step_avg:68.54ms +[2025-07-08 00:43:19] [Rank 0] step:9781/10000 train_time:670432ms step_avg:68.54ms +[2025-07-08 00:43:21] [Rank 0] step:9801/10000 train_time:671807ms step_avg:68.54ms
+[2025-07-08 00:43:22] [Rank 0] step:9821/10000 train_time:673181ms step_avg:68.55ms +[2025-07-08 00:43:23] [Rank 0] step:9841/10000 train_time:674557ms step_avg:68.55ms +[2025-07-08 00:43:25] [Rank 0] step:9861/10000 train_time:675929ms step_avg:68.55ms +[2025-07-08 00:43:26] [Rank 0] step:9881/10000 train_time:677303ms step_avg:68.55ms +[2025-07-08 00:43:27] [Rank 0] step:9901/10000 train_time:678723ms step_avg:68.55ms +[2025-07-08 00:43:29] [Rank 0] step:9921/10000 train_time:680074ms step_avg:68.55ms +[2025-07-08 00:43:30] [Rank 0] step:9941/10000 train_time:681451ms step_avg:68.55ms +[2025-07-08 00:43:32] [Rank 0] step:9961/10000 train_time:682827ms step_avg:68.55ms +[2025-07-08 00:43:33] [Rank 0] step:9981/10000 train_time:684203ms step_avg:68.55ms +[2025-07-08 00:43:34] [Rank 0] step:10000/10000 train_time:685511ms step_avg:68.55ms +[2025-07-08 00:43:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 00:43:35] [Rank 0] PRINT: step:10000/10000 train_loss:2.0596 val_loss:2.0542 train_time:686212ms step_avg:68.62ms +[2025-07-08 00:43:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 00:43:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 00:43:35] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 00:49:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 00:49:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 00:49:01] [Rank 0] Total Loss: 4.6050 +[2025-07-08 00:49:01] [Rank 0] Total FTA: 0.0950 +[2025-07-08 00:49:01] [Rank 0] Group 0 Loss: 4.7724 +[2025-07-08 00:49:01] [Rank 0] Group 1 Loss: 4.1486 +[2025-07-08 00:49:02] [Rank 0] Group 2 Loss: 4.4682 +[2025-07-08 00:49:02] [Rank 0] Group 3 Loss: 4.6721 +[2025-07-08 00:49:02] [Rank 0] Group 4 Loss: 4.6625 +[2025-07-08 00:49:02] [Rank 0] Group 5 Loss: 4.5769 +[2025-07-08 00:49:02] [Rank 0] Group 6 Loss: 4.5932 +[2025-07-08 00:49:02] [Rank 0] Group 7 Loss: 4.6687 +[2025-07-08 00:49:02] [Rank 0] Group 8 Loss: 4.5779 +[2025-07-08 00:49:02] [Rank 0] Group 9 Loss: 4.6494 +[2025-07-08 00:49:02] [Rank 0] Group 10 Loss: 4.6432 +[2025-07-08 00:49:02] [Rank 0] Group 11 Loss: 4.6261 +[2025-07-08 00:49:02] [Rank 0] Group 0 FTA: 0.1612 +[2025-07-08 00:49:02] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-08 00:49:02] [Rank 0] Group 2 FTA: 0.1536 +[2025-07-08 00:49:02] [Rank 0] Group 3 FTA: 0.0365 +[2025-07-08 00:49:02] [Rank 0] Group 4 FTA: 0.0469 +[2025-07-08 00:49:02] [Rank 0] Group 5 FTA: 0.0781 +[2025-07-08 00:49:02] [Rank 0] Group 6 FTA: 0.0885 +[2025-07-08 00:49:02] [Rank 0] Group 7 FTA: 0.0964 +[2025-07-08 00:49:02] [Rank 0] Group 8 FTA: 0.0885 +[2025-07-08 00:49:02] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-08 00:49:02] [Rank 0] Group 10 FTA: 0.1211 +[2025-07-08 00:49:02] [Rank 0] Group 11 FTA: 0.0947 +[2025-07-08 00:49:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_loss_curves.png +[2025-07-08 00:49:02] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/per_class_acc_curves.png +[2025-07-08 00:49:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_loss_curve.png +[2025-07-08 00:49:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_45/total_acc_curve.png +[2025-07-08 00:49:03] [Rank 0] step:10001/10000 train_time:686221ms step_avg:68.62ms +[2025-07-08 00:49:03] [Rank 0] PRINT: --- Training Finished: Tue Jul 8 00:49:03 2025 --- +[2025-07-08 00:49:03] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10316 MiB diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..9892f74cb7c85d1adab55caeb5f2f97d0854e3bb --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "8bf7704b-6cc6-48f8-8a55-461afd468e3e", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..17c8a412ab33a30fe48b400a7a59848667144ad2 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b994f4e19fb4241c727b35a6ee443d9d06793db4e6c31e32aff321bef39913c +size 252422 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..b2ad1b534355f40406bfa0dde1cebe64fa809d1b --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fef64ec2ec9b8c20676f6740319a051d0e947e992fc9157ec815880ca154eb4 +size 256798 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png 
b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..77b131e8f815b8fcc52fe1647dee9f3430f1fd1c --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:574c21e7b5ad23308eaec9b2b43708812ba016fdc5f6310ae94a5c649b060136 +size 84169 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..5f31a2e02194cd6c170fddede5af13955083fb5b --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4a35e349bd01ae60922e14e6d4e773ecaee7a92f186c2d2b4700a4fa7a2822c +size 101038 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/training_log_8bf7704b-6cc6-48f8-8a55-461afd468e3e.txt b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/training_log_8bf7704b-6cc6-48f8-8a55-461afd468e3e.txt new file mode 100644 index 0000000000000000000000000000000000000000..10a365ca9d8031ca02c6ef9329478c71885ceab4 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/training_log_8bf7704b-6cc6-48f8-8a55-461afd468e3e.txt @@ -0,0 +1,5144 @@ +[2025-07-09 02:08:28] [Rank 0] PRINT: --- Script Start: Wed Jul 9 02:08:28 2025 --- +[2025-07-09 02:08:28] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0001) +[2025-07-09 02:08:28] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-09 02:08:28] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-09 02:08:28] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48 +[2025-07-09 02:08:28] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # cycling the shards makes multi-epoch training wrap around automatically + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + 
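+# Illustration of the shard layout (an assumed sketch, not code from the
+# original pipeline): _load_data_shard above expects a 256-slot int32 header
+# (slot 0: the magic number 20240520, slot 1: version 1, slot 2: the token
+# count) followed by the tokens themselves as uint16. Under those assumptions
+# a compatible writer -- the helper name write_data_shard is hypothetical --
+# would be:
+#
+#     def write_data_shard(path, tokens):  # tokens: sequence of ints < 2**16
+#         header = np.zeros(256, dtype=np.int32)  # np is imported above
+#         header[0], header[1], header[2] = 20240520, 1, len(tokens)
+#         with open(path, "wb") as f:
+#             f.write(header.tobytes())  # 256 * 4 bytes, matching f.seek(256 * 4)
+#             f.write(np.asarray(tokens, dtype=np.uint16).tobytes())  # 2 bytes per token
+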
+# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn); " + "5: All Adam (No Muon, all applicable matrices to Adam); " + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); " + "7: Muon(VO Attn, MLP)/Adam(QK Attn); " + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the logfile exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## 
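+# A note on the metric (illustrative, not part of the original script): the
+# prompt is the question text up to and including '?', and a prediction counts
+# as correct when the greedy next token equals the first token of ' ' + answer
+# (the leading space mirrors how answers are tokenized mid-text in training).
+# A minimal sketch of the scoring rule, assuming logits of shape
+# [seq_len, vocab_size] and a prompt of prompt_len tokens:
+#
+#     predicted = torch.argmax(logits[prompt_len - 1, :]).item()
+#     expected = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+#     hit = (predicted == expected)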
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # cap the evaluation at num_samples cached entries + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) 
+ actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + 
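+# Worked example (illustrative): generate_powerlaw_selection_counts(3) walks
+# groups 0..3 and yields
+#   group 0: 1 class   x 2**3 = 8 samples per class
+#   group 1: 1 class   x 2**2 = 4 samples per class
+#   group 2: 2 classes x 2**1 = 2 samples per class
+#   group 3: 4 classes x 2**0 = 1 sample per class
+# i.e. selection_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+# and class_groups == [0, 1, 2, 2, 3, 3, 3, 3]. The "Group N" loss/FTA lines
+# in the log above come from this class-to-group assignment (with a larger m,
+# since the runs report groups 0-11).
+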
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    # =================================================================
+    # <<<<< Restored Missing Code >>>>>
+    # =================================================================
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # Create the class -> group mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
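+# Worked example of the padding arithmetic used in both evaluation loops above:
+# for a QA item of original_len = 200 tokens with BLOCK_SIZE = 128,
+#     padded_len = ((200 + 127) // 128) * 128 = 256
+# so the input is right-padded to 256 tokens and window_blocks = 256 // 128 = 2.
+# Sequences longer than max_eval_len = 4096 tokens are truncated to 4096 (32 blocks).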
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+if run_flag:
+    ########################################
+    #    Construct model and optimizer     #
+    ########################################
+    if master_process:
+        try:
+            ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+            print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+        except Exception:
+            ft_tokenizer = None
+            print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+    else:
+        ft_tokenizer = None
+
+    print0("PRINT: Constructing model...", console=True)
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+    for m in model.modules():
+        if isinstance(m, nn.Embedding):
+            m.bfloat16()
+    print0("PRINT: Broadcasting model parameters...", console=True)
+    for param in model.parameters():
+        dist.broadcast(param.detach(), 0)
+    print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model forward function:", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)
+            model.train()
+
+            print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+            if isinstance(result, tuple):
+                print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+                if len(result) >= 2:
+                    print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                    print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+            else:
+                print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+    model_for_inference = model
+    print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model with target_seq=None...", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks) # target_seq=None
+            model.train()
+
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+            break
+
+        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+        loss_train.backward()
+        train_loss_sum += loss_train.detach() / args.train_seq_len
+        train_step_count += 1
+
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+        current_lr_val = get_lr(step)
+        for opt in optimizers:
+            for group in opt.param_groups:
+                group["lr"] = group["initial_lr"] * current_lr_val
+
+        if optimizer2 is not None:
+            for group in optimizer2.param_groups:
+                frac = min(step / 300, 1)
+                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+        for opt in optimizers:
+            opt.step()
+
+        model_compiled.zero_grad(set_to_none=True)
+
+        if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+            approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+            total_tokens_in_batch = args.train_seq_len * world_size
+            train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()  # computed for reference; not printed below
+            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+    print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+           f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+    if dist.is_initialized():
+        dist.destroy_process_group()
+
[2025-07-09 02:08:28] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
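+# The .bin shards loaded below follow a simple layout: a 256-int32 header with
+# header[0] = 20240520 (magic), header[1] = 1 (version), header[2] = token count,
+# followed by the tokens as uint16. A standalone header-peek sketch (assumes only
+# numpy; the helper name is illustrative, not part of this script):
+#
+#     import numpy as np
+#     def peek_shard_header(path):
+#         header = np.fromfile(path, dtype=np.int32, count=256)
+#         assert header[0] == 20240520 and header[1] == 1
+#         return int(header[2])  # number of uint16 tokens after the 1024-byte header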
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle() loops over the shards, enabling multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file once (the duplicated unconditional
+        # write has been removed: it double-logged and crashed when logfile was None)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f"  Prompt : '{result['prompt']}'")
+            print(f"  Expected answer: '{result['answer']}'")
+            print(
+                f"  Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f"  Expected  : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f"  First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f"  Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
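+# Worked example (illustrative): generate_powerlaw_selection_counts(3) yields
+#   group 0: 1 class  with 2**3 = 8 samples each
+#   group 1: 1 class  with 2**2 = 4 samples each
+#   group 2: 2 classes with 2**1 = 2 samples each
+#   group 3: 4 classes with 2**0 = 1 sample  each
+# i.e. selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1} and
+# class_groups = [0, 1, 2, 2, 3, 3, 3, 3]: each successive group doubles the class
+# count while halving the per-class sample count, giving the power-law mix
+# (this run uses m = 11 via M_FOR_POWERLAW).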
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[-1]  # model may return (loss, logits); keep the logits
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)  # history is keyed by training step, not epoch
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+if run_flag:
+    ########################################
+    #    Construct model and optimizer     #
+    ########################################
+    if master_process:
+        try:
+            ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+            print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+        except Exception:
+            ft_tokenizer = None
+            print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+    else:
+        ft_tokenizer = None
+
+    print0("PRINT: Constructing model...", console=True)
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+    for m in model.modules():
+        if isinstance(m, nn.Embedding):
+            m.bfloat16()
+    print0("PRINT: Broadcasting model parameters...", console=True)
+    for param in model.parameters():
+        dist.broadcast(param.detach(), 0)
+    print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model forward function:", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)
+            model.train()
+
+            print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+            if isinstance(result, tuple):
+                print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+                if len(result) >= 2:
+                    print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                    print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+            else:
+                print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+    model_for_inference = model
+    print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model with target_seq=None...", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks) # target_seq=None
+            model.train()
+
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
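# For reference, the dispatch below implements the following Muon/Adam split
# (a condensed summary of the modes; Adam always also takes embeds, head, and scalars):
#   mode 0: Muon = QKVO attn + all MLP   | extra Adam matrices = none
#   mode 1: Muon = QK attn               | extra Adam matrices = VO attn + all MLP
#   mode 2: Muon = VO attn               | extra Adam matrices = QK attn + all MLP
#   mode 3: Muon = QKVO attn             | extra Adam matrices = all MLP
#   mode 4: Muon = all MLP               | extra Adam matrices = QKVO attn
#   mode 5: Muon = (none)                | extra Adam matrices = all attn + all MLP
#   mode 6: Muon = MLP W_2               | extra Adam matrices = QKVO attn + MLP W_1
#   mode 7: Muon = VO attn + all MLP     | extra Adam matrices = QK attn
#   mode 8: Muon = VO attn + MLP W_2     | extra Adam matrices = QK attn + MLP W_1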
+        if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+            print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+            muon_params_target_list = all_attn_matrices + all_mlp_matrices
+            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+        elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_qk_group
+            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+        elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group
+            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+        elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_attn_matrices
+            adam_matrix_target_list = all_mlp_matrices
+        elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_mlp_matrices
+            adam_matrix_target_list = all_attn_matrices
+        elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = []
+            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+        elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = mlp_w2_group
+            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+        elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + all_mlp_matrices
+            adam_matrix_target_list = attn_qk_group
+        elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + mlp_w2_group
+            adam_matrix_target_list = attn_qk_group + mlp_w1_group
+        else:
+            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04),
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list
+                                  for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p])
+                                  if p is not None]
+            if flat_adam_matrices:  # Only add the group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # weight_decay=0.01 could be added to Adam here
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+        # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
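# A minimal sanity-check sketch, not in the original script, that could be run
# once `optimizers` has been built by either parameterization branch: every
# trainable parameter should be owned by exactly one optimizer.
owned = [p for opt in optimizers for g in opt.param_groups for p in g["params"]]
assert len(owned) == len({id(p) for p in owned}), "a parameter is assigned to two optimizers"
assert {id(p) for p in owned} == {id(p) for p in model.parameters()}, "a parameter is unassigned"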
+    elif exp_args.model_parameterization == "whole":
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+        scalar_params = [p for p in model.parameters() if p.ndim < 2]
+        head_params = [model.lm_head.weight]
+
+        # init the optimizer(s)
+        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+        # Small Adam epsilon by @YouJiacheng. This is an alternate method of fixing the world_size dependence,
+        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+        optimizers = [optimizer1, optimizer2]
+
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["initial_lr"] = group["lr"]
+
+    # learning rate schedule: stable then decay
+    def get_lr(step: int):
+        x = step / args.num_iterations  # progress in training
+        # The original assert (0 <= x < 1) could fail on the final step, so clamp instead.
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0)
+
+        if x < 1 - args.cooldown_frac:
+            return 1.0
+        else:
+            # Guard against division by zero if cooldown_frac is 0
+            w = (1 - x) / max(args.cooldown_frac, 1e-9)
+            return w * 1.0 + (1 - w) * 0.1
+
+    # attention window size schedule
+    def next_multiple_of_n(v: float | int, *, n: int):
+        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+    @lru_cache(1)
+    def get_window_size_blocks_helper(window_size: int):
+        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+    def get_window_size_blocks(step: int):
+        x = step / args.num_iterations  # progress in training
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0)  # Clamp x, as in get_lr above
+
+        # Ensure window_size is at least 128
+        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+        return get_window_size_blocks_helper(window_size)
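# Worked example of the two schedules above with this run's settings
# (num_iterations=10000, cooldown_frac=0.8); illustrative arithmetic only:
#   get_lr(1000):  x=0.1 < 0.2                -> multiplier 1.0 (stable phase)
#   get_lr(6000):  x=0.6, w=(1-0.6)/0.8=0.5   -> 0.5*1.0 + 0.5*0.1 = 0.55
#   get_lr(10000): x=1.0, w=0.0               -> 0.1 (decay floor)
#   get_window_size_blocks(5000): 1728*0.5=864 -> next multiple of 128 is 896 tokens -> 7 blocks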
+    print0("PRINT: Compiling model with TorchInductor...", console=True)
+    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+    print0("PRINT: Model compilation complete.", console=True)
+
+    ########################################
+    #            Warmup kernels            #
+    ########################################
+    print0("PRINT: Starting warmup...", console=True)
+    warmup_steps = 10
+    initial_state = dict(
+        model=copy.deepcopy(model_compiled.state_dict()),
+        optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+    )
+
+    for i in range(warmup_steps):
+        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+        loss.backward()
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+        for opt in optimizers:
+            opt.step()
+        model_compiled.zero_grad(set_to_none=True)
+
+    # Restore the pre-warmup state so warmup does not affect training
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+    del initial_state
+    print0("PRINT: Warmup complete.", console=True)
+    torch.cuda.synchronize()
+
+    ########################################
+    #       Training and validation        #
+    ########################################
+    print0("PRINT: Starting training...", console=True)
+    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+    train_loss_sum = torch.zeros(1, device=device)
+    train_step_count = torch.zeros(1, device=device)
+    training_time_ms = 0
+    torch.cuda.synchronize()
+    t0 = time.perf_counter()
+    train_steps = args.num_iterations
+
+    if master_process:
+        tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+    for step in range(train_steps + 1):
+        last_step = (step == train_steps)
+
+        # --------- VALIDATION SECTION ---------
+        if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+            torch.cuda.synchronize()
+            if step > 0:
+                current_run_time = 1000 * (time.perf_counter() - t0)
+                training_time_ms += current_run_time
+
+            model_compiled.eval()
+            val_batch_size = world_size * args.val_seq_len
+            if args.val_tokens % val_batch_size != 0:
+                print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+            val_num_steps = args.val_tokens // val_batch_size
+            val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+            val_loss_sum = torch.zeros(1, device=device)
+            actual_val_steps = 0
+
+            with torch.no_grad():
+                for val_i in range(val_num_steps):
+                    try:
+                        inputs, targets = next(val_loader)
+                        loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                        val_loss_sum += loss_val
+                        actual_val_steps += 1
+                    except StopIteration:
+                        print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                        break
+
+            if actual_val_steps > 0:
+                val_loss_avg = val_loss_sum / actual_val_steps
+            else:
+                val_loss_avg = torch.tensor(float('nan'), device=device)
+                print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+            del val_loader
+            dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+            if train_step_count > 0:
+                avg_train_loss = train_loss_sum / train_step_count
+                dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+                avg_train_loss = avg_train_loss.item()
+            else:
+                avg_train_loss = float('nan')
+
+            avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+            # (disabled) first-token accuracy during validation:
+            # first_token_acc = 0.0
+            # ft_correct = 0
+            # ft_total = 0
+            # if master_process and ft_tokenizer is not None:
+            #     try:
+            #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+            #             model_for_inference, ft_tokenizer, device, num_samples=1000
+            #         )
+            #     except Exception as e:
+            #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+            # if world_size > 1:
+            #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+            #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+            #     ft_total_tensor = torch.tensor(ft_total, device=device)
+            #     dist.broadcast(ft_acc_tensor, 0)
+            #     dist.broadcast(ft_correct_tensor, 0)
+            #     dist.broadcast(ft_total_tensor, 0)
+            #     first_token_acc = ft_acc_tensor.item()
+            #     ft_correct = int(ft_correct_tensor.item())
+            #     ft_total = int(ft_total_tensor.item())
+
+            avg_train_loss = float(avg_train_loss)
+            if step == 0:
+                print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+            else:
+                print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+            if master_process and step > 0:
+                selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+                class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+                model_for_inference.load_state_dict(model.state_dict())
+
+                eval_results = run_detailed_evaluation(
+                    model=model_for_inference,
+                    tokenizer=tokenizer_for_eval,
+                    qa_data_path=QA_JSONL_PATH,
+                    device=device,
+                    m_val=M_FOR_POWERLAW,
+                    class_to_group_map=class_to_group_map,
+                    num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                )
+
+                print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+                print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+                print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+                for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+                for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
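# The Muon momentum warmup in the training section below ramps linearly from
# 0.85 to 0.95 over the first 300 steps: frac = min(step/300, 1) and
# momentum = (1-frac)*0.85 + frac*0.95, e.g. 0.85 at step 0, 0.90 at step 150,
# and 0.95 from step 300 onward. (In this mode-5 run optimizer2 is None, so the
# ramp is skipped entirely.)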
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-09 02:08:28] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-09 02:08:28] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-09 02:08:28] [Rank 0] PRINT: Constructing model...
+[2025-07-09 02:08:30] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-09 02:08:30] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-09 02:08:30] [Rank 0] PRINT: Testing model forward function:
+[2025-07-09 02:08:31] [Rank 0] PRINT: Model test - Result type:
+[2025-07-09 02:08:31] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-09 02:08:32] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-09 02:08:32] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-09 02:08:32] [Rank 0] PRINT: Model returns:
+[2025-07-09 02:08:32] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-09 02:08:32] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-09 02:08:32] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0001).
+[2025-07-09 02:08:32] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-09 02:08:32] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-09 02:08:32] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-09 02:08:32] [Rank 0] PRINT: Model compilation complete.
+[2025-07-09 02:08:32] [Rank 0] PRINT: Starting warmup...
+[2025-07-09 02:18:16] [Rank 0] PRINT: Warmup complete.
+[2025-07-09 02:18:17] [Rank 0] PRINT: Starting training...
+[2025-07-09 02:18:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 02:22:25] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-09 02:22:27] [Rank 0] step:21/10000 train_time:1554ms step_avg:74.01ms
+[2025-07-09 02:22:28] [Rank 0] step:41/10000 train_time:2879ms step_avg:70.22ms
+[2025-07-09 02:22:30] [Rank 0] step:61/10000 train_time:4205ms step_avg:68.93ms
+[2025-07-09 02:22:31] [Rank 0] step:81/10000 train_time:5532ms step_avg:68.30ms
+[2025-07-09 02:22:32] [Rank 0] step:101/10000 train_time:6860ms step_avg:67.92ms
+[2025-07-09 02:22:34] [Rank 0] step:121/10000 train_time:8187ms step_avg:67.67ms
+[2025-07-09 02:22:35] [Rank 0] step:141/10000 train_time:9514ms step_avg:67.48ms
+[2025-07-09 02:22:36] [Rank 0] step:161/10000 train_time:10844ms step_avg:67.35ms
+[2025-07-09 02:22:38] [Rank 0] step:181/10000 train_time:12220ms step_avg:67.51ms
+[2025-07-09 02:22:39] [Rank 0] step:201/10000 train_time:13566ms step_avg:67.49ms
+[2025-07-09 02:22:40] [Rank 0] step:221/10000 train_time:14896ms step_avg:67.40ms
+[2025-07-09 02:22:42] [Rank 0] step:241/10000 train_time:16225ms step_avg:67.32ms
+[2025-07-09 02:22:43] [Rank 0] step:261/10000 train_time:17556ms step_avg:67.26ms
+[2025-07-09 02:22:44] [Rank 0] step:281/10000 train_time:18886ms step_avg:67.21ms
+[2025-07-09 02:22:46] [Rank 0] step:301/10000 train_time:20217ms step_avg:67.17ms
+[2025-07-09 02:22:47] [Rank 0] step:321/10000 train_time:21547ms step_avg:67.12ms
+[2025-07-09 02:22:48] [Rank 0] step:341/10000 train_time:22877ms step_avg:67.09ms
+[2025-07-09 02:22:50] [Rank 0] step:361/10000 train_time:24208ms step_avg:67.06ms
+[2025-07-09 02:22:51] [Rank 0] step:381/10000 train_time:25600ms step_avg:67.19ms
+[2025-07-09 02:22:53] [Rank 0] step:401/10000 train_time:27016ms step_avg:67.37ms
+[2025-07-09 02:22:54] [Rank 0] step:421/10000 train_time:28265ms step_avg:67.14ms
+[2025-07-09 02:22:55] [Rank 0] step:441/10000 train_time:29596ms step_avg:67.11ms
+[2025-07-09 02:22:57] [Rank 0] step:461/10000 train_time:30927ms step_avg:67.09ms
+[2025-07-09 02:22:58] [Rank 0] step:481/10000 train_time:32258ms step_avg:67.06ms
+[2025-07-09 02:22:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 02:23:00] [Rank 0] PRINT: step:500/10000 train_loss:9.6506 val_loss:8.5999 train_time:34193ms step_avg:68.39ms
+[2025-07-09 02:23:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 02:23:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 02:23:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 02:28:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 02:28:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 02:28:21] [Rank 0]   Total Loss: 8.9196
+[2025-07-09 02:28:21] [Rank 0]   Total FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 0 Loss: 8.9401
+[2025-07-09 02:28:21] [Rank 0]   Group 1 Loss: 8.9198
+[2025-07-09 02:28:21] [Rank 0]   Group 2 Loss: 8.9619
+[2025-07-09 02:28:21] [Rank 0]   Group 3 Loss: 8.9015
+[2025-07-09 02:28:21] [Rank 0]   Group 4 Loss: 8.9120
+[2025-07-09 02:28:21] [Rank 0]   Group 5 Loss: 8.8990
+[2025-07-09 02:28:21] [Rank 0]   Group 6 Loss: 8.9212
+[2025-07-09 02:28:21] [Rank 0]   Group 7 Loss: 8.9145
+[2025-07-09 02:28:21] [Rank 0]   Group 8 Loss: 8.9129
+[2025-07-09 02:28:21] [Rank 0]   Group 9 Loss: 8.9167
+[2025-07-09 02:28:21] [Rank 0]   Group 10 Loss: 8.9104
+[2025-07-09 02:28:21] [Rank 0]   Group 11 Loss: 8.9147
+[2025-07-09 02:28:21] [Rank 0]   Group 0 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 1 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 2 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 3 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 4 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 5 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 6 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 7 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 8 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 9 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 10 FTA: 0.0000
+[2025-07-09 02:28:21] [Rank 0]   Group 11 FTA: 0.0000
+[2025-07-09 02:28:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 02:28:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 02:28:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 02:28:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 02:28:23] [Rank 0] step:501/10000 train_time:34204ms step_avg:68.27ms
+[2025-07-09 02:28:24] [Rank 0] step:521/10000 train_time:34955ms step_avg:67.09ms
+[2025-07-09 02:28:25] [Rank 0] step:541/10000 train_time:36276ms step_avg:67.05ms
+[2025-07-09 02:28:27] [Rank 0] step:561/10000 train_time:37678ms step_avg:67.16ms
+[2025-07-09 02:28:28] [Rank 0] step:581/10000 train_time:39000ms step_avg:67.13ms
+[2025-07-09 02:28:29] [Rank 0] step:601/10000 train_time:40326ms step_avg:67.10ms
+[2025-07-09 02:28:31] [Rank 0] step:621/10000 train_time:41651ms step_avg:67.07ms
+[2025-07-09 02:28:32] [Rank 0] step:641/10000 train_time:42977ms step_avg:67.05ms
+[2025-07-09 02:28:33] [Rank 0] step:661/10000 train_time:44302ms step_avg:67.02ms
+[2025-07-09 02:28:35] [Rank 0] step:681/10000 train_time:45627ms step_avg:67.00ms
+[2025-07-09 02:28:36] [Rank 0] step:701/10000 train_time:46955ms step_avg:66.98ms
+[2025-07-09 02:28:37] [Rank 0] step:721/10000 train_time:48335ms step_avg:67.04ms
+[2025-07-09 02:28:39] [Rank 0] step:741/10000 train_time:49613ms step_avg:66.95ms
+[2025-07-09 02:28:40] [Rank 0] step:761/10000 train_time:50948ms step_avg:66.95ms
+[2025-07-09 02:28:41] [Rank 0] step:781/10000 train_time:52286ms step_avg:66.95ms
+[2025-07-09 02:28:43] [Rank 0] step:801/10000 train_time:53623ms step_avg:66.95ms
+[2025-07-09 02:28:44] [Rank 0] step:821/10000 train_time:54963ms step_avg:66.95ms
+[2025-07-09 02:28:45] [Rank 0] step:841/10000 train_time:56302ms step_avg:66.95ms
+[2025-07-09 02:28:47] [Rank 0] step:861/10000 train_time:57641ms step_avg:66.95ms
+[2025-07-09 02:28:48] [Rank 0] step:881/10000 train_time:58982ms step_avg:66.95ms
+[2025-07-09 02:28:49] [Rank 0] step:901/10000 train_time:60369ms step_avg:67.00ms
+[2025-07-09 02:28:51] [Rank 0] step:921/10000 train_time:61717ms step_avg:67.01ms
+[2025-07-09 02:28:52] [Rank 0] step:941/10000 train_time:63059ms step_avg:67.01ms
+[2025-07-09 02:28:53] [Rank 0] step:961/10000 train_time:64400ms step_avg:67.01ms
+[2025-07-09 02:28:55] [Rank 0] step:981/10000 train_time:65741ms step_avg:67.01ms
+[2025-07-09 02:28:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 02:28:57] [Rank 0] PRINT: step:1000/10000 train_loss:7.8085 val_loss:7.1232 train_time:67724ms step_avg:67.72ms
+[2025-07-09 02:28:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 02:28:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 02:28:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 02:34:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 02:34:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 02:34:19] [Rank 0]   Total Loss: 7.6888
+[2025-07-09 02:34:19] [Rank 0]   Total FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 0 Loss: 7.6939
+[2025-07-09 02:34:19] [Rank 0]   Group 1 Loss: 7.6422
+[2025-07-09 02:34:19] [Rank 0]   Group 2 Loss: 7.8214
+[2025-07-09 02:34:19] [Rank 0]   Group 3 Loss: 7.6567
+[2025-07-09 02:34:19] [Rank 0]   Group 4 Loss: 7.7178
+[2025-07-09 02:34:19] [Rank 0]   Group 5 Loss: 7.6432
+[2025-07-09 02:34:19] [Rank 0]   Group 6 Loss: 7.6977
+[2025-07-09 02:34:19] [Rank 0]   Group 7 Loss: 7.6985
+[2025-07-09 02:34:19] [Rank 0]   Group 8 Loss: 7.6499
+[2025-07-09 02:34:19] [Rank 0]   Group 9 Loss: 7.6789
+[2025-07-09 02:34:19] [Rank 0]   Group 10 Loss: 7.6863
+[2025-07-09 02:34:19] [Rank 0]   Group 11 Loss: 7.6825
+[2025-07-09 02:34:19] [Rank 0]   Group 0 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 1 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 2 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 3 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 4 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 5 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 6 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 7 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 8 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 9 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 10 FTA: 0.0000
+[2025-07-09 02:34:19] [Rank 0]   Group 11 FTA: 0.0000
+[2025-07-09 02:34:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 02:34:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 02:34:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 02:34:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 02:34:21] [Rank 0] step:1001/10000 train_time:67735ms step_avg:67.67ms
+[2025-07-09 02:34:22] [Rank 0] step:1021/10000 train_time:68473ms step_avg:67.06ms
+[2025-07-09 02:34:23] [Rank 0] step:1041/10000 train_time:69808ms step_avg:67.06ms
+[2025-07-09 02:34:25] [Rank 0] step:1061/10000 train_time:71142ms step_avg:67.05ms
+[2025-07-09 02:34:26] [Rank 0] step:1081/10000 train_time:72729ms step_avg:67.28ms
+[2025-07-09 02:34:28] [Rank 0] step:1101/10000 train_time:73875ms step_avg:67.10ms
+[2025-07-09 02:34:29] [Rank 0] step:1121/10000 train_time:75252ms step_avg:67.13ms
+[2025-07-09 02:34:30] [Rank 0] step:1141/10000 train_time:76587ms step_avg:67.12ms
+[2025-07-09 02:34:32] [Rank 0] step:1161/10000 train_time:77925ms step_avg:67.12ms
+[2025-07-09 02:34:33] [Rank 0] step:1181/10000 train_time:79262ms step_avg:67.11ms
+[2025-07-09 02:34:34] [Rank 0] step:1201/10000 train_time:80601ms step_avg:67.11ms
+[2025-07-09 02:34:36] [Rank 0] step:1221/10000 train_time:81942ms step_avg:67.11ms
+[2025-07-09 02:34:37] [Rank 0] step:1241/10000 train_time:83282ms step_avg:67.11ms
+[2025-07-09 02:34:38] [Rank 0] step:1261/10000 train_time:84625ms step_avg:67.11ms
+[2025-07-09 02:34:40] [Rank 0] step:1281/10000 train_time:86004ms step_avg:67.14ms
+[2025-07-09 02:34:41] [Rank 0] step:1301/10000 train_time:87382ms step_avg:67.17ms
+[2025-07-09 02:34:42] [Rank 0] step:1321/10000 train_time:88725ms step_avg:67.17ms
+[2025-07-09 02:34:44] [Rank 0] step:1341/10000 train_time:90070ms step_avg:67.17ms
+[2025-07-09 02:34:45] [Rank 0] step:1361/10000 train_time:91416ms step_avg:67.17ms
+[2025-07-09 02:34:46] [Rank 0] step:1381/10000 train_time:92761ms step_avg:67.17ms
+[2025-07-09 02:34:48] [Rank 0] step:1401/10000 train_time:94107ms step_avg:67.17ms
+[2025-07-09 02:34:49] [Rank 0] step:1421/10000 train_time:95454ms step_avg:67.17ms
+[2025-07-09 02:34:51] [Rank 0] step:1441/10000 train_time:96799ms step_avg:67.17ms
+[2025-07-09 02:34:52] [Rank 0] step:1461/10000 train_time:98198ms step_avg:67.21ms
+[2025-07-09 02:34:53] [Rank 0] step:1481/10000 train_time:99544ms step_avg:67.21ms
+[2025-07-09 02:34:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 02:34:55] [Rank 0] PRINT: step:1500/10000 train_loss:6.5841 val_loss:6.1055 train_time:101504ms step_avg:67.67ms
+[2025-07-09 02:34:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 02:34:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 02:34:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 02:40:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 02:40:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 02:40:18] [Rank 0]   Total Loss: 6.8987
+[2025-07-09 02:40:18] [Rank 0]   Total FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 0 Loss: 6.8360
+[2025-07-09 02:40:18] [Rank 0]   Group 1 Loss: 6.8756
+[2025-07-09 02:40:18] [Rank 0]   Group 2 Loss: 7.0724
+[2025-07-09 02:40:18] [Rank 0]   Group 3 Loss: 6.8504
+[2025-07-09 02:40:18] [Rank 0]   Group 4 Loss: 6.9335
+[2025-07-09 02:40:18] [Rank 0]   Group 5 Loss: 6.8845
+[2025-07-09 02:40:18] [Rank 0]   Group 6 Loss: 6.9267
+[2025-07-09 02:40:18] [Rank 0]   Group 7 Loss: 6.9138
+[2025-07-09 02:40:18] [Rank 0]   Group 8 Loss: 6.8726
+[2025-07-09 02:40:18] [Rank 0]   Group 9 Loss: 6.9153
+[2025-07-09 02:40:18] [Rank 0]   Group 10 Loss: 6.8989
+[2025-07-09 02:40:18] [Rank 0]   Group 11 Loss: 6.8889
+[2025-07-09 02:40:18] [Rank 0]   Group 0 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 1 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 2 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 3 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 4 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 5 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 6 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 7 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 8 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 9 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 10 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0]   Group 11 FTA: 0.0000
+[2025-07-09 02:40:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 02:40:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 02:40:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 02:40:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 02:40:19] [Rank 0] step:1501/10000 train_time:101515ms step_avg:67.63ms
+[2025-07-09 02:40:21] [Rank 0] step:1521/10000 train_time:102251ms step_avg:67.23ms
+[2025-07-09 02:40:22] [Rank 0] step:1541/10000 train_time:103589ms step_avg:67.22ms
+[2025-07-09 02:40:23] [Rank 0] step:1561/10000 train_time:104929ms step_avg:67.22ms
+[2025-07-09 02:40:25] [Rank 0] step:1581/10000 train_time:106270ms step_avg:67.22ms
+[2025-07-09 02:40:26] [Rank 0] step:1601/10000 train_time:107638ms step_avg:67.23ms
+[2025-07-09 02:40:27] [Rank 0] step:1621/10000 train_time:108980ms step_avg:67.23ms
+[2025-07-09 02:40:29] [Rank 0] step:1641/10000 train_time:110376ms step_avg:67.26ms
+[2025-07-09 02:40:30] [Rank 0] step:1661/10000 train_time:111721ms step_avg:67.26ms
+[2025-07-09 02:40:31] [Rank 0] step:1681/10000 train_time:113067ms step_avg:67.26ms
+[2025-07-09 02:40:33] [Rank 0] step:1701/10000 train_time:114413ms step_avg:67.26ms
+[2025-07-09 02:40:34] [Rank 0] step:1721/10000 train_time:115760ms step_avg:67.26ms
+[2025-07-09 02:40:35] [Rank 0] step:1741/10000 train_time:117107ms step_avg:67.26ms
+[2025-07-09 02:40:37] [Rank 0] step:1761/10000 train_time:118456ms step_avg:67.27ms
+[2025-07-09 02:40:38] [Rank 0] step:1781/10000 train_time:119803ms step_avg:67.27ms
+[2025-07-09 02:40:39] [Rank 0] step:1801/10000 train_time:121152ms step_avg:67.27ms
+[2025-07-09 02:40:41] [Rank 0] step:1821/10000 train_time:122538ms step_avg:67.29ms
+[2025-07-09 02:40:42] [Rank 0] step:1841/10000 train_time:123889ms step_avg:67.29ms
+[2025-07-09 02:40:44] [Rank 0] step:1861/10000 train_time:125240ms step_avg:67.30ms
+[2025-07-09 02:40:45] [Rank 0] step:1881/10000 train_time:126590ms step_avg:67.30ms
+[2025-07-09 02:40:46] [Rank 0] step:1901/10000 train_time:127941ms step_avg:67.30ms
+[2025-07-09 02:40:48] [Rank 0] step:1921/10000 train_time:129368ms step_avg:67.34ms
+[2025-07-09 02:40:49] [Rank 0] step:1941/10000 train_time:130642ms step_avg:67.31ms
+[2025-07-09 02:40:50] [Rank 0] step:1961/10000 train_time:131992ms step_avg:67.31ms
+[2025-07-09 02:40:52] [Rank 0] step:1981/10000 train_time:133342ms step_avg:67.31ms
+[2025-07-09 02:40:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 02:40:54] [Rank 0] PRINT: step:2000/10000 train_loss:5.6806 val_loss:5.2972 train_time:135345ms step_avg:67.67ms
+[2025-07-09 02:40:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 02:40:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 02:40:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 02:46:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 02:46:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 02:46:21] [Rank 0] Total Loss: 6.3335
+[2025-07-09 02:46:21] [Rank 0] Total FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 0 Loss: 6.2661
+[2025-07-09 02:46:21] [Rank 0] Group 1 Loss: 6.3209
+[2025-07-09 02:46:21] [Rank 0] Group 2 Loss: 6.4856
+[2025-07-09 02:46:21] [Rank 0] Group 3 Loss: 6.2756
+[2025-07-09 02:46:21] [Rank 0] Group 4 Loss: 6.3654
+[2025-07-09 02:46:21] [Rank 0] Group 5 Loss: 6.3267
+[2025-07-09 02:46:21] [Rank 0] Group 6 Loss: 6.3788
+[2025-07-09 02:46:21] [Rank 0] Group 7 Loss: 6.3609
+[2025-07-09 02:46:21] [Rank 0] Group 8 Loss: 6.3093
+[2025-07-09 02:46:21] [Rank 0] Group 9 Loss: 6.3049
+[2025-07-09 02:46:21] [Rank 0] Group 10 Loss: 6.3235
+[2025-07-09 02:46:21] [Rank 0] Group 11 Loss: 6.3380
+[2025-07-09 02:46:21] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-09 02:46:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 02:46:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 02:46:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 02:46:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 02:46:22] [Rank 0] step:2001/10000 train_time:135355ms step_avg:67.64ms
+[2025-07-09 02:46:24] [Rank 0] step:2021/10000 train_time:136097ms step_avg:67.34ms
+[2025-07-09 02:46:25] [Rank 0] step:2041/10000 train_time:137437ms step_avg:67.34ms
+[2025-07-09 02:46:26] [Rank 0] step:2061/10000 train_time:138779ms step_avg:67.34ms
+[2025-07-09 02:46:28] [Rank 0] step:2081/10000 train_time:140123ms step_avg:67.33ms
+[2025-07-09 02:46:29] [Rank 0] step:2101/10000 train_time:141468ms step_avg:67.33ms
+[2025-07-09 02:46:30] [Rank 0] step:2121/10000 train_time:142812ms step_avg:67.33ms
+[2025-07-09 02:46:32] [Rank 0] step:2141/10000 train_time:144157ms step_avg:67.33ms
+[2025-07-09 02:46:33] [Rank 0] step:2161/10000 train_time:145504ms step_avg:67.33ms
+[2025-07-09 02:46:35] [Rank 0] step:2181/10000 train_time:146889ms step_avg:67.35ms
+[2025-07-09 02:46:36] [Rank 0] step:2201/10000 train_time:148237ms step_avg:67.35ms
+[2025-07-09 02:46:37] [Rank 0] step:2221/10000 train_time:149583ms step_avg:67.35ms
+[2025-07-09 02:46:39] [Rank 0] step:2241/10000 train_time:150941ms step_avg:67.35ms
+[2025-07-09 02:46:40] [Rank 0] step:2261/10000 train_time:152314ms step_avg:67.37ms
+[2025-07-09 02:46:41] [Rank 0] step:2281/10000 train_time:153688ms step_avg:67.38ms
+[2025-07-09 02:46:43] [Rank 0] step:2301/10000 train_time:155062ms step_avg:67.39ms
+[2025-07-09 02:46:44] [Rank 0] step:2321/10000 train_time:156436ms step_avg:67.40ms
+[2025-07-09 02:46:45] [Rank 0] step:2341/10000 train_time:157811ms step_avg:67.41ms
+[2025-07-09 02:46:47] [Rank 0] step:2361/10000 train_time:159223ms step_avg:67.44ms
+[2025-07-09 02:46:48] [Rank 0] step:2381/10000 train_time:160598ms step_avg:67.45ms
+[2025-07-09 02:46:50] [Rank 0] step:2401/10000 train_time:161973ms step_avg:67.46ms
+[2025-07-09 02:46:51] [Rank 0] step:2421/10000 train_time:163349ms step_avg:67.47ms
+[2025-07-09 02:46:52] [Rank 0] step:2441/10000 train_time:164725ms step_avg:67.48ms
+[2025-07-09 02:46:54] [Rank 0] step:2461/10000 train_time:166101ms step_avg:67.49ms
+[2025-07-09 02:46:55] [Rank 0] step:2481/10000 train_time:167476ms step_avg:67.50ms
+[2025-07-09 02:46:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 02:46:57] [Rank 0] PRINT: step:2500/10000 train_loss:4.9908 val_loss:4.7065 train_time:169478ms step_avg:67.79ms
+[2025-07-09 02:46:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 02:46:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 02:46:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 02:52:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 02:52:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 02:52:19] [Rank 0] Total Loss: 5.9472
+[2025-07-09 02:52:19] [Rank 0] Total FTA: 0.0609
+[2025-07-09 02:52:19] [Rank 0] Group 0 Loss: 5.8459
+[2025-07-09 02:52:19] [Rank 0] Group 1 Loss: 5.9017
+[2025-07-09 02:52:19] [Rank 0] Group 2 Loss: 6.0566
+[2025-07-09 02:52:19] [Rank 0] Group 3 Loss: 5.9264
+[2025-07-09 02:52:19] [Rank 0] Group 4 Loss: 6.0091
+[2025-07-09 02:52:19] [Rank 0] Group 5 Loss: 5.9629
+[2025-07-09 02:52:19] [Rank 0] Group 6 Loss: 5.9767
+[2025-07-09 02:52:19] [Rank 0] Group 7 Loss: 5.9589
+[2025-07-09 02:52:19] [Rank 0] Group 8 Loss: 5.9461
+[2025-07-09 02:52:19] [Rank 0] Group 9 Loss: 5.9472
+[2025-07-09 02:52:19] [Rank 0] Group 10 Loss: 5.9589
+[2025-07-09 02:52:19] [Rank 0] Group 11 Loss: 5.9574
+[2025-07-09 02:52:19] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-09 02:52:19] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 02:52:19] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-09 02:52:19] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-09 02:52:19] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-09 02:52:19] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-09 02:52:19] [Rank 0] Group 6 FTA: 0.0599
+[2025-07-09 02:52:19] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-09 02:52:19] [Rank 0] Group 8 FTA: 0.0599
+[2025-07-09 02:52:19] [Rank 0] Group 9 FTA: 0.0430
+[2025-07-09 02:52:19] [Rank 0] Group 10 FTA: 0.0645
+[2025-07-09 02:52:19] [Rank 0] Group 11 FTA: 0.0605
+[2025-07-09 02:52:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 02:52:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 02:52:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 02:52:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 02:52:21] [Rank 0] step:2501/10000 train_time:169491ms step_avg:67.77ms
+[2025-07-09 02:52:22] [Rank 0] step:2521/10000 train_time:170403ms step_avg:67.59ms
+[2025-07-09 02:52:24] [Rank 0] step:2541/10000 train_time:171615ms step_avg:67.54ms
+[2025-07-09 02:52:25] [Rank 0] step:2561/10000 train_time:173004ms step_avg:67.55ms
+[2025-07-09 02:52:26] [Rank 0] step:2581/10000 train_time:174382ms step_avg:67.56ms
+[2025-07-09 02:52:28] [Rank 0] step:2601/10000 train_time:175762ms step_avg:67.57ms
+[2025-07-09 02:52:29] [Rank 0] step:2621/10000 train_time:177140ms step_avg:67.58ms
+[2025-07-09 02:52:31] [Rank 0] step:2641/10000 train_time:178524ms step_avg:67.60ms
+[2025-07-09 02:52:32] [Rank 0] step:2661/10000 train_time:179893ms step_avg:67.60ms
+[2025-07-09 02:52:33] [Rank 0] step:2681/10000 train_time:181266ms step_avg:67.61ms
+[2025-07-09 02:52:35] [Rank 0] step:2701/10000 train_time:182650ms step_avg:67.62ms
+[2025-07-09 02:52:36] [Rank 0] step:2721/10000 train_time:184035ms step_avg:67.63ms
+[2025-07-09 02:52:37] [Rank 0] step:2741/10000 train_time:185417ms step_avg:67.65ms
+[2025-07-09 02:52:39] [Rank 0] step:2761/10000 train_time:186799ms step_avg:67.66ms
+[2025-07-09 02:52:40] [Rank 0] step:2781/10000 train_time:188183ms step_avg:67.67ms
+[2025-07-09 02:52:42] [Rank 0] step:2801/10000 train_time:189567ms step_avg:67.68ms
+[2025-07-09 02:52:43] [Rank 0] step:2821/10000 train_time:190951ms step_avg:67.69ms
+[2025-07-09 02:52:44] [Rank 0] step:2841/10000 train_time:192335ms step_avg:67.70ms
+[2025-07-09 02:52:46] [Rank 0] step:2861/10000 train_time:193716ms step_avg:67.71ms
+[2025-07-09 02:52:47] [Rank 0] step:2881/10000 train_time:195146ms step_avg:67.74ms
+[2025-07-09 02:52:49] [Rank 0] step:2901/10000 train_time:196554ms step_avg:67.75ms
+[2025-07-09 02:52:50] [Rank 0] step:2921/10000 train_time:197993ms step_avg:67.78ms
+[2025-07-09 02:52:51] [Rank 0] step:2941/10000 train_time:199394ms step_avg:67.80ms
+[2025-07-09 02:52:53] [Rank 0] step:2961/10000 train_time:200830ms step_avg:67.82ms
+[2025-07-09 02:52:54] [Rank 0] step:2981/10000 train_time:202286ms step_avg:67.86ms
+[2025-07-09 02:52:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 02:52:57] [Rank 0] PRINT: step:3000/10000 train_loss:4.4434 val_loss:4.1958 train_time:204332ms step_avg:68.11ms
+[2025-07-09 02:52:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 02:52:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 02:52:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 02:58:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 02:58:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 02:58:22] [Rank 0] Total Loss: 5.6553
+[2025-07-09 02:58:22] [Rank 0] Total FTA: 0.0600
+[2025-07-09 02:58:22] [Rank 0] Group 0 Loss: 5.5337
+[2025-07-09 02:58:22] [Rank 0] Group 1 Loss: 5.6692
+[2025-07-09 02:58:22] [Rank 0] Group 2 Loss: 5.7473
+[2025-07-09 02:58:22] [Rank 0] Group 3 Loss: 5.6212
+[2025-07-09 02:58:22] [Rank 0] Group 4 Loss: 5.7395
+[2025-07-09 02:58:22] [Rank 0] Group 5 Loss: 5.6692
+[2025-07-09 02:58:22] [Rank 0] Group 6 Loss: 5.6774
+[2025-07-09 02:58:22] [Rank 0] Group 7 Loss: 5.6804
+[2025-07-09 02:58:22] [Rank 0] Group 8 Loss: 5.6344
+[2025-07-09 02:58:22] [Rank 0] Group 9 Loss: 5.6690
+[2025-07-09 02:58:22] [Rank 0] Group 10 Loss: 5.6502
+[2025-07-09 02:58:22] [Rank 0] Group 11 Loss: 5.6723
+[2025-07-09 02:58:22] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-09 02:58:22] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 02:58:22] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-09 02:58:22] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-09 02:58:22] [Rank 0] Group 4 FTA: 0.0260
+[2025-07-09 02:58:22] [Rank 0] Group 5 FTA: 0.0286
+[2025-07-09 02:58:22] [Rank 0] Group 6 FTA: 0.0625
+[2025-07-09 02:58:22] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-09 02:58:22] [Rank 0] Group 8 FTA: 0.0495
+[2025-07-09 02:58:22] [Rank 0] Group 9 FTA: 0.0547
+[2025-07-09 02:58:22] [Rank 0] Group 10 FTA: 0.0547
+[2025-07-09 02:58:22] [Rank 0] Group 11 FTA: 0.0557
+[2025-07-09 02:58:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 02:58:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 02:58:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 02:58:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 02:58:24] [Rank 0] step:3001/10000 train_time:204342ms step_avg:68.09ms
+[2025-07-09 02:58:25] [Rank 0] step:3021/10000 train_time:205113ms step_avg:67.90ms
+[2025-07-09 02:58:27] [Rank 0] step:3041/10000 train_time:206479ms step_avg:67.90ms
+[2025-07-09 02:58:28] [Rank 0] step:3061/10000 train_time:208525ms step_avg:68.12ms
+[2025-07-09 02:58:29] [Rank 0] step:3081/10000 train_time:209258ms step_avg:67.92ms
+[2025-07-09 02:58:31] [Rank 0] step:3101/10000 train_time:210625ms step_avg:67.92ms
+[2025-07-09 02:58:32] [Rank 0] step:3121/10000 train_time:211995ms step_avg:67.93ms
+[2025-07-09 02:58:33] [Rank 0] step:3141/10000 train_time:213365ms step_avg:67.93ms
+[2025-07-09 02:58:35] [Rank 0] step:3161/10000 train_time:214737ms step_avg:67.93ms
+[2025-07-09 02:58:36] [Rank 0] step:3181/10000 train_time:216108ms step_avg:67.94ms
+[2025-07-09 02:58:38] [Rank 0] step:3201/10000 train_time:217481ms step_avg:67.94ms
+[2025-07-09 02:58:39] [Rank 0] step:3221/10000 train_time:218853ms step_avg:67.95ms
+[2025-07-09 02:58:40] [Rank 0] step:3241/10000 train_time:220227ms step_avg:67.95ms
+[2025-07-09 02:58:42] [Rank 0] step:3261/10000 train_time:221643ms step_avg:67.97ms
+[2025-07-09 02:58:43] [Rank 0] step:3281/10000 train_time:223096ms step_avg:68.00ms
+[2025-07-09 02:58:45] [Rank 0] step:3301/10000 train_time:224421ms step_avg:67.99ms
+[2025-07-09 02:58:46] [Rank 0] step:3321/10000 train_time:225795ms step_avg:67.99ms
+[2025-07-09 02:58:47] [Rank 0] step:3341/10000 train_time:227168ms step_avg:67.99ms
+[2025-07-09 02:58:49] [Rank 0] step:3361/10000 train_time:228541ms step_avg:68.00ms
+[2025-07-09 02:58:50] [Rank 0] step:3381/10000 train_time:229915ms step_avg:68.00ms
+[2025-07-09 02:58:51] [Rank 0] step:3401/10000 train_time:231289ms step_avg:68.01ms
+[2025-07-09 02:58:53] [Rank 0] step:3421/10000 train_time:232914ms step_avg:68.08ms
+[2025-07-09 02:58:54] [Rank 0] step:3441/10000 train_time:234060ms step_avg:68.02ms
+[2025-07-09 02:58:56] [Rank 0] step:3461/10000 train_time:235436ms step_avg:68.03ms
+[2025-07-09 02:58:57] [Rank 0] step:3481/10000 train_time:236812ms step_avg:68.03ms
+[2025-07-09 02:58:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 02:58:59] [Rank 0] PRINT: step:3500/10000 train_loss:3.9757 val_loss:3.7729 train_time:238813ms step_avg:68.23ms
+[2025-07-09 02:58:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 02:58:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 02:58:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:04:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:04:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:04:22] [Rank 0] Total Loss: 5.4091
+[2025-07-09 03:04:22] [Rank 0] Total FTA: 0.0683
+[2025-07-09 03:04:22] [Rank 0] Group 0 Loss: 5.3061
+[2025-07-09 03:04:22] [Rank 0] Group 1 Loss: 5.3707
+[2025-07-09 03:04:22] [Rank 0] Group 2 Loss: 5.5074
+[2025-07-09 03:04:22] [Rank 0] Group 3 Loss: 5.3195
+[2025-07-09 03:04:22] [Rank 0] Group 4 Loss: 5.4845
+[2025-07-09 03:04:22] [Rank 0] Group 5 Loss: 5.4285
+[2025-07-09 03:04:22] [Rank 0] Group 6 Loss: 5.4458
+[2025-07-09 03:04:22] [Rank 0] Group 7 Loss: 5.4308
+[2025-07-09 03:04:22] [Rank 0] Group 8 Loss: 5.4164
+[2025-07-09 03:04:22] [Rank 0] Group 9 Loss: 5.4233
+[2025-07-09 03:04:22] [Rank 0] Group 10 Loss: 5.4214
+[2025-07-09 03:04:22] [Rank 0] Group 11 Loss: 5.4276
+[2025-07-09 03:04:22] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-09 03:04:22] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:04:22] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-09 03:04:22] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-09 03:04:22] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-09 03:04:22] [Rank 0] Group 5 FTA: 0.0521
+[2025-07-09 03:04:22] [Rank 0] Group 6 FTA: 0.0573
+[2025-07-09 03:04:22] [Rank 0] Group 7 FTA: 0.0703
+[2025-07-09 03:04:22] [Rank 0] Group 8 FTA: 0.0859
+[2025-07-09 03:04:22] [Rank 0] Group 9 FTA: 0.0625
+[2025-07-09 03:04:22] [Rank 0] Group 10 FTA: 0.0645
+[2025-07-09 03:04:22] [Rank 0] Group 11 FTA: 0.0498
+[2025-07-09 03:04:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:04:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:04:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:04:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:04:24] [Rank 0] step:3501/10000 train_time:238824ms step_avg:68.22ms
+[2025-07-09 03:04:25] [Rank 0] step:3521/10000 train_time:239575ms step_avg:68.04ms
+[2025-07-09 03:04:26] [Rank 0] step:3541/10000 train_time:240940ms step_avg:68.04ms
+[2025-07-09 03:04:28] [Rank 0] step:3561/10000 train_time:242305ms step_avg:68.04ms
+[2025-07-09 03:04:29] [Rank 0] step:3581/10000 train_time:243673ms step_avg:68.05ms
+[2025-07-09 03:04:30] [Rank 0] step:3601/10000 train_time:245040ms step_avg:68.05ms
+[2025-07-09 03:04:32] [Rank 0] step:3621/10000 train_time:246449ms step_avg:68.06ms
+[2025-07-09 03:04:33] [Rank 0] step:3641/10000 train_time:247819ms step_avg:68.06ms
+[2025-07-09 03:04:35] [Rank 0] step:3661/10000 train_time:249190ms step_avg:68.07ms
+[2025-07-09 03:04:36] [Rank 0] step:3681/10000 train_time:250561ms step_avg:68.07ms
+[2025-07-09 03:04:37] [Rank 0] step:3701/10000 train_time:251932ms step_avg:68.07ms
+[2025-07-09 03:04:39] [Rank 0] step:3721/10000 train_time:253306ms step_avg:68.07ms
+[2025-07-09 03:04:40] [Rank 0] step:3741/10000 train_time:254680ms step_avg:68.08ms
+[2025-07-09 03:04:41] [Rank 0] step:3761/10000 train_time:256054ms step_avg:68.08ms
+[2025-07-09 03:04:43] [Rank 0] step:3781/10000 train_time:257426ms step_avg:68.08ms
+[2025-07-09 03:04:44] [Rank 0] step:3801/10000 train_time:258835ms step_avg:68.10ms
+[2025-07-09 03:04:46] [Rank 0] step:3821/10000 train_time:260210ms step_avg:68.10ms
+[2025-07-09 03:04:47] [Rank 0] step:3841/10000 train_time:261586ms step_avg:68.10ms
+[2025-07-09 03:04:48] [Rank 0] step:3861/10000 train_time:262960ms step_avg:68.11ms
+[2025-07-09 03:04:50] [Rank 0] step:3881/10000 train_time:264337ms step_avg:68.11ms
+[2025-07-09 03:04:51] [Rank 0] step:3901/10000 train_time:265712ms step_avg:68.11ms
+[2025-07-09 03:04:52] [Rank 0] step:3921/10000 train_time:267089ms step_avg:68.12ms
+[2025-07-09 03:04:54] [Rank 0] step:3941/10000 train_time:268466ms step_avg:68.12ms
+[2025-07-09 03:04:55] [Rank 0] step:3961/10000 train_time:270510ms step_avg:68.29ms
+[2025-07-09 03:04:57] [Rank 0] step:3981/10000 train_time:271253ms step_avg:68.14ms
+[2025-07-09 03:04:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:04:59] [Rank 0] PRINT: step:4000/10000 train_loss:3.5934 val_loss:3.4308 train_time:273256ms step_avg:68.31ms
+[2025-07-09 03:04:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:04:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:04:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:10:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:10:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:10:22] [Rank 0] Total Loss: 5.2505
+[2025-07-09 03:10:22] [Rank 0] Total FTA: 0.0669
+[2025-07-09 03:10:22] [Rank 0] Group 0 Loss: 5.1695
+[2025-07-09 03:10:22] [Rank 0] Group 1 Loss: 5.1789
+[2025-07-09 03:10:22] [Rank 0] Group 2 Loss: 5.3071
+[2025-07-09 03:10:22] [Rank 0] Group 3 Loss: 5.2145
+[2025-07-09 03:10:22] [Rank 0] Group 4 Loss: 5.3528
+[2025-07-09 03:10:22] [Rank 0] Group 5 Loss: 5.2863
+[2025-07-09 03:10:22] [Rank 0] Group 6 Loss: 5.2573
+[2025-07-09 03:10:22] [Rank 0] Group 7 Loss: 5.2668
+[2025-07-09 03:10:22] [Rank 0] Group 8 Loss: 5.2495
+[2025-07-09 03:10:22] [Rank 0] Group 9 Loss: 5.2508
+[2025-07-09 03:10:22] [Rank 0] Group 10 Loss: 5.2787
+[2025-07-09 03:10:22] [Rank 0] Group 11 Loss: 5.2564
+[2025-07-09 03:10:22] [Rank 0] Group 0 FTA: 0.1417
+[2025-07-09 03:10:22] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:10:22] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-09 03:10:22] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-09 03:10:22] [Rank 0] Group 4 FTA: 0.0078
+[2025-07-09 03:10:22] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-09 03:10:22] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-09 03:10:22] [Rank 0] Group 7 FTA: 0.0859
+[2025-07-09 03:10:22] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-09 03:10:22] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-09 03:10:22] [Rank 0] Group 10 FTA: 0.0566
+[2025-07-09 03:10:22] [Rank 0] Group 11 FTA: 0.0654
+[2025-07-09 03:10:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:10:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:10:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:10:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:10:24] [Rank 0] step:4001/10000 train_time:273268ms step_avg:68.30ms
+[2025-07-09 03:10:25] [Rank 0] step:4021/10000 train_time:274024ms step_avg:68.15ms
+[2025-07-09 03:10:26] [Rank 0] step:4041/10000 train_time:275391ms step_avg:68.15ms
+[2025-07-09 03:10:28] [Rank 0] step:4061/10000 train_time:276756ms step_avg:68.15ms
+[2025-07-09 03:10:29] [Rank 0] step:4081/10000 train_time:278122ms step_avg:68.15ms
+[2025-07-09 03:10:31] [Rank 0] step:4101/10000 train_time:279489ms step_avg:68.15ms
+[2025-07-09 03:10:32] [Rank 0] step:4121/10000 train_time:280858ms step_avg:68.15ms
+[2025-07-09 03:10:33] [Rank 0] step:4141/10000 train_time:282896ms step_avg:68.32ms
+[2025-07-09 03:10:35] [Rank 0] step:4161/10000 train_time:283636ms step_avg:68.17ms
+[2025-07-09 03:10:36] [Rank 0] step:4181/10000 train_time:285008ms step_avg:68.17ms
+[2025-07-09 03:10:37] [Rank 0] step:4201/10000 train_time:286381ms step_avg:68.17ms
+[2025-07-09 03:10:39] [Rank 0] step:4221/10000 train_time:287754ms step_avg:68.17ms
+[2025-07-09 03:10:40] [Rank 0] step:4241/10000 train_time:289127ms step_avg:68.17ms
+[2025-07-09 03:10:42] [Rank 0] step:4261/10000 train_time:290501ms step_avg:68.18ms
+[2025-07-09 03:10:43] [Rank 0] step:4281/10000 train_time:291874ms step_avg:68.18ms
+[2025-07-09 03:10:44] [Rank 0] step:4301/10000 train_time:293249ms step_avg:68.18ms
+[2025-07-09 03:10:46] [Rank 0] step:4321/10000 train_time:294623ms step_avg:68.18ms
+[2025-07-09 03:10:47] [Rank 0] step:4341/10000 train_time:296022ms step_avg:68.19ms
+[2025-07-09 03:10:48] [Rank 0] step:4361/10000 train_time:297396ms step_avg:68.19ms
+[2025-07-09 03:10:50] [Rank 0] step:4381/10000 train_time:298771ms step_avg:68.20ms
+[2025-07-09 03:10:51] [Rank 0] step:4401/10000 train_time:300146ms step_avg:68.20ms
+[2025-07-09 03:10:53] [Rank 0] step:4421/10000 train_time:301522ms step_avg:68.20ms
+[2025-07-09 03:10:54] [Rank 0] step:4441/10000 train_time:302898ms step_avg:68.20ms
+[2025-07-09 03:10:55] [Rank 0] step:4461/10000 train_time:304273ms step_avg:68.21ms
+[2025-07-09 03:10:57] [Rank 0] step:4481/10000 train_time:305650ms step_avg:68.21ms
+[2025-07-09 03:10:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:10:59] [Rank 0] PRINT: step:4500/10000 train_loss:3.2878 val_loss:3.1554 train_time:307652ms step_avg:68.37ms
+[2025-07-09 03:10:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:10:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:10:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:16:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:16:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:16:25] [Rank 0] Total Loss: 5.0790
+[2025-07-09 03:16:25] [Rank 0] Total FTA: 0.0795
+[2025-07-09 03:16:25] [Rank 0] Group 0 Loss: 4.9987
+[2025-07-09 03:16:25] [Rank 0] Group 1 Loss: 5.0289
+[2025-07-09 03:16:25] [Rank 0] Group 2 Loss: 5.1290
+[2025-07-09 03:16:25] [Rank 0] Group 3 Loss: 5.0507
+[2025-07-09 03:16:25] [Rank 0] Group 4 Loss: 5.1157
+[2025-07-09 03:16:25] [Rank 0] Group 5 Loss: 5.0939
+[2025-07-09 03:16:25] [Rank 0] Group 6 Loss: 5.1434
+[2025-07-09 03:16:25] [Rank 0] Group 7 Loss: 5.0918
+[2025-07-09 03:16:25] [Rank 0] Group 8 Loss: 5.0597
+[2025-07-09 03:16:25] [Rank 0] Group 9 Loss: 5.0888
+[2025-07-09 03:16:25] [Rank 0] Group 10 Loss: 5.0948
+[2025-07-09 03:16:25] [Rank 0] Group 11 Loss: 5.0984
+[2025-07-09 03:16:25] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-09 03:16:25] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:16:25] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-09 03:16:25] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-09 03:16:25] [Rank 0] Group 4 FTA: 0.0260
+[2025-07-09 03:16:25] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-09 03:16:25] [Rank 0] Group 6 FTA: 0.1016
+[2025-07-09 03:16:25] [Rank 0] Group 7 FTA: 0.0651
+[2025-07-09 03:16:25] [Rank 0] Group 8 FTA: 0.0938
+[2025-07-09 03:16:25] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-09 03:16:25] [Rank 0] Group 10 FTA: 0.0586
+[2025-07-09 03:16:25] [Rank 0] Group 11 FTA: 0.0889
+[2025-07-09 03:16:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:16:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:16:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:16:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:16:27] [Rank 0] step:4501/10000 train_time:307669ms step_avg:68.36ms
+[2025-07-09 03:16:29] [Rank 0] step:4521/10000 train_time:309120ms step_avg:68.37ms
+[2025-07-09 03:16:30] [Rank 0] step:4541/10000 train_time:310485ms step_avg:68.37ms
+[2025-07-09 03:16:32] [Rank 0] step:4561/10000 train_time:311852ms step_avg:68.37ms
+[2025-07-09 03:16:33] [Rank 0] step:4581/10000 train_time:313219ms step_avg:68.37ms
+[2025-07-09 03:16:34] [Rank 0] step:4601/10000 train_time:314587ms step_avg:68.37ms
+[2025-07-09 03:16:36] [Rank 0] step:4621/10000 train_time:315956ms step_avg:68.37ms
+[2025-07-09 03:16:37] [Rank 0] step:4641/10000 train_time:317326ms step_avg:68.37ms
+[2025-07-09 03:16:38] [Rank 0] step:4661/10000 train_time:318698ms step_avg:68.38ms
+[2025-07-09 03:16:40] [Rank 0] step:4681/10000 train_time:320070ms step_avg:68.38ms
+[2025-07-09 03:16:41] [Rank 0] step:4701/10000 train_time:321476ms step_avg:68.38ms
+[2025-07-09 03:16:43] [Rank 0] step:4721/10000 train_time:322848ms step_avg:68.39ms
+[2025-07-09 03:16:44] [Rank 0] step:4741/10000 train_time:324222ms step_avg:68.39ms
+[2025-07-09 03:16:45] [Rank 0] step:4761/10000 train_time:325596ms step_avg:68.39ms
+[2025-07-09 03:16:47] [Rank 0] step:4781/10000 train_time:326969ms step_avg:68.39ms
+[2025-07-09 03:16:48] [Rank 0] step:4801/10000 train_time:328344ms step_avg:68.39ms
+[2025-07-09 03:16:49] [Rank 0] step:4821/10000 train_time:329719ms step_avg:68.39ms
+[2025-07-09 03:16:51] [Rank 0] step:4841/10000 train_time:331095ms step_avg:68.39ms
+[2025-07-09 03:16:52] [Rank 0] step:4861/10000 train_time:332470ms step_avg:68.40ms
+[2025-07-09 03:16:54] [Rank 0] step:4881/10000 train_time:333893ms step_avg:68.41ms
+[2025-07-09 03:16:55] [Rank 0] step:4901/10000 train_time:335267ms step_avg:68.41ms
+[2025-07-09 03:16:56] [Rank 0] step:4921/10000 train_time:336642ms step_avg:68.41ms
+[2025-07-09 03:16:58] [Rank 0] step:4941/10000 train_time:338017ms step_avg:68.41ms
+[2025-07-09 03:16:59] [Rank 0] step:4961/10000 train_time:339393ms step_avg:68.41ms
+[2025-07-09 03:17:00] [Rank 0] step:4981/10000 train_time:340768ms step_avg:68.41ms
+[2025-07-09 03:17:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:17:03] [Rank 0] PRINT: step:5000/10000 train_loss:3.0331 val_loss:2.9210 train_time:342771ms step_avg:68.55ms
+[2025-07-09 03:17:03] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:17:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:17:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:22:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:22:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:22:26] [Rank 0] Total Loss: 4.9612
+[2025-07-09 03:22:26] [Rank 0] Total FTA: 0.0790
+[2025-07-09 03:22:26] [Rank 0] Group 0 Loss: 4.9637
+[2025-07-09 03:22:26] [Rank 0] Group 1 Loss: 4.8919
+[2025-07-09 03:22:26] [Rank 0] Group 2 Loss: 4.9772
+[2025-07-09 03:22:26] [Rank 0] Group 3 Loss: 4.8435
+[2025-07-09 03:22:26] [Rank 0] Group 4 Loss: 5.0043
+[2025-07-09 03:22:26] [Rank 0] Group 5 Loss: 4.9498
+[2025-07-09 03:22:26] [Rank 0] Group 6 Loss: 4.9637
+[2025-07-09 03:22:26] [Rank 0] Group 7 Loss: 4.9831
+[2025-07-09 03:22:26] [Rank 0] Group 8 Loss: 4.9374
+[2025-07-09 03:22:26] [Rank 0] Group 9 Loss: 4.9822
+[2025-07-09 03:22:26] [Rank 0] Group 10 Loss: 5.0027
+[2025-07-09 03:22:26] [Rank 0] Group 11 Loss: 4.9851
+[2025-07-09 03:22:26] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-09 03:22:26] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:22:26] [Rank 0] Group 2 FTA: 0.0521
+[2025-07-09 03:22:26] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-09 03:22:26] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-09 03:22:26] [Rank 0] Group 5 FTA: 0.0443
+[2025-07-09 03:22:26] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-09 03:22:26] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-09 03:22:26] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-09 03:22:26] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-09 03:22:26] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-09 03:22:26] [Rank 0] Group 11 FTA: 0.0791
+[2025-07-09 03:22:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:22:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
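Each evaluation block prints one averaged loss and one FTA value per group, losses first, then FTAs, matching the layout above. A toy sketch of the bucketed aggregation that would produce this layout; the records, the group assignment, and the reading of FTA as first-token accuracy are all assumptions, since the log never expands the acronym or shows the evaluation code:

    from collections import defaultdict

    # Hypothetical per-sample results: (group_id, loss, first_token_correct).
    records = [(0, 4.96, True), (0, 4.97, False), (1, 4.89, False)]  # toy data only

    by_group = defaultdict(list)
    for group, loss, hit in records:
        by_group[group].append((loss, hit))

    for group in sorted(by_group):                 # per-group losses first
        losses = [loss for loss, _ in by_group[group]]
        print(f"Group {group} Loss: {sum(losses) / len(losses):.4f}")
    for group in sorted(by_group):                 # then per-group FTA
        hits = [hit for _, hit in by_group[group]]
        print(f"Group {group} FTA: {sum(hits) / len(hits):.4f}")
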
+[2025-07-09 03:22:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:22:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:22:27] [Rank 0] step:5001/10000 train_time:342781ms step_avg:68.54ms
+[2025-07-09 03:22:29] [Rank 0] step:5021/10000 train_time:343543ms step_avg:68.42ms
+[2025-07-09 03:22:30] [Rank 0] step:5041/10000 train_time:344957ms step_avg:68.43ms
+[2025-07-09 03:22:32] [Rank 0] step:5061/10000 train_time:346311ms step_avg:68.43ms
+[2025-07-09 03:22:33] [Rank 0] step:5081/10000 train_time:347680ms step_avg:68.43ms
+[2025-07-09 03:22:34] [Rank 0] step:5101/10000 train_time:349047ms step_avg:68.43ms
+[2025-07-09 03:22:36] [Rank 0] step:5121/10000 train_time:350419ms step_avg:68.43ms
+[2025-07-09 03:22:37] [Rank 0] step:5141/10000 train_time:351791ms step_avg:68.43ms
+[2025-07-09 03:22:38] [Rank 0] step:5161/10000 train_time:353162ms step_avg:68.43ms
+[2025-07-09 03:22:40] [Rank 0] step:5181/10000 train_time:354533ms step_avg:68.43ms
+[2025-07-09 03:22:41] [Rank 0] step:5201/10000 train_time:355906ms step_avg:68.43ms
+[2025-07-09 03:22:43] [Rank 0] step:5221/10000 train_time:357530ms step_avg:68.48ms
+[2025-07-09 03:22:44] [Rank 0] step:5241/10000 train_time:358699ms step_avg:68.44ms
+[2025-07-09 03:22:45] [Rank 0] step:5261/10000 train_time:360074ms step_avg:68.44ms
+[2025-07-09 03:22:47] [Rank 0] step:5281/10000 train_time:361448ms step_avg:68.44ms
+[2025-07-09 03:22:48] [Rank 0] step:5301/10000 train_time:362821ms step_avg:68.44ms
+[2025-07-09 03:22:49] [Rank 0] step:5321/10000 train_time:364196ms step_avg:68.44ms
+[2025-07-09 03:22:51] [Rank 0] step:5341/10000 train_time:365572ms step_avg:68.45ms
+[2025-07-09 03:22:52] [Rank 0] step:5361/10000 train_time:366946ms step_avg:68.45ms
+[2025-07-09 03:22:54] [Rank 0] step:5381/10000 train_time:368322ms step_avg:68.45ms
+[2025-07-09 03:22:55] [Rank 0] step:5401/10000 train_time:369697ms step_avg:68.45ms
+[2025-07-09 03:22:56] [Rank 0] step:5421/10000 train_time:371115ms step_avg:68.46ms
+[2025-07-09 03:22:58] [Rank 0] step:5441/10000 train_time:372492ms step_avg:68.46ms
+[2025-07-09 03:22:59] [Rank 0] step:5461/10000 train_time:373869ms step_avg:68.46ms
+[2025-07-09 03:23:01] [Rank 0] step:5481/10000 train_time:375265ms step_avg:68.47ms
+[2025-07-09 03:23:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:23:03] [Rank 0] PRINT: step:5500/10000 train_loss:2.8151 val_loss:2.7210 train_time:377270ms step_avg:68.59ms
+[2025-07-09 03:23:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:23:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
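Every evaluation pass targets ~5000 samples yet lands on exactly 5633, so the realized size is deterministic given the class distribution. A sketch of per-class proportional sampling that behaves this way; the function and its rounding rule are assumptions (whether the overshoot to 5633 comes from rounding, per-class minimums, or something else is not visible in the log):

    import math
    import random

    def stratified_sample(samples_by_class, target_total=5000, seed=0):
        # Proportional allocation, rounding each class's share up so small
        # classes stay represented; the total then overshoots the target.
        rng = random.Random(seed)
        total = sum(len(v) for v in samples_by_class.values())
        picked = []
        for cls, samples in sorted(samples_by_class.items()):
            share = math.ceil(target_total * len(samples) / total)
            picked.extend(rng.sample(samples, min(share, len(samples))))
        return picked

    eval_set = stratified_sample({g: list(range(1000)) for g in range(12)})
    print(f"Evaluation set size after sampling: {len(eval_set)}")  # 5004 for this toy split
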
+[2025-07-09 03:23:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:28:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:28:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:28:27] [Rank 0] Total Loss: 4.8497
+[2025-07-09 03:28:27] [Rank 0] Total FTA: 0.0843
+[2025-07-09 03:28:27] [Rank 0] Group 0 Loss: 4.8900
+[2025-07-09 03:28:27] [Rank 0] Group 1 Loss: 4.7821
+[2025-07-09 03:28:27] [Rank 0] Group 2 Loss: 4.8859
+[2025-07-09 03:28:27] [Rank 0] Group 3 Loss: 4.7250
+[2025-07-09 03:28:27] [Rank 0] Group 4 Loss: 4.8649
+[2025-07-09 03:28:27] [Rank 0] Group 5 Loss: 4.8290
+[2025-07-09 03:28:27] [Rank 0] Group 6 Loss: 4.8123
+[2025-07-09 03:28:27] [Rank 0] Group 7 Loss: 4.9096
+[2025-07-09 03:28:27] [Rank 0] Group 8 Loss: 4.8586
+[2025-07-09 03:28:27] [Rank 0] Group 9 Loss: 4.8507
+[2025-07-09 03:28:27] [Rank 0] Group 10 Loss: 4.8341
+[2025-07-09 03:28:27] [Rank 0] Group 11 Loss: 4.8756
+[2025-07-09 03:28:27] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-09 03:28:27] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:28:27] [Rank 0] Group 2 FTA: 0.1406
+[2025-07-09 03:28:27] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-09 03:28:28] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-09 03:28:28] [Rank 0] Group 5 FTA: 0.0443
+[2025-07-09 03:28:28] [Rank 0] Group 6 FTA: 0.0833
+[2025-07-09 03:28:28] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-09 03:28:28] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-09 03:28:28] [Rank 0] Group 9 FTA: 0.0664
+[2025-07-09 03:28:28] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-09 03:28:28] [Rank 0] Group 11 FTA: 0.0801
+[2025-07-09 03:28:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:28:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:28:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:28:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:28:29] [Rank 0] step:5501/10000 train_time:377282ms step_avg:68.58ms
+[2025-07-09 03:28:30] [Rank 0] step:5521/10000 train_time:378034ms step_avg:68.47ms
+[2025-07-09 03:28:32] [Rank 0] step:5541/10000 train_time:379401ms step_avg:68.47ms
+[2025-07-09 03:28:33] [Rank 0] step:5561/10000 train_time:380769ms step_avg:68.47ms
+[2025-07-09 03:28:34] [Rank 0] step:5581/10000 train_time:382393ms step_avg:68.52ms
+[2025-07-09 03:28:36] [Rank 0] step:5601/10000 train_time:383559ms step_avg:68.48ms
+[2025-07-09 03:28:37] [Rank 0] step:5621/10000 train_time:384929ms step_avg:68.48ms
+[2025-07-09 03:28:39] [Rank 0] step:5641/10000 train_time:386300ms step_avg:68.48ms
+[2025-07-09 03:28:40] [Rank 0] step:5661/10000 train_time:387672ms step_avg:68.48ms
+[2025-07-09 03:28:41] [Rank 0] step:5681/10000 train_time:389041ms step_avg:68.48ms
+[2025-07-09 03:28:43] [Rank 0] step:5701/10000 train_time:390413ms step_avg:68.48ms
+[2025-07-09 03:28:44] [Rank 0] step:5721/10000 train_time:391786ms step_avg:68.48ms
+[2025-07-09 03:28:45] [Rank 0] step:5741/10000 train_time:393160ms step_avg:68.48ms
+[2025-07-09 03:28:47] [Rank 0] step:5761/10000 train_time:395205ms step_avg:68.60ms
+[2025-07-09 03:28:48] [Rank 0] step:5781/10000 train_time:395945ms step_avg:68.49ms
+[2025-07-09 03:28:50] [Rank 0] step:5801/10000 train_time:397321ms step_avg:68.49ms
+[2025-07-09 03:28:51] [Rank 0] step:5821/10000 train_time:398695ms step_avg:68.49ms
+[2025-07-09 03:28:52] [Rank 0] step:5841/10000 train_time:400070ms step_avg:68.49ms
+[2025-07-09 03:28:54] [Rank 0] step:5861/10000 train_time:401446ms step_avg:68.49ms
+[2025-07-09 03:28:55] [Rank 0] step:5881/10000 train_time:402819ms step_avg:68.50ms
+[2025-07-09 03:28:56] [Rank 0] step:5901/10000 train_time:404193ms step_avg:68.50ms
+[2025-07-09 03:28:58] [Rank 0] step:5921/10000 train_time:405566ms step_avg:68.50ms
+[2025-07-09 03:28:59] [Rank 0] step:5941/10000 train_time:406939ms step_avg:68.50ms
+[2025-07-09 03:29:01] [Rank 0] step:5961/10000 train_time:408361ms step_avg:68.51ms
+[2025-07-09 03:29:02] [Rank 0] step:5981/10000 train_time:409734ms step_avg:68.51ms
+[2025-07-09 03:29:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:29:04] [Rank 0] PRINT: step:6000/10000 train_loss:2.6336 val_loss:2.5563 train_time:411733ms step_avg:68.62ms
+[2025-07-09 03:29:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:29:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:29:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:34:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:34:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:34:28] [Rank 0] Total Loss: 4.7840
+[2025-07-09 03:34:28] [Rank 0] Total FTA: 0.0847
+[2025-07-09 03:34:28] [Rank 0] Group 0 Loss: 4.8989
+[2025-07-09 03:34:28] [Rank 0] Group 1 Loss: 4.6810
+[2025-07-09 03:34:28] [Rank 0] Group 2 Loss: 4.8459
+[2025-07-09 03:34:28] [Rank 0] Group 3 Loss: 4.6458
+[2025-07-09 03:34:28] [Rank 0] Group 4 Loss: 4.7771
+[2025-07-09 03:34:28] [Rank 0] Group 5 Loss: 4.7369
+[2025-07-09 03:34:28] [Rank 0] Group 6 Loss: 4.7350
+[2025-07-09 03:34:28] [Rank 0] Group 7 Loss: 4.7935
+[2025-07-09 03:34:28] [Rank 0] Group 8 Loss: 4.7876
+[2025-07-09 03:34:28] [Rank 0] Group 9 Loss: 4.7583
+[2025-07-09 03:34:28] [Rank 0] Group 10 Loss: 4.8178
+[2025-07-09 03:34:28] [Rank 0] Group 11 Loss: 4.7884
+[2025-07-09 03:34:28] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-09 03:34:28] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:34:28] [Rank 0] Group 2 FTA: 0.1068
+[2025-07-09 03:34:28] [Rank 0] Group 3 FTA: 0.0286
+[2025-07-09 03:34:28] [Rank 0] Group 4 FTA: 0.0104
+[2025-07-09 03:34:28] [Rank 0] Group 5 FTA: 0.0339
+[2025-07-09 03:34:28] [Rank 0] Group 6 FTA: 0.1120
+[2025-07-09 03:34:28] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-09 03:34:28] [Rank 0] Group 8 FTA: 0.0833
+[2025-07-09 03:34:28] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-09 03:34:28] [Rank 0] Group 10 FTA: 0.1152
+[2025-07-09 03:34:28] [Rank 0] Group 11 FTA: 0.0879
+[2025-07-09 03:34:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:34:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:34:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:34:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:34:29] [Rank 0] step:6001/10000 train_time:411746ms step_avg:68.61ms
+[2025-07-09 03:34:31] [Rank 0] step:6021/10000 train_time:412520ms step_avg:68.51ms
+[2025-07-09 03:34:32] [Rank 0] step:6041/10000 train_time:413884ms step_avg:68.51ms
+[2025-07-09 03:34:34] [Rank 0] step:6061/10000 train_time:415249ms step_avg:68.51ms
+[2025-07-09 03:34:35] [Rank 0] step:6081/10000 train_time:416616ms step_avg:68.51ms
+[2025-07-09 03:34:36] [Rank 0] step:6101/10000 train_time:417983ms step_avg:68.51ms
+[2025-07-09 03:34:38] [Rank 0] step:6121/10000 train_time:419350ms step_avg:68.51ms
+[2025-07-09 03:34:39] [Rank 0] step:6141/10000 train_time:420756ms step_avg:68.52ms
+[2025-07-09 03:34:40] [Rank 0] step:6161/10000 train_time:422125ms step_avg:68.52ms
+[2025-07-09 03:34:42] [Rank 0] step:6181/10000 train_time:423498ms step_avg:68.52ms
+[2025-07-09 03:34:43] [Rank 0] step:6201/10000 train_time:424868ms step_avg:68.52ms
+[2025-07-09 03:34:45] [Rank 0] step:6221/10000 train_time:426240ms step_avg:68.52ms
+[2025-07-09 03:34:46] [Rank 0] step:6241/10000 train_time:427612ms step_avg:68.52ms
+[2025-07-09 03:34:47] [Rank 0] step:6261/10000 train_time:428984ms step_avg:68.52ms
+[2025-07-09 03:34:49] [Rank 0] step:6281/10000 train_time:430359ms step_avg:68.52ms
+[2025-07-09 03:34:50] [Rank 0] step:6301/10000 train_time:431730ms step_avg:68.52ms
+[2025-07-09 03:34:51] [Rank 0] step:6321/10000 train_time:433141ms step_avg:68.52ms
+[2025-07-09 03:34:53] [Rank 0] step:6341/10000 train_time:434515ms step_avg:68.52ms
+[2025-07-09 03:34:54] [Rank 0] step:6361/10000 train_time:435889ms step_avg:68.53ms
+[2025-07-09 03:34:56] [Rank 0] step:6381/10000 train_time:437264ms step_avg:68.53ms
+[2025-07-09 03:34:57] [Rank 0] step:6401/10000 train_time:438639ms step_avg:68.53ms
+[2025-07-09 03:34:58] [Rank 0] step:6421/10000 train_time:440016ms step_avg:68.53ms
+[2025-07-09 03:35:00] [Rank 0] step:6441/10000 train_time:441393ms step_avg:68.53ms
+[2025-07-09 03:35:01] [Rank 0] step:6461/10000 train_time:442769ms step_avg:68.53ms
+[2025-07-09 03:35:02] [Rank 0] step:6481/10000 train_time:444146ms step_avg:68.53ms
+[2025-07-09 03:35:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:35:05] [Rank 0] PRINT: step:6500/10000 train_loss:2.4849 val_loss:2.4232 train_time:446183ms step_avg:68.64ms
+[2025-07-09 03:35:05] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:35:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:35:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:40:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:40:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:40:29] [Rank 0] Total Loss: 4.7524
+[2025-07-09 03:40:29] [Rank 0] Total FTA: 0.0948
+[2025-07-09 03:40:29] [Rank 0] Group 0 Loss: 4.9248
+[2025-07-09 03:40:29] [Rank 0] Group 1 Loss: 4.6141
+[2025-07-09 03:40:29] [Rank 0] Group 2 Loss: 4.8159
+[2025-07-09 03:40:29] [Rank 0] Group 3 Loss: 4.6224
+[2025-07-09 03:40:29] [Rank 0] Group 4 Loss: 4.6688
+[2025-07-09 03:40:29] [Rank 0] Group 5 Loss: 4.7056
+[2025-07-09 03:40:29] [Rank 0] Group 6 Loss: 4.7188
+[2025-07-09 03:40:29] [Rank 0] Group 7 Loss: 4.7583
+[2025-07-09 03:40:29] [Rank 0] Group 8 Loss: 4.7505
+[2025-07-09 03:40:29] [Rank 0] Group 9 Loss: 4.7446
+[2025-07-09 03:40:29] [Rank 0] Group 10 Loss: 4.7731
+[2025-07-09 03:40:29] [Rank 0] Group 11 Loss: 4.7511
+[2025-07-09 03:40:29] [Rank 0] Group 0 FTA: 0.1834
+[2025-07-09 03:40:29] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:40:29] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-09 03:40:29] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-09 03:40:29] [Rank 0] Group 4 FTA: 0.0260
+[2025-07-09 03:40:29] [Rank 0] Group 5 FTA: 0.0521
+[2025-07-09 03:40:29] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-09 03:40:29] [Rank 0] Group 7 FTA: 0.1250
+[2025-07-09 03:40:29] [Rank 0] Group 8 FTA: 0.1224
+[2025-07-09 03:40:29] [Rank 0] Group 9 FTA: 0.1133
+[2025-07-09 03:40:29] [Rank 0] Group 10 FTA: 0.1074
+[2025-07-09 03:40:29] [Rank 0] Group 11 FTA: 0.0889
+[2025-07-09 03:40:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:40:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:40:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:40:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:40:30] [Rank 0] step:6501/10000 train_time:446195ms step_avg:68.63ms
+[2025-07-09 03:40:32] [Rank 0] step:6521/10000 train_time:446967ms step_avg:68.54ms
+[2025-07-09 03:40:33] [Rank 0] step:6541/10000 train_time:448331ms step_avg:68.54ms
+[2025-07-09 03:40:34] [Rank 0] step:6561/10000 train_time:449696ms step_avg:68.54ms
+[2025-07-09 03:40:36] [Rank 0] step:6581/10000 train_time:451062ms step_avg:68.54ms
+[2025-07-09 03:40:37] [Rank 0] step:6601/10000 train_time:452429ms step_avg:68.54ms
+[2025-07-09 03:40:39] [Rank 0] step:6621/10000 train_time:453797ms step_avg:68.54ms
+[2025-07-09 03:40:40] [Rank 0] step:6641/10000 train_time:455166ms step_avg:68.54ms
+[2025-07-09 03:40:41] [Rank 0] step:6661/10000 train_time:456537ms step_avg:68.54ms
+[2025-07-09 03:40:43] [Rank 0] step:6681/10000 train_time:457959ms step_avg:68.55ms
+[2025-07-09 03:40:44] [Rank 0] step:6701/10000 train_time:459330ms step_avg:68.55ms
+[2025-07-09 03:40:45] [Rank 0] step:6721/10000 train_time:460700ms step_avg:68.55ms
+[2025-07-09 03:40:47] [Rank 0] step:6741/10000 train_time:462072ms step_avg:68.55ms
+[2025-07-09 03:40:48] [Rank 0] step:6761/10000 train_time:463446ms step_avg:68.55ms
+[2025-07-09 03:40:50] [Rank 0] step:6781/10000 train_time:464819ms step_avg:68.55ms
+[2025-07-09 03:40:51] [Rank 0] step:6801/10000 train_time:466192ms step_avg:68.55ms
+[2025-07-09 03:40:52] [Rank 0] step:6821/10000 train_time:467565ms step_avg:68.55ms
+[2025-07-09 03:40:54] [Rank 0] step:6841/10000 train_time:468986ms step_avg:68.56ms
+[2025-07-09 03:40:55] [Rank 0] step:6861/10000 train_time:470312ms step_avg:68.55ms
+[2025-07-09 03:40:56] [Rank 0] step:6881/10000 train_time:471714ms step_avg:68.55ms
+[2025-07-09 03:40:58] [Rank 0] step:6901/10000 train_time:473088ms step_avg:68.55ms
+[2025-07-09 03:40:59] [Rank 0] step:6921/10000 train_time:474463ms step_avg:68.55ms
+[2025-07-09 03:41:01] [Rank 0] step:6941/10000 train_time:475839ms step_avg:68.55ms
+[2025-07-09 03:41:02] [Rank 0] step:6961/10000 train_time:477215ms step_avg:68.56ms
+[2025-07-09 03:41:03] [Rank 0] step:6981/10000 train_time:478591ms step_avg:68.56ms
+[2025-07-09 03:41:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:41:06] [Rank 0] PRINT: step:7000/10000 train_loss:2.3653 val_loss:2.3154 train_time:480595ms step_avg:68.66ms
+[2025-07-09 03:41:06] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:41:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:41:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:46:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:46:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:46:32] [Rank 0] Total Loss: 4.7413
+[2025-07-09 03:46:32] [Rank 0] Total FTA: 0.0916
+[2025-07-09 03:46:32] [Rank 0] Group 0 Loss: 4.9346
+[2025-07-09 03:46:32] [Rank 0] Group 1 Loss: 4.6075
+[2025-07-09 03:46:32] [Rank 0] Group 2 Loss: 4.7615
+[2025-07-09 03:46:32] [Rank 0] Group 3 Loss: 4.6336
+[2025-07-09 03:46:32] [Rank 0] Group 4 Loss: 4.6677
+[2025-07-09 03:46:32] [Rank 0] Group 5 Loss: 4.6828
+[2025-07-09 03:46:32] [Rank 0] Group 6 Loss: 4.6848
+[2025-07-09 03:46:32] [Rank 0] Group 7 Loss: 4.7644
+[2025-07-09 03:46:32] [Rank 0] Group 8 Loss: 4.7073
+[2025-07-09 03:46:32] [Rank 0] Group 9 Loss: 4.7235
+[2025-07-09 03:46:32] [Rank 0] Group 10 Loss: 4.7454
+[2025-07-09 03:46:32] [Rank 0] Group 11 Loss: 4.7564
+[2025-07-09 03:46:32] [Rank 0] Group 0 FTA: 0.1834
+[2025-07-09 03:46:32] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:46:32] [Rank 0] Group 2 FTA: 0.0885
+[2025-07-09 03:46:32] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-09 03:46:32] [Rank 0] Group 4 FTA: 0.0312
+[2025-07-09 03:46:32] [Rank 0] Group 5 FTA: 0.0990
+[2025-07-09 03:46:32] [Rank 0] Group 6 FTA: 0.1146
+[2025-07-09 03:46:32] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-09 03:46:32] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-09 03:46:32] [Rank 0] Group 9 FTA: 0.1211
+[2025-07-09 03:46:32] [Rank 0] Group 10 FTA: 0.0723
+[2025-07-09 03:46:32] [Rank 0] Group 11 FTA: 0.0938
+[2025-07-09 03:46:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:46:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:46:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:46:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:46:34] [Rank 0] step:7001/10000 train_time:480605ms step_avg:68.65ms
+[2025-07-09 03:46:35] [Rank 0] step:7021/10000 train_time:482074ms step_avg:68.66ms
+[2025-07-09 03:46:37] [Rank 0] step:7041/10000 train_time:482811ms step_avg:68.57ms
+[2025-07-09 03:46:38] [Rank 0] step:7061/10000 train_time:484178ms step_avg:68.57ms
+[2025-07-09 03:46:39] [Rank 0] step:7081/10000 train_time:485545ms step_avg:68.57ms
+[2025-07-09 03:46:41] [Rank 0] step:7101/10000 train_time:486913ms step_avg:68.57ms
+[2025-07-09 03:46:42] [Rank 0] step:7121/10000 train_time:488283ms step_avg:68.57ms
+[2025-07-09 03:46:43] [Rank 0] step:7141/10000 train_time:489654ms step_avg:68.57ms
+[2025-07-09 03:46:45] [Rank 0] step:7161/10000 train_time:491025ms step_avg:68.57ms
+[2025-07-09 03:46:46] [Rank 0] step:7181/10000 train_time:492399ms step_avg:68.57ms
+[2025-07-09 03:46:48] [Rank 0] step:7201/10000 train_time:493772ms step_avg:68.57ms
+[2025-07-09 03:46:49] [Rank 0] step:7221/10000 train_time:495166ms step_avg:68.57ms
+[2025-07-09 03:46:50] [Rank 0] step:7241/10000 train_time:496538ms step_avg:68.57ms
+[2025-07-09 03:46:52] [Rank 0] step:7261/10000 train_time:497909ms step_avg:68.57ms
+[2025-07-09 03:46:53] [Rank 0] step:7281/10000 train_time:499282ms step_avg:68.57ms
+[2025-07-09 03:46:54] [Rank 0] step:7301/10000 train_time:500656ms step_avg:68.57ms
+[2025-07-09 03:46:56] [Rank 0] step:7321/10000 train_time:502030ms step_avg:68.57ms
+[2025-07-09 03:46:57] [Rank 0] step:7341/10000 train_time:503405ms step_avg:68.57ms
+[2025-07-09 03:46:59] [Rank 0] step:7361/10000 train_time:504780ms step_avg:68.57ms
+[2025-07-09 03:47:00] [Rank 0] step:7381/10000 train_time:506154ms step_avg:68.58ms
+[2025-07-09 03:47:01] [Rank 0] step:7401/10000 train_time:507549ms step_avg:68.58ms
+[2025-07-09 03:47:03] [Rank 0] step:7421/10000 train_time:508925ms step_avg:68.58ms
+[2025-07-09 03:47:04] [Rank 0] step:7441/10000 train_time:510300ms step_avg:68.58ms
+[2025-07-09 03:47:05] [Rank 0] step:7461/10000 train_time:511676ms step_avg:68.58ms
+[2025-07-09 03:47:07] [Rank 0] step:7481/10000 train_time:513052ms step_avg:68.58ms
+[2025-07-09 03:47:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:47:09] [Rank 0] PRINT: step:7500/10000 train_loss:2.2673 val_loss:2.2262 train_time:515053ms step_avg:68.67ms
+[2025-07-09 03:47:09] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:47:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:47:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:52:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:52:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:52:34] [Rank 0] Total Loss: 4.6820
+[2025-07-09 03:52:34] [Rank 0] Total FTA: 0.0897
+[2025-07-09 03:52:34] [Rank 0] Group 0 Loss: 4.9402
+[2025-07-09 03:52:34] [Rank 0] Group 1 Loss: 4.4923
+[2025-07-09 03:52:34] [Rank 0] Group 2 Loss: 4.7182
+[2025-07-09 03:52:34] [Rank 0] Group 3 Loss: 4.5258
+[2025-07-09 03:52:34] [Rank 0] Group 4 Loss: 4.6356
+[2025-07-09 03:52:34] [Rank 0] Group 5 Loss: 4.6311
+[2025-07-09 03:52:34] [Rank 0] Group 6 Loss: 4.6281
+[2025-07-09 03:52:34] [Rank 0] Group 7 Loss: 4.6842
+[2025-07-09 03:52:34] [Rank 0] Group 8 Loss: 4.6729
+[2025-07-09 03:52:34] [Rank 0] Group 9 Loss: 4.6444
+[2025-07-09 03:52:34] [Rank 0] Group 10 Loss: 4.6868
+[2025-07-09 03:52:34] [Rank 0] Group 11 Loss: 4.6707
+[2025-07-09 03:52:34] [Rank 0] Group 0 FTA: 0.1964
+[2025-07-09 03:52:34] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:52:34] [Rank 0] Group 2 FTA: 0.0781
+[2025-07-09 03:52:34] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-09 03:52:34] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-09 03:52:34] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-09 03:52:34] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-09 03:52:34] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-09 03:52:34] [Rank 0] Group 8 FTA: 0.0729
+[2025-07-09 03:52:34] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-09 03:52:34] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-09 03:52:34] [Rank 0] Group 11 FTA: 0.0967
+[2025-07-09 03:52:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:52:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:52:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:52:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:52:36] [Rank 0] step:7501/10000 train_time:515062ms step_avg:68.67ms
+[2025-07-09 03:52:37] [Rank 0] step:7521/10000 train_time:515814ms step_avg:68.58ms
+[2025-07-09 03:52:38] [Rank 0] step:7541/10000 train_time:517178ms step_avg:68.58ms
+[2025-07-09 03:52:40] [Rank 0] step:7561/10000 train_time:519208ms step_avg:68.67ms
+[2025-07-09 03:52:41] [Rank 0] step:7581/10000 train_time:519945ms step_avg:68.59ms
+[2025-07-09 03:52:43] [Rank 0] step:7601/10000 train_time:521312ms step_avg:68.58ms
+[2025-07-09 03:52:44] [Rank 0] step:7621/10000 train_time:522680ms step_avg:68.58ms
+[2025-07-09 03:52:45] [Rank 0] step:7641/10000 train_time:524049ms step_avg:68.58ms
+[2025-07-09 03:52:47] [Rank 0] step:7661/10000 train_time:525418ms step_avg:68.58ms
+[2025-07-09 03:52:48] [Rank 0] step:7681/10000 train_time:526790ms step_avg:68.58ms
+[2025-07-09 03:52:49] [Rank 0] step:7701/10000 train_time:528161ms step_avg:68.58ms
+[2025-07-09 03:52:51] [Rank 0] step:7721/10000 train_time:529535ms step_avg:68.58ms
+[2025-07-09 03:52:52] [Rank 0] step:7741/10000 train_time:531159ms step_avg:68.62ms
+[2025-07-09 03:52:54] [Rank 0] step:7761/10000 train_time:532330ms step_avg:68.59ms
+[2025-07-09 03:52:55] [Rank 0] step:7781/10000 train_time:533703ms step_avg:68.59ms
+[2025-07-09 03:52:56] [Rank 0] step:7801/10000 train_time:535077ms step_avg:68.59ms
+[2025-07-09 03:52:58] [Rank 0] step:7821/10000 train_time:536452ms step_avg:68.59ms
+[2025-07-09 03:52:59] [Rank 0] step:7841/10000 train_time:537826ms step_avg:68.59ms
+[2025-07-09 03:53:00] [Rank 0] step:7861/10000 train_time:539201ms step_avg:68.59ms
+[2025-07-09 03:53:02] [Rank 0] step:7881/10000 train_time:540577ms step_avg:68.59ms
+[2025-07-09 03:53:03] [Rank 0] step:7901/10000 train_time:541953ms step_avg:68.59ms
+[2025-07-09 03:53:05] [Rank 0] step:7921/10000 train_time:543329ms step_avg:68.59ms
+[2025-07-09 03:53:06] [Rank 0] step:7941/10000 train_time:544750ms step_avg:68.60ms
+[2025-07-09 03:53:07] [Rank 0] step:7961/10000 train_time:546126ms step_avg:68.60ms
+[2025-07-09 03:53:09] [Rank 0] step:7981/10000 train_time:547504ms step_avg:68.60ms
+[2025-07-09 03:53:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:53:11] [Rank 0] PRINT: step:8000/10000 train_loss:2.1852 val_loss:2.1509 train_time:549508ms step_avg:68.69ms
+[2025-07-09 03:53:11] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:53:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:53:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 03:58:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 03:58:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 03:58:36] [Rank 0] Total Loss: 4.6579
+[2025-07-09 03:58:36] [Rank 0] Total FTA: 0.0882
+[2025-07-09 03:58:36] [Rank 0] Group 0 Loss: 4.8672
+[2025-07-09 03:58:36] [Rank 0] Group 1 Loss: 4.4633
+[2025-07-09 03:58:36] [Rank 0] Group 2 Loss: 4.6773
+[2025-07-09 03:58:36] [Rank 0] Group 3 Loss: 4.5540
+[2025-07-09 03:58:36] [Rank 0] Group 4 Loss: 4.5979
+[2025-07-09 03:58:36] [Rank 0] Group 5 Loss: 4.5838
+[2025-07-09 03:58:36] [Rank 0] Group 6 Loss: 4.5840
+[2025-07-09 03:58:36] [Rank 0] Group 7 Loss: 4.6750
+[2025-07-09 03:58:36] [Rank 0] Group 8 Loss: 4.6712
+[2025-07-09 03:58:36] [Rank 0] Group 9 Loss: 4.6763
+[2025-07-09 03:58:36] [Rank 0] Group 10 Loss: 4.6661
+[2025-07-09 03:58:36] [Rank 0] Group 11 Loss: 4.6630
+[2025-07-09 03:58:36] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-09 03:58:36] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 03:58:36] [Rank 0] Group 2 FTA: 0.0703
+[2025-07-09 03:58:36] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-09 03:58:36] [Rank 0] Group 4 FTA: 0.0130
+[2025-07-09 03:58:36] [Rank 0] Group 5 FTA: 0.0938
+[2025-07-09 03:58:36] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-09 03:58:36] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-09 03:58:36] [Rank 0] Group 8 FTA: 0.1198
+[2025-07-09 03:58:36] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-09 03:58:36] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-09 03:58:36] [Rank 0] Group 11 FTA: 0.0928
+[2025-07-09 03:58:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 03:58:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 03:58:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 03:58:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 03:58:38] [Rank 0] step:8001/10000 train_time:549518ms step_avg:68.68ms
+[2025-07-09 03:58:39] [Rank 0] step:8021/10000 train_time:550434ms step_avg:68.62ms
+[2025-07-09 03:58:40] [Rank 0] step:8041/10000 train_time:551799ms step_avg:68.62ms
+[2025-07-09 03:58:42] [Rank 0] step:8061/10000 train_time:553165ms step_avg:68.62ms
+[2025-07-09 03:58:43] [Rank 0] step:8081/10000 train_time:554532ms step_avg:68.62ms
+[2025-07-09 03:58:45] [Rank 0] step:8101/10000 train_time:555899ms step_avg:68.62ms
+[2025-07-09 03:58:46] [Rank 0] step:8121/10000 train_time:557297ms step_avg:68.62ms
+[2025-07-09 03:58:47] [Rank 0] step:8141/10000 train_time:558665ms step_avg:68.62ms
+[2025-07-09 03:58:49] [Rank 0] step:8161/10000 train_time:560062ms step_avg:68.63ms
+[2025-07-09 03:58:50] [Rank 0] step:8181/10000 train_time:561434ms step_avg:68.63ms
+[2025-07-09 03:58:51] [Rank 0] step:8201/10000 train_time:562809ms step_avg:68.63ms
+[2025-07-09 03:58:53] [Rank 0] step:8221/10000 train_time:564182ms step_avg:68.63ms
+[2025-07-09 03:58:54] [Rank 0] step:8241/10000 train_time:565556ms step_avg:68.63ms
+[2025-07-09 03:58:56] [Rank 0] step:8261/10000 train_time:566930ms step_avg:68.63ms
+[2025-07-09 03:58:57] [Rank 0] step:8281/10000 train_time:568555ms step_avg:68.66ms
+[2025-07-09 03:58:58] [Rank 0] step:8301/10000 train_time:569716ms step_avg:68.63ms
+[2025-07-09 03:59:00] [Rank 0] step:8321/10000 train_time:571090ms step_avg:68.63ms
+[2025-07-09 03:59:01] [Rank 0] step:8341/10000 train_time:572465ms step_avg:68.63ms
+[2025-07-09 03:59:02] [Rank 0] step:8361/10000 train_time:573842ms step_avg:68.63ms
+[2025-07-09 03:59:04] [Rank 0] step:8381/10000 train_time:575218ms step_avg:68.63ms
+[2025-07-09 03:59:05] [Rank 0] step:8401/10000 train_time:576592ms step_avg:68.63ms
+[2025-07-09 03:59:07] [Rank 0] step:8421/10000 train_time:577967ms step_avg:68.63ms
+[2025-07-09 03:59:08] [Rank 0] step:8441/10000 train_time:579343ms step_avg:68.63ms
+[2025-07-09 03:59:09] [Rank 0] step:8461/10000 train_time:580762ms step_avg:68.64ms
+[2025-07-09 03:59:11] [Rank 0] step:8481/10000 train_time:582135ms step_avg:68.64ms
+[2025-07-09 03:59:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 03:59:13] [Rank 0] PRINT: step:8500/10000 train_loss:2.1175 val_loss:2.0924 train_time:584139ms step_avg:68.72ms
+[2025-07-09 03:59:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 03:59:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 03:59:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 04:04:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 04:04:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 04:04:36] [Rank 0] Total Loss: 4.6409
+[2025-07-09 04:04:36] [Rank 0] Total FTA: 0.0959
+[2025-07-09 04:04:36] [Rank 0] Group 0 Loss: 4.8871
+[2025-07-09 04:04:36] [Rank 0] Group 1 Loss: 4.5019
+[2025-07-09 04:04:36] [Rank 0] Group 2 Loss: 4.6301
+[2025-07-09 04:04:36] [Rank 0] Group 3 Loss: 4.5153
+[2025-07-09 04:04:36] [Rank 0] Group 4 Loss: 4.6332
+[2025-07-09 04:04:36] [Rank 0] Group 5 Loss: 4.5538
+[2025-07-09 04:04:36] [Rank 0] Group 6 Loss: 4.5722
+[2025-07-09 04:04:36] [Rank 0] Group 7 Loss: 4.6618
+[2025-07-09 04:04:36] [Rank 0] Group 8 Loss: 4.6496
+[2025-07-09 04:04:36] [Rank 0] Group 9 Loss: 4.6979
+[2025-07-09 04:04:36] [Rank 0] Group 10 Loss: 4.6433
+[2025-07-09 04:04:36] [Rank 0] Group 11 Loss: 4.5938
+[2025-07-09 04:04:36] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-09 04:04:36] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 04:04:36] [Rank 0] Group 2 FTA: 0.2031
+[2025-07-09 04:04:36] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-09 04:04:36] [Rank 0] Group 4 FTA: 0.0417
+[2025-07-09 04:04:36] [Rank 0] Group 5 FTA: 0.0859
+[2025-07-09 04:04:36] [Rank 0] Group 6 FTA: 0.0859
+[2025-07-09 04:04:36] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-09 04:04:36] [Rank 0] Group 8 FTA: 0.0807
+[2025-07-09 04:04:36] [Rank 0] Group 9 FTA: 0.0703
+[2025-07-09 04:04:36] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-09 04:04:36] [Rank 0] Group 11 FTA: 0.0986
+[2025-07-09 04:04:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 04:04:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 04:04:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 04:04:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 04:04:38] [Rank 0] step:8501/10000 train_time:584151ms step_avg:68.72ms
+[2025-07-09 04:04:39] [Rank 0] step:8521/10000 train_time:584911ms step_avg:68.64ms
+[2025-07-09 04:04:41] [Rank 0] step:8541/10000 train_time:586279ms step_avg:68.64ms
+[2025-07-09 04:04:42] [Rank 0] step:8561/10000 train_time:587647ms step_avg:68.64ms
+[2025-07-09 04:04:43] [Rank 0] step:8581/10000 train_time:589017ms step_avg:68.64ms
+[2025-07-09 04:04:45] [Rank 0] step:8601/10000 train_time:590389ms step_avg:68.64ms
+[2025-07-09 04:04:46] [Rank 0] step:8621/10000 train_time:591758ms step_avg:68.64ms
+[2025-07-09 04:04:47] [Rank 0] step:8641/10000 train_time:593130ms step_avg:68.64ms
+[2025-07-09 04:04:49] [Rank 0] step:8661/10000 train_time:594541ms step_avg:68.65ms
+[2025-07-09 04:04:50] [Rank 0] step:8681/10000 train_time:595913ms step_avg:68.65ms
+[2025-07-09 04:04:52] [Rank 0] step:8701/10000 train_time:597286ms step_avg:68.65ms
+[2025-07-09 04:04:53] [Rank 0] step:8721/10000 train_time:598660ms step_avg:68.65ms
+[2025-07-09 04:04:54] [Rank 0] step:8741/10000 train_time:600034ms step_avg:68.65ms
+[2025-07-09 04:04:56] [Rank 0] step:8761/10000 train_time:601408ms step_avg:68.65ms
+[2025-07-09 04:04:57] [Rank 0] step:8781/10000 train_time:602781ms step_avg:68.65ms
+[2025-07-09 04:04:58] [Rank 0] step:8801/10000 train_time:604156ms step_avg:68.65ms
+[2025-07-09 04:05:00] [Rank 0] step:8821/10000 train_time:605530ms step_avg:68.65ms
+[2025-07-09 04:05:01] [Rank 0] step:8841/10000 train_time:606926ms step_avg:68.65ms
+[2025-07-09 04:05:03] [Rank 0] step:8861/10000 train_time:608301ms step_avg:68.65ms
+[2025-07-09 04:05:04] [Rank 0] step:8881/10000 train_time:609676ms step_avg:68.65ms
+[2025-07-09 04:05:05] [Rank 0] step:8901/10000 train_time:611051ms step_avg:68.65ms
+[2025-07-09 04:05:07] [Rank 0] step:8921/10000 train_time:612427ms step_avg:68.65ms
+[2025-07-09 04:05:08] [Rank 0] step:8941/10000 train_time:613809ms step_avg:68.65ms
+[2025-07-09 04:05:09] [Rank 0] step:8961/10000 train_time:615185ms step_avg:68.65ms
+[2025-07-09 04:05:11] [Rank 0] step:8981/10000 train_time:616589ms step_avg:68.65ms
+[2025-07-09 04:05:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 04:05:13] [Rank 0] PRINT: step:9000/10000 train_loss:2.0647 val_loss:2.0453 train_time:618591ms step_avg:68.73ms
+[2025-07-09 04:05:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 04:05:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 04:05:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 04:10:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 04:10:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 04:10:37] [Rank 0] Total Loss: 4.6315
+[2025-07-09 04:10:37] [Rank 0] Total FTA: 0.0849
+[2025-07-09 04:10:37] [Rank 0] Group 0 Loss: 4.9008
+[2025-07-09 04:10:37] [Rank 0] Group 1 Loss: 4.4577
+[2025-07-09 04:10:37] [Rank 0] Group 2 Loss: 4.6442
+[2025-07-09 04:10:37] [Rank 0] Group 3 Loss: 4.5263
+[2025-07-09 04:10:37] [Rank 0] Group 4 Loss: 4.6121
+[2025-07-09 04:10:37] [Rank 0] Group 5 Loss: 4.5476
+[2025-07-09 04:10:37] [Rank 0] Group 6 Loss: 4.5665
+[2025-07-09 04:10:37] [Rank 0] Group 7 Loss: 4.6586
+[2025-07-09 04:10:37] [Rank 0] Group 8 Loss: 4.6163
+[2025-07-09 04:10:37] [Rank 0] Group 9 Loss: 4.5881
+[2025-07-09 04:10:37] [Rank 0] Group 10 Loss: 4.6324
+[2025-07-09 04:10:37] [Rank 0] Group 11 Loss: 4.5981
+[2025-07-09 04:10:37] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-09 04:10:37] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-09 04:10:37] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-09 04:10:37] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-09 04:10:37] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-09 04:10:37] [Rank 0] Group 5 FTA: 0.0885
+[2025-07-09 04:10:37] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-09 04:10:37] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-09 04:10:37] [Rank 0] Group 8 FTA: 0.0807
+[2025-07-09 04:10:37] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-09 04:10:37] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-09 04:10:37] [Rank 0] Group 11 FTA: 0.0869
+[2025-07-09 04:10:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 04:10:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 04:10:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 04:10:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 04:10:38] [Rank 0] step:9001/10000 train_time:618613ms step_avg:68.73ms
+[2025-07-09 04:10:40] [Rank 0] step:9021/10000 train_time:619650ms step_avg:68.69ms
+[2025-07-09 04:10:41] [Rank 0] step:9041/10000 train_time:621014ms step_avg:68.69ms
+[2025-07-09 04:10:43] [Rank 0] step:9061/10000 train_time:622381ms step_avg:68.69ms
+[2025-07-09 04:10:44] [Rank 0] step:9081/10000 train_time:623748ms step_avg:68.69ms
+[2025-07-09 04:10:45] [Rank 0] step:9101/10000 train_time:625117ms step_avg:68.69ms
+[2025-07-09 04:10:47] [Rank 0] step:9121/10000 train_time:626489ms step_avg:68.69ms
+[2025-07-09 04:10:48] [Rank 0] step:9141/10000 train_time:627861ms step_avg:68.69ms
+[2025-07-09 04:10:49] [Rank 0] step:9161/10000 train_time:629233ms step_avg:68.69ms
+[2025-07-09 04:10:51] [Rank 0] step:9181/10000 train_time:630857ms step_avg:68.71ms
+[2025-07-09 04:10:52] [Rank 0] step:9201/10000 train_time:632026ms step_avg:68.69ms
+[2025-07-09 04:10:54] [Rank 0] step:9221/10000 train_time:633400ms step_avg:68.69ms
+[2025-07-09 04:10:55] [Rank 0] step:9241/10000 train_time:634771ms step_avg:68.69ms
+[2025-07-09 04:10:56] [Rank 0] step:9261/10000 train_time:636145ms step_avg:68.69ms
+[2025-07-09 04:10:58] [Rank 0] step:9281/10000 train_time:637519ms step_avg:68.69ms
+[2025-07-09 04:10:59] [Rank 0] step:9301/10000 train_time:638894ms step_avg:68.69ms
+[2025-07-09 04:11:00] [Rank 0] step:9321/10000 train_time:640268ms step_avg:68.69ms
+[2025-07-09 04:11:02] [Rank 0] step:9341/10000 train_time:641641ms step_avg:68.69ms
+[2025-07-09 04:11:03] [Rank 0] step:9361/10000 train_time:643015ms step_avg:68.69ms
+[2025-07-09 04:11:05] [Rank 0] step:9381/10000 train_time:644410ms step_avg:68.69ms
+[2025-07-09 04:11:06] [Rank 0] step:9401/10000 train_time:645783ms step_avg:68.69ms
+[2025-07-09 04:11:07] [Rank 0] step:9421/10000 train_time:647157ms step_avg:68.69ms
+[2025-07-09 04:11:09] [Rank 0] step:9441/10000 train_time:648531ms step_avg:68.69ms
+[2025-07-09 04:11:10] [Rank 0] step:9461/10000 train_time:649904ms step_avg:68.69ms
+[2025-07-09 04:11:11] [Rank 0] step:9481/10000 train_time:651278ms step_avg:68.69ms
+[2025-07-09 04:11:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 04:11:14] [Rank 0] PRINT: step:9500/10000 train_loss:2.0240 val_loss:2.0124 train_time:653278ms step_avg:68.77ms
+[2025-07-09 04:11:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 04:11:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 04:11:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 04:16:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 04:16:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 04:16:40] [Rank 0] Total Loss: 4.6317
+[2025-07-09 04:16:40] [Rank 0] Total FTA: 0.1053
+[2025-07-09 04:16:40] [Rank 0] Group 0 Loss: 4.8154
+[2025-07-09 04:16:40] [Rank 0] Group 1 Loss: 4.4551
+[2025-07-09 04:16:40] [Rank 0] Group 2 Loss: 4.6456
+[2025-07-09 04:16:40] [Rank 0] Group 3 Loss: 4.5542
+[2025-07-09 04:16:40] [Rank 0] Group 4 Loss: 4.5850
+[2025-07-09 04:16:40] [Rank 0] Group 5 Loss: 4.5737
+[2025-07-09 04:16:40] [Rank 0] Group 6 Loss: 4.5523
+[2025-07-09 04:16:40] [Rank 0] Group 7 Loss: 4.6801
+[2025-07-09 04:16:40] [Rank 0] Group 8 Loss: 4.6321
+[2025-07-09 04:16:40] [Rank 0] Group 9 Loss: 4.6644
+[2025-07-09 04:16:40] [Rank 0] Group 10 Loss: 4.6420
+[2025-07-09 04:16:40] [Rank 0] Group 11 Loss: 4.6213
+[2025-07-09 04:16:40] [Rank 0] Group 0 FTA: 0.1521
+[2025-07-09 04:16:40] [Rank 0] Group 1 FTA: 0.1510
+[2025-07-09 04:16:40] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-09 04:16:40] [Rank 0] Group 3 FTA: 0.0547
+[2025-07-09 04:16:40] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-09 04:16:40] [Rank 0] Group 5 FTA: 0.0964
+[2025-07-09 04:16:40] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-09 04:16:40] [Rank 0] Group 7 FTA: 0.1328
+[2025-07-09 04:16:40] [Rank 0] Group 8 FTA: 0.1172
+[2025-07-09 04:16:40] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-09 04:16:40] [Rank 0] Group 10 FTA: 0.1191
+[2025-07-09 04:16:40] [Rank 0] Group 11 FTA: 0.0918
+[2025-07-09 04:16:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 04:16:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 04:16:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 04:16:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 04:16:41] [Rank 0] step:9501/10000 train_time:653288ms step_avg:68.76ms
+[2025-07-09 04:16:43] [Rank 0] step:9521/10000 train_time:654075ms step_avg:68.70ms
+[2025-07-09 04:16:44] [Rank 0] step:9541/10000 train_time:655440ms step_avg:68.70ms
+[2025-07-09 04:16:45] [Rank 0] step:9561/10000 train_time:656806ms step_avg:68.70ms
+[2025-07-09 04:16:47] [Rank 0] step:9581/10000 train_time:658178ms step_avg:68.70ms
+[2025-07-09 04:16:48] [Rank 0] step:9601/10000 train_time:659546ms step_avg:68.70ms
+[2025-07-09 04:16:50] [Rank 0] step:9621/10000 train_time:660914ms step_avg:68.69ms
+[2025-07-09 04:16:51] [Rank 0] step:9641/10000 train_time:662284ms step_avg:68.69ms
+[2025-07-09 04:16:52] [Rank 0] step:9661/10000 train_time:663655ms step_avg:68.69ms
+[2025-07-09 04:16:54] [Rank 0] step:9681/10000 train_time:665026ms step_avg:68.69ms
+[2025-07-09 04:16:55] [Rank 0] step:9701/10000 train_time:666397ms step_avg:68.69ms
+[2025-07-09 04:16:56] [Rank 0] step:9721/10000 train_time:667769ms step_avg:68.69ms
+[2025-07-09 04:16:58] [Rank 0] step:9741/10000 train_time:669192ms step_avg:68.70ms
+[2025-07-09 04:16:59] [Rank 0] step:9761/10000 train_time:670565ms step_avg:68.70ms
+[2025-07-09 04:17:01] [Rank 0] step:9781/10000 train_time:671939ms step_avg:68.70ms
+[2025-07-09 04:17:02] [Rank 0] step:9801/10000 train_time:673314ms step_avg:68.70ms
+[2025-07-09 04:17:03] [Rank 0] step:9821/10000 train_time:674689ms step_avg:68.70ms
+[2025-07-09 04:17:05] [Rank 0] step:9841/10000 train_time:676065ms step_avg:68.70ms
+[2025-07-09 04:17:06] [Rank 0] step:9861/10000 train_time:677440ms step_avg:68.70ms
+[2025-07-09 04:17:07] [Rank 0] step:9881/10000 train_time:678815ms step_avg:68.70ms
+[2025-07-09 04:17:09] [Rank 0] step:9901/10000 train_time:680190ms step_avg:68.70ms
+[2025-07-09 04:17:10] [Rank 0] step:9921/10000 train_time:681611ms step_avg:68.70ms
+[2025-07-09 04:17:12] [Rank 0] step:9941/10000 train_time:682986ms step_avg:68.70ms
+[2025-07-09 04:17:13] [Rank 0] step:9961/10000 train_time:684362ms step_avg:68.70ms
+[2025-07-09 04:17:14] [Rank 0] step:9981/10000 train_time:685737ms step_avg:68.70ms
+[2025-07-09 04:17:16] [Rank 0] step:10000/10000 train_time:687045ms step_avg:68.70ms
+[2025-07-09 04:17:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-09 04:17:17] [Rank 0] PRINT: step:10000/10000 train_loss:1.9951 val_loss:1.9902 train_time:687745ms step_avg:68.77ms
+[2025-07-09 04:17:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-09 04:17:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-09 04:17:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-09 04:22:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-09 04:22:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-09 04:22:38] [Rank 0] Total Loss: 4.6232
+[2025-07-09 04:22:38] [Rank 0] Total FTA: 0.1007
+[2025-07-09 04:22:38] [Rank 0] Group 0 Loss: 4.8248
+[2025-07-09 04:22:38] [Rank 0] Group 1 Loss: 4.4460
+[2025-07-09 04:22:38] [Rank 0] Group 2 Loss: 4.6569
+[2025-07-09 04:22:38] [Rank 0] Group 3 Loss: 4.5243
+[2025-07-09 04:22:38] [Rank 0] Group 4 Loss: 4.6509
+[2025-07-09 04:22:38] [Rank 0] Group 5 Loss: 4.5294
+[2025-07-09 04:22:38] [Rank 0] Group 6 Loss: 4.5496
+[2025-07-09 04:22:38] [Rank 0] Group 7 Loss: 4.6148
+[2025-07-09 04:22:38] [Rank 0] Group 8 Loss: 4.6026
+[2025-07-09 04:22:38] [Rank 0] Group 9 Loss: 4.6360
+[2025-07-09 04:22:38] [Rank 0] Group 10 Loss: 4.6232
+[2025-07-09 04:22:38] [Rank 0] Group 11 Loss: 4.6228
+[2025-07-09 04:22:38] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-09 04:22:38] [Rank 0] Group 1 FTA: 0.2005
+[2025-07-09 04:22:38] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-09 04:22:38] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-09 04:22:38] [Rank 0] Group 4 FTA: 0.0312
+[2025-07-09 04:22:38] [Rank 0] Group 5 FTA: 0.1146
+[2025-07-09 04:22:38] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-09 04:22:38] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-09 04:22:38] [Rank 0] Group 8 FTA: 0.0807
+[2025-07-09 04:22:38] [Rank 0] Group 9 FTA: 0.0625
+[2025-07-09 04:22:38] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-09 04:22:38] [Rank 0] Group 11 FTA: 0.0996
+[2025-07-09 04:22:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_loss_curves.png
+[2025-07-09 04:22:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/per_class_acc_curves.png
+[2025-07-09 04:22:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_loss_curve.png
+[2025-07-09 04:22:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0001_seed_48/total_acc_curve.png
+[2025-07-09 04:22:40] [Rank 0] step:10001/10000 train_time:687757ms step_avg:68.77ms
+[2025-07-09 04:22:40] [Rank 0] PRINT: --- Training Finished: Wed Jul 9 04:22:40 2025 ---
+[2025-07-09 04:22:40] [Rank 0] PRINT: Peak memory allocated: 9138 MiB reserved: 10596 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ddf50f1039604c7c5e94c0682fd5fedc3256a4f4
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "2d1a10fa-93b0-433f-a03c-235076d16d4a",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..12ffaa9c9aa67f22b19ad7a07db709c90b370e62
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3587c57e7e34f8d56dd1c3f33f9b6dbfe646b9b7077d89d3c6643a2ad1fd9af5
+size 342063
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..e43b6fe9faa9ab3794996edc1d17d2fd306499fe
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a417f86205411946109cab070a7c51028b64d2179398c1669da7fbdcff5f63f9
+size 291633
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..98df5bbd5e2139cd9757aee61528b6d7ce53f180
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:517e4f2b3a02b0125466700eedd418612368de6068153a24cb21b71b3b9af496
+size 94395
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..09d58d745371103855ba2e00b17357b0abba2f6b
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49ebebab72b832b5c13a632fd99569a06cbbb1c005ef0580d7c6f462577c0820
+size 108025
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/training_log_2d1a10fa-93b0-433f-a03c-235076d16d4a.txt b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/training_log_2d1a10fa-93b0-433f-a03c-235076d16d4a.txt
new file mode 100644
index 0000000000000000000000000000000000000000..61bbbf9c26fcf3a726a8e9149b59478c0aea9d4c
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/training_log_2d1a10fa-93b0-433f-a03c-235076d16d4a.txt
@@ -0,0 +1,5132 @@
+[2025-07-07 12:11:58] [Rank 0] PRINT: --- Script Start: Mon Jul 7 12:11:58 2025 ---
+[2025-07-07 12:11:58] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002)
+[2025-07-07 12:11:58] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 12:11:58] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-07 12:11:58] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42
+[2025-07-07 12:11:58] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
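+
+# The register_autograd call below attaches the hand-written backward (and its
+# setup_context) to the custom matmul op, so it participates in autograd. The
+# real implementations live in utils/float_compute.py, which this log does not
+# show; a minimal sketch of the same torch.library custom-op pattern, with
+# hypothetical names, looks like:
+#
+#   @torch.library.custom_op("mylib::mm", mutates_args=())
+#   def mm(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
+#       return a @ b
+#
+#   def setup_context(ctx, inputs, output):
+#       ctx.save_for_backward(*inputs)   # stash a, b for the backward pass
+#
+#   def backward(ctx, grad):
+#       a, b = ctx.saved_tensors
+#       return grad @ b.T, a.T @ grad    # grads w.r.t. a and b
+#
+#   mm.register_autograd(backward, setup_context=setup_context)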
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+    assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # cycle through the shards so training can run for multiple epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
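+# Usage sketch for the generator above (illustrative values, not taken from
+# this run): every rank calls it with the same global batch_size; each rank
+# then reads only its own local_batch_size slice, and targets are simply the
+# inputs shifted right by one token (next-token prediction).
+#
+#   train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+#   inputs, targets = next(train_loader)  # int32 inputs, int64 targets, both on cuda
+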
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000  # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500  # Original: 125
+    save_checkpoint = False  # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank)  # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
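+# The run folder name encodes the sweep axes (optimizer mode, parameterization,
+# Adam LR, seed). For example, optimizer_mode=5, model_parameterization="qkvo",
+# adam_lr=0.0002, seed=42 lands in
+# logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42, the directory this
+# very log file sits in.
+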
+base_log_dir = Path("logs_bios/qa_0704")  # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Write each message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
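+# First-token accuracy (FTA), as reported in the logs: the model is given the
+# cleaned question up to and including "?", and its greedy next-token
+# prediction counts as correct only if it equals the first token of
+# " <answer>" (the leading space is included so tokenization matches the
+# training text). The core check, extracted from the loop below with
+# simplified names:
+#
+#   last_logits = logits[prompt_len - 1, :]        # logits at the last prompt position
+#   predicted = torch.argmax(last_logits).item()
+#   correct += int(predicted == sample['expected_token'])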
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode the answer with a leading space so the first token matches the training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # evaluate the first num_samples cached samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            print(f"  Prompt : '{sample['prompt']}'")
+            print(f"  Answer : '{sample['answer']}'")
+            print(
+                f"  Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad (with EOS) or trim the prompt to BLOCK_SIZE tokens
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        # The logits at the last prompt position predict the first answer token
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append({
+                                'idx': idx,
+                                'prompt': sample['prompt'],
+                                'answer': sample['answer'],
+                                'predicted_token': predicted_token,
+                                'expected_token': sample['expected_token'],
+                                'pred_text': tokenizer.decode([predicted_token]),
+                                'exp_text': tokenizer.decode([sample['expected_token']]),
+                                'is_correct': is_correct,
+                            })
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f"  Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f"  Prompt : '{result['prompt']}'")
+            print(f"  Expected answer: '{result['answer']}'")
+            print(f"  Prediction: {result['predicted_token']} -> '{result['pred_text']}'")
+            print(f"  Expected  : {result['expected_token']} -> '{result['exp_text']}'")
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f"  First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f"  Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f"  Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct per-class sample counts matching the paper's power-law distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        # Group 0 holds one class sampled 2**m times; group g > 0 holds 2**(g-1)
+        # classes, each sampled 2**(m-g) times (2**(m-1) samples per group).
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
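+# Worked example (m=3) of the distribution built above:
+#   group 0: 1 class  x 8 samples  -> class 0
+#   group 1: 1 class  x 4 samples  -> class 1
+#   group 2: 2 classes x 2 samples -> classes 2-3
+#   group 3: 4 classes x 1 sample  -> classes 4-7
+# i.e. generate_powerlaw_selection_counts(3) returns
+#   ({0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}, [0, 1, 2, 2, 3, 3, 3, 3])
+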
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single pass, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)   # loss sample count per group
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)     # FTA sample count per group
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']:
+                continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None:
+                continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2:
+                continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match:
+                continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer:
+                continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple):
+                logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
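+# Illustrative shape of run_detailed_evaluation's return value (numbers are
+# hypothetical); group keys are strings indexed by the power-law group id:
+#   {'per_class_loss': {'0': 1.23, '1': 1.48, ...},
+#    'per_class_acc':  {'0': 0.81, '1': 0.66, ...},
+#    'total_loss': 1.52, 'total_acc': 0.58}
+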
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values():
+                all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
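+# Minimal usage sketch for plot_curves (hypothetical values; both key levels
+# are strings, matching how `history` is populated in the training loop below):
+#
+#   per_class = {'0': {'500': 2.1, '1000': 1.7}, '1': {'500': 2.4, '1000': 2.0}}
+#   plot_curves(per_class, run_dir_path / "demo.png", "Per-Class Loss", "Loss")
+#   total = {'500': 2.2, '1000': 1.8}
+#   plot_curves(total, run_dir_path / "demo_total.png", "Total Loss", "Loss")
+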
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """Internal evaluation on the original QA data for per-class loss."""
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    # 1. Load and (optionally) subsample the QA data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Create the class -> group mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']:
+                continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None:
+                continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2:
+                continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot per-class loss curves from aggregated history data."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+########################################
+# Construct model and optimizer        #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)  # target_seq=None
+        model.train()
+
+        if isinstance(result, tuple) and len(result) == 2:
+            loss, logits = result
+            print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+        else:
+            print0(f"PRINT: Model returns: {type(result)}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test still fails: {e}", console=True)
+
+
+# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+if exp_args.model_parameterization == "qkvo":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []   # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in the CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'):
+                attn_q_params.append(block_module.attn.q_w)
+            else:
+                print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'):
+                attn_k_params.append(block_module.attn.k_w)
+            else:
+                print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'):
+                attn_v_params.append(block_module.attn.v_w)
+            else:
+                print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+    # Combine into logical groups for the experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+    # Scalar parameters (everything not explicitly grouped as a matrix)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine the parameter distribution for the requested optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []           # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr      # LR for matrices when Adam handles them (tunable)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all hidden matrices
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, and scalars by default; no extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # All Adam: no Muon, every matrix goes to Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn and W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn and MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn and W_2 MLP, Adam on QK Attn and W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
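+    # Optional sanity check (an illustrative addition, not part of the logged
+    # run): whichever mode was chosen, no matrix should be assigned to both
+    # optimizers; the mode table above constructs disjoint groups.
+    _muon_ids = {id(p) for p in muon_params_target_list}
+    _adam_ids = {id(p) for p in adam_matrix_target_list}
+    assert not (_muon_ids & _adam_ids), "A matrix was assigned to both Muon and Adam."
+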
+    # Adam optimizer setup. Head, embed, and scalar params always go to Adam;
+    # matrices assigned to Adam for this experiment mode are appended below.
+    adam_param_groups_config = [
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr),
+    ]
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices:  # Only add the group if it has params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # consider adding weight_decay=0.01
+    optimizers = [optimizer1]  # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params:  # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None  # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. a mode where Adam handles all matrices).", console=True)
+        optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # Small Adam epsilon by @YouJiacheng. This is an alternate method of fixing
+    # the world_size dependence discovered by @fernbear.bsky.social
+    # https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# Learning rate schedule: constant, then a linear cooldown to 10% of the base LR
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # The original `assert 0 <= x < 1` fails on the final step, so clamp instead
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Guard against division by zero when cooldown_frac is 0
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# Attention window size schedule: grows linearly with training progress
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure the window size is at least 128 tokens (one block)
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
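+# Resulting schedule values (derived from get_lr / get_window_size_blocks above
+# with num_iterations=10000 and cooldown_frac=0.8):
+#   step     0: lr multiplier 1.00, window  128 tokens
+#   step  2000: lr multiplier 1.00, window  384 tokens
+#   step  6000: lr multiplier 0.55, window 1152 tokens
+#   step 10000: lr multiplier 0.10, window 1792 tokens
+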
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels                       #
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+
+# Restore the pre-warmup state so warmup does not affect training
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation              #
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed; val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA:  {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA:  {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        # Reset the running training-loss accumulators for the next interval
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    # Apply the LR schedule to every optimizer group
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    # Muon momentum warmup over the first 300 steps
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
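+# Minimal sketch (hypothetical, not part of the logged run) for inspecting a
+# checkpoint written by the block above when save_checkpoint=True; the saved
+# dict has the keys step / code / model / optimizers:
+#
+#   import torch
+#   ckpt = torch.load("checkpoints/ckpt_epoch_500.pt", map_location="cpu")
+#   print(ckpt["step"])                      # training step of the snapshot
+#   model.load_state_dict(ckpt["model"])     # model built as in this script
+#   for opt, sd in zip(optimizers, ckpt["optimizers"]):
+#       opt.load_state_dict(sd)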
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
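+# ---------------------------------------------------------------------
+# For reference before the training loop below: a minimal worked example
+# of the two schedules defined above (a sketch, assuming the values from
+# this run's config: num_iterations=10000, cooldown_frac=0.8).
+#
+#   get_lr(1000)  -> 1.0    # x=0.1, inside the stable phase (x < 0.2)
+#   get_lr(6000)  -> 0.55   # x=0.6, w=0.5 -> 0.5*1.0 + 0.5*0.1
+#   get_lr(10000) -> 0.1    # end of the linear cooldown
+#
+# The sliding attention window ramps linearly with training progress:
+#
+#   get_window_size_blocks(0).item()     -> 1   # 128-token window
+#   get_window_size_blocks(5000).item()  -> 7   # 1728*0.5=864, rounded up to 896
+#   get_window_size_blocks(10000).item() -> 14  # 1792-token window
+# ---------------------------------------------------------------------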
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 12:11:59] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 12:11:59] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 12:11:59] [Rank 0] PRINT: Constructing model... +[2025-07-07 12:11:59] [Rank 0] PRINT: Constructing model... +[2025-07-07 12:12:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 12:12:01] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 12:12:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 12:12:01] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 12:12:01] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 12:12:01] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 12:12:02] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 12:12:02] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 12:12:02] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 12:12:02] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 12:12:02] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 12:12:02] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 12:12:02] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 12:12:02] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 12:12:02] [Rank 0] PRINT: Model returns: +[2025-07-07 12:12:02] [Rank 0] PRINT: Model returns: +[2025-07-07 12:12:02] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 12:12:02] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 12:12:02] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 12:12:02] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 12:12:02] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-07 12:12:02] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002). +[2025-07-07 12:12:02] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 12:12:02] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 12:12:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 12:12:02] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 12:12:02] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 12:12:02] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 12:12:02] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 12:12:02] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 12:12:02] [Rank 0] PRINT: Starting warmup... +[2025-07-07 12:12:02] [Rank 0] PRINT: Starting warmup... +[2025-07-07 12:13:07] [Rank 0] PRINT: Warmup complete. +[2025-07-07 12:13:07] [Rank 0] PRINT: Warmup complete. +[2025-07-07 12:13:08] [Rank 0] PRINT: Starting training... +[2025-07-07 12:13:08] [Rank 0] PRINT: Starting training... +[2025-07-07 12:13:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 12:13:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 12:13:15] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 12:13:15] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 12:13:17] [Rank 0] step:21/10000 train_time:920ms step_avg:43.80ms +[2025-07-07 12:13:17] [Rank 0] step:21/10000 train_time:920ms step_avg:43.80ms +[2025-07-07 12:13:18] [Rank 0] step:41/10000 train_time:2246ms step_avg:54.79ms +[2025-07-07 12:13:18] [Rank 0] step:41/10000 train_time:2246ms step_avg:54.79ms +[2025-07-07 12:13:19] [Rank 0] step:61/10000 train_time:3571ms step_avg:58.55ms +[2025-07-07 12:13:19] [Rank 0] step:61/10000 train_time:3571ms step_avg:58.55ms +[2025-07-07 12:13:21] [Rank 0] step:81/10000 train_time:4897ms step_avg:60.46ms +[2025-07-07 12:13:21] [Rank 0] step:81/10000 train_time:4897ms step_avg:60.46ms +[2025-07-07 12:13:22] [Rank 0] step:101/10000 train_time:6222ms step_avg:61.60ms +[2025-07-07 12:13:22] [Rank 0] step:101/10000 train_time:6222ms step_avg:61.60ms +[2025-07-07 12:13:23] [Rank 0] step:121/10000 train_time:7547ms step_avg:62.37ms +[2025-07-07 12:13:23] [Rank 0] step:121/10000 train_time:7547ms step_avg:62.37ms +[2025-07-07 12:13:25] [Rank 0] step:141/10000 train_time:8872ms step_avg:62.92ms +[2025-07-07 12:13:25] [Rank 0] step:141/10000 train_time:8872ms step_avg:62.92ms +[2025-07-07 12:13:26] [Rank 0] step:161/10000 train_time:10197ms step_avg:63.34ms +[2025-07-07 12:13:26] [Rank 0] step:161/10000 train_time:10197ms step_avg:63.34ms +[2025-07-07 12:13:27] [Rank 0] step:181/10000 train_time:12194ms step_avg:67.37ms +[2025-07-07 12:13:27] [Rank 0] step:181/10000 train_time:12194ms step_avg:67.37ms +[2025-07-07 12:13:29] [Rank 0] step:201/10000 train_time:12908ms step_avg:64.22ms +[2025-07-07 12:13:29] [Rank 0] step:201/10000 train_time:12908ms step_avg:64.22ms +[2025-07-07 12:13:30] [Rank 0] step:221/10000 train_time:14234ms step_avg:64.41ms +[2025-07-07 12:13:30] [Rank 0] step:221/10000 train_time:14234ms step_avg:64.41ms +[2025-07-07 12:13:31] [Rank 0] step:241/10000 train_time:15561ms step_avg:64.57ms +[2025-07-07 12:13:31] [Rank 0] step:241/10000 train_time:15561ms step_avg:64.57ms +[2025-07-07 12:13:33] [Rank 0] step:261/10000 train_time:16888ms step_avg:64.71ms +[2025-07-07 12:13:33] [Rank 0] step:261/10000 train_time:16888ms step_avg:64.71ms +[2025-07-07 12:13:34] [Rank 0] step:281/10000 train_time:18214ms step_avg:64.82ms +[2025-07-07 12:13:34] [Rank 0] step:281/10000 train_time:18214ms step_avg:64.82ms +[2025-07-07 12:13:35] [Rank 0] step:301/10000 train_time:19541ms step_avg:64.92ms +[2025-07-07 12:13:35] [Rank 0] step:301/10000 train_time:19541ms step_avg:64.92ms +[2025-07-07 12:13:37] [Rank 0] step:321/10000 train_time:20869ms step_avg:65.01ms +[2025-07-07 12:13:37] [Rank 0] step:321/10000 train_time:20869ms step_avg:65.01ms +[2025-07-07 12:13:38] [Rank 0] step:341/10000 train_time:22196ms step_avg:65.09ms +[2025-07-07 12:13:38] [Rank 0] step:341/10000 train_time:22196ms step_avg:65.09ms +[2025-07-07 12:13:39] [Rank 0] step:361/10000 train_time:23574ms step_avg:65.30ms +[2025-07-07 12:13:39] [Rank 0] step:361/10000 train_time:23574ms step_avg:65.30ms +[2025-07-07 12:13:41] [Rank 0] step:381/10000 train_time:24901ms step_avg:65.36ms +[2025-07-07 12:13:41] [Rank 0] step:381/10000 train_time:24901ms step_avg:65.36ms +[2025-07-07 12:13:42] [Rank 0] step:401/10000 train_time:26230ms step_avg:65.41ms +[2025-07-07 12:13:42] [Rank 0] step:401/10000 train_time:26230ms step_avg:65.41ms +[2025-07-07 12:13:43] [Rank 0] step:421/10000 train_time:27559ms step_avg:65.46ms 
+[2025-07-07 12:13:43] [Rank 0] step:421/10000 train_time:27559ms step_avg:65.46ms +[2025-07-07 12:13:45] [Rank 0] step:441/10000 train_time:28889ms step_avg:65.51ms +[2025-07-07 12:13:45] [Rank 0] step:441/10000 train_time:28889ms step_avg:65.51ms +[2025-07-07 12:13:46] [Rank 0] step:461/10000 train_time:30217ms step_avg:65.55ms +[2025-07-07 12:13:46] [Rank 0] step:461/10000 train_time:30217ms step_avg:65.55ms +[2025-07-07 12:13:47] [Rank 0] step:481/10000 train_time:31545ms step_avg:65.58ms +[2025-07-07 12:13:47] [Rank 0] step:481/10000 train_time:31545ms step_avg:65.58ms +[2025-07-07 12:13:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 12:13:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 12:13:49] [Rank 0] PRINT: step:500/10000 train_loss:8.7432 val_loss:7.1365 train_time:33480ms step_avg:66.96ms +[2025-07-07 12:13:49] [Rank 0] PRINT: step:500/10000 train_loss:8.7432 val_loss:7.1365 train_time:33480ms step_avg:66.96ms +[2025-07-07 12:13:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 12:13:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 12:13:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 12:13:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 12:13:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 12:13:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 12:19:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 12:19:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 12:19:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 12:19:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 12:19:13] [Rank 0] Total Loss: 7.6989 +[2025-07-07 12:19:13] [Rank 0] Total Loss: 7.6989 +[2025-07-07 12:19:13] [Rank 0] Total FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Total FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 0 Loss: 7.7077 +[2025-07-07 12:19:13] [Rank 0] Group 0 Loss: 7.7077 +[2025-07-07 12:19:13] [Rank 0] Group 1 Loss: 7.6336 +[2025-07-07 12:19:13] [Rank 0] Group 1 Loss: 7.6336 +[2025-07-07 12:19:13] [Rank 0] Group 2 Loss: 7.8363 +[2025-07-07 12:19:13] [Rank 0] Group 2 Loss: 7.8363 +[2025-07-07 12:19:13] [Rank 0] Group 3 Loss: 7.6584 +[2025-07-07 12:19:13] [Rank 0] Group 3 Loss: 7.6584 +[2025-07-07 12:19:13] [Rank 0] Group 4 Loss: 7.7168 +[2025-07-07 12:19:13] [Rank 0] Group 4 Loss: 7.7168 +[2025-07-07 12:19:13] [Rank 0] Group 5 Loss: 7.6696 +[2025-07-07 12:19:13] [Rank 0] Group 5 Loss: 7.6696 +[2025-07-07 12:19:13] [Rank 0] Group 6 Loss: 7.7048 +[2025-07-07 12:19:13] [Rank 0] Group 6 Loss: 7.7048 +[2025-07-07 12:19:13] [Rank 0] Group 7 Loss: 7.7005 +[2025-07-07 12:19:13] [Rank 0] Group 7 Loss: 7.7005 +[2025-07-07 12:19:13] [Rank 0] Group 8 Loss: 7.6674 +[2025-07-07 12:19:13] [Rank 0] Group 8 Loss: 7.6674 +[2025-07-07 12:19:13] [Rank 0] Group 9 Loss: 7.6739 +[2025-07-07 12:19:13] [Rank 0] Group 9 Loss: 7.6739 +[2025-07-07 12:19:13] [Rank 0] Group 10 Loss: 7.6900 +[2025-07-07 12:19:13] [Rank 0] Group 10 Loss: 7.6900 +[2025-07-07 12:19:13] [Rank 0] Group 11 Loss: 7.7045 +[2025-07-07 12:19:13] [Rank 0] Group 11 Loss: 7.7045 +[2025-07-07 12:19:13] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 
12:19:13] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-07 12:19:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 12:19:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 12:19:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 12:19:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 12:19:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 12:19:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 12:19:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 12:19:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 12:19:14] [Rank 0] step:501/10000 train_time:33488ms step_avg:66.84ms +[2025-07-07 12:19:14] [Rank 0] step:501/10000 train_time:33488ms step_avg:66.84ms +[2025-07-07 12:19:15] [Rank 0] step:521/10000 train_time:34218ms step_avg:65.68ms +[2025-07-07 12:19:15] [Rank 0] step:521/10000 train_time:34218ms step_avg:65.68ms +[2025-07-07 12:19:17] [Rank 0] step:541/10000 train_time:36201ms step_avg:66.92ms +[2025-07-07 12:19:17] [Rank 0] step:541/10000 train_time:36201ms step_avg:66.92ms +[2025-07-07 12:19:18] [Rank 0] step:561/10000 train_time:36915ms step_avg:65.80ms +[2025-07-07 12:19:18] [Rank 0] step:561/10000 train_time:36915ms step_avg:65.80ms +[2025-07-07 12:19:20] [Rank 0] step:581/10000 train_time:38237ms step_avg:65.81ms +[2025-07-07 12:19:20] [Rank 0] step:581/10000 train_time:38237ms step_avg:65.81ms +[2025-07-07 12:19:21] [Rank 0] step:601/10000 train_time:39561ms step_avg:65.83ms +[2025-07-07 12:19:21] [Rank 0] step:601/10000 train_time:39561ms step_avg:65.83ms +[2025-07-07 12:19:22] [Rank 0] step:621/10000 train_time:40888ms step_avg:65.84ms +[2025-07-07 12:19:22] [Rank 0] step:621/10000 train_time:40888ms 
step_avg:65.84ms +[2025-07-07 12:19:23] [Rank 0] step:641/10000 train_time:42213ms step_avg:65.86ms +[2025-07-07 12:19:23] [Rank 0] step:641/10000 train_time:42213ms step_avg:65.86ms +[2025-07-07 12:19:25] [Rank 0] step:661/10000 train_time:43542ms step_avg:65.87ms +[2025-07-07 12:19:25] [Rank 0] step:661/10000 train_time:43542ms step_avg:65.87ms +[2025-07-07 12:19:26] [Rank 0] step:681/10000 train_time:44872ms step_avg:65.89ms +[2025-07-07 12:19:26] [Rank 0] step:681/10000 train_time:44872ms step_avg:65.89ms +[2025-07-07 12:19:27] [Rank 0] step:701/10000 train_time:46205ms step_avg:65.91ms +[2025-07-07 12:19:27] [Rank 0] step:701/10000 train_time:46205ms step_avg:65.91ms +[2025-07-07 12:19:29] [Rank 0] step:721/10000 train_time:48202ms step_avg:66.85ms +[2025-07-07 12:19:29] [Rank 0] step:721/10000 train_time:48202ms step_avg:66.85ms +[2025-07-07 12:19:30] [Rank 0] step:741/10000 train_time:48919ms step_avg:66.02ms +[2025-07-07 12:19:30] [Rank 0] step:741/10000 train_time:48919ms step_avg:66.02ms +[2025-07-07 12:19:32] [Rank 0] step:761/10000 train_time:50256ms step_avg:66.04ms +[2025-07-07 12:19:32] [Rank 0] step:761/10000 train_time:50256ms step_avg:66.04ms +[2025-07-07 12:19:33] [Rank 0] step:781/10000 train_time:51599ms step_avg:66.07ms +[2025-07-07 12:19:33] [Rank 0] step:781/10000 train_time:51599ms step_avg:66.07ms +[2025-07-07 12:19:34] [Rank 0] step:801/10000 train_time:52943ms step_avg:66.10ms +[2025-07-07 12:19:34] [Rank 0] step:801/10000 train_time:52943ms step_avg:66.10ms +[2025-07-07 12:19:36] [Rank 0] step:821/10000 train_time:54287ms step_avg:66.12ms +[2025-07-07 12:19:36] [Rank 0] step:821/10000 train_time:54287ms step_avg:66.12ms +[2025-07-07 12:19:37] [Rank 0] step:841/10000 train_time:55632ms step_avg:66.15ms +[2025-07-07 12:19:37] [Rank 0] step:841/10000 train_time:55632ms step_avg:66.15ms +[2025-07-07 12:19:38] [Rank 0] step:861/10000 train_time:56980ms step_avg:66.18ms +[2025-07-07 12:19:38] [Rank 0] step:861/10000 train_time:56980ms step_avg:66.18ms +[2025-07-07 12:19:40] [Rank 0] step:881/10000 train_time:58325ms step_avg:66.20ms +[2025-07-07 12:19:40] [Rank 0] step:881/10000 train_time:58325ms step_avg:66.20ms +[2025-07-07 12:19:41] [Rank 0] step:901/10000 train_time:59923ms step_avg:66.51ms +[2025-07-07 12:19:41] [Rank 0] step:901/10000 train_time:59923ms step_avg:66.51ms +[2025-07-07 12:19:42] [Rank 0] step:921/10000 train_time:61054ms step_avg:66.29ms +[2025-07-07 12:19:42] [Rank 0] step:921/10000 train_time:61054ms step_avg:66.29ms +[2025-07-07 12:19:44] [Rank 0] step:941/10000 train_time:62401ms step_avg:66.31ms +[2025-07-07 12:19:44] [Rank 0] step:941/10000 train_time:62401ms step_avg:66.31ms +[2025-07-07 12:19:45] [Rank 0] step:961/10000 train_time:63747ms step_avg:66.33ms +[2025-07-07 12:19:45] [Rank 0] step:961/10000 train_time:63747ms step_avg:66.33ms +[2025-07-07 12:19:46] [Rank 0] step:981/10000 train_time:65096ms step_avg:66.36ms +[2025-07-07 12:19:46] [Rank 0] step:981/10000 train_time:65096ms step_avg:66.36ms +[2025-07-07 12:19:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 12:19:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 12:19:49] [Rank 0] PRINT: step:1000/10000 train_loss:6.1672 val_loss:5.3581 train_time:67057ms step_avg:67.06ms +[2025-07-07 12:19:49] [Rank 0] PRINT: step:1000/10000 train_loss:6.1672 val_loss:5.3581 train_time:67057ms step_avg:67.06ms +[2025-07-07 12:19:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 12:19:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 12:19:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 12:19:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 12:19:49] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 12:19:49] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 12:25:14] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 12:25:14] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 12:25:14] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 12:25:14] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 12:25:14] [Rank 0] Total Loss: 6.2766 +[2025-07-07 12:25:14] [Rank 0] Total Loss: 6.2766 +[2025-07-07 12:25:14] [Rank 0] Total FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Total FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 0 Loss: 6.1702 +[2025-07-07 12:25:14] [Rank 0] Group 0 Loss: 6.1702 +[2025-07-07 12:25:14] [Rank 0] Group 1 Loss: 6.2860 +[2025-07-07 12:25:14] [Rank 0] Group 1 Loss: 6.2860 +[2025-07-07 12:25:14] [Rank 0] Group 2 Loss: 6.4285 +[2025-07-07 12:25:14] [Rank 0] Group 2 Loss: 6.4285 +[2025-07-07 12:25:14] [Rank 0] Group 3 Loss: 6.2269 +[2025-07-07 12:25:14] [Rank 0] Group 3 Loss: 6.2269 +[2025-07-07 12:25:14] [Rank 0] Group 4 Loss: 6.3246 +[2025-07-07 12:25:14] [Rank 0] Group 4 Loss: 6.3246 +[2025-07-07 12:25:14] [Rank 0] Group 5 Loss: 6.2835 +[2025-07-07 12:25:14] [Rank 0] Group 5 Loss: 6.2835 +[2025-07-07 12:25:14] [Rank 0] Group 6 Loss: 6.3131 +[2025-07-07 12:25:14] [Rank 0] Group 6 Loss: 6.3131 +[2025-07-07 12:25:14] [Rank 0] Group 7 Loss: 6.2704 +[2025-07-07 12:25:14] [Rank 0] Group 7 Loss: 6.2704 +[2025-07-07 12:25:14] [Rank 0] Group 8 Loss: 6.2557 +[2025-07-07 12:25:14] [Rank 0] Group 8 Loss: 6.2557 +[2025-07-07 12:25:14] [Rank 0] Group 9 Loss: 6.2691 +[2025-07-07 12:25:14] [Rank 0] Group 9 Loss: 6.2691 +[2025-07-07 12:25:14] [Rank 0] Group 10 Loss: 6.2826 +[2025-07-07 12:25:14] [Rank 0] Group 10 Loss: 6.2826 +[2025-07-07 12:25:14] [Rank 0] Group 11 Loss: 6.2892 +[2025-07-07 12:25:14] [Rank 0] Group 11 Loss: 6.2892 +[2025-07-07 12:25:14] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 9 FTA: 0.0000 
+[2025-07-07 12:25:14] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-07 12:25:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 12:25:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 12:25:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 12:25:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 12:25:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 12:25:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 12:25:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 12:25:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 12:25:15] [Rank 0] step:1001/10000 train_time:67066ms step_avg:67.00ms +[2025-07-07 12:25:15] [Rank 0] step:1001/10000 train_time:67066ms step_avg:67.00ms +[2025-07-07 12:25:17] [Rank 0] step:1021/10000 train_time:67823ms step_avg:66.43ms +[2025-07-07 12:25:17] [Rank 0] step:1021/10000 train_time:67823ms step_avg:66.43ms +[2025-07-07 12:25:18] [Rank 0] step:1041/10000 train_time:69166ms step_avg:66.44ms +[2025-07-07 12:25:18] [Rank 0] step:1041/10000 train_time:69166ms step_avg:66.44ms +[2025-07-07 12:25:19] [Rank 0] step:1061/10000 train_time:70509ms step_avg:66.46ms +[2025-07-07 12:25:19] [Rank 0] step:1061/10000 train_time:70509ms step_avg:66.46ms +[2025-07-07 12:25:21] [Rank 0] step:1081/10000 train_time:71904ms step_avg:66.52ms +[2025-07-07 12:25:21] [Rank 0] step:1081/10000 train_time:71904ms step_avg:66.52ms +[2025-07-07 12:25:22] [Rank 0] step:1101/10000 train_time:73266ms step_avg:66.54ms +[2025-07-07 12:25:22] [Rank 0] step:1101/10000 train_time:73266ms step_avg:66.54ms +[2025-07-07 12:25:24] [Rank 0] step:1121/10000 train_time:74610ms step_avg:66.56ms +[2025-07-07 12:25:24] [Rank 0] step:1121/10000 train_time:74610ms step_avg:66.56ms +[2025-07-07 12:25:25] [Rank 0] step:1141/10000 train_time:75954ms step_avg:66.57ms +[2025-07-07 12:25:25] [Rank 0] step:1141/10000 train_time:75954ms step_avg:66.57ms +[2025-07-07 12:25:26] [Rank 0] step:1161/10000 train_time:77300ms step_avg:66.58ms +[2025-07-07 12:25:26] [Rank 0] step:1161/10000 train_time:77300ms step_avg:66.58ms +[2025-07-07 12:25:28] [Rank 0] step:1181/10000 train_time:78648ms step_avg:66.59ms +[2025-07-07 12:25:28] [Rank 0] step:1181/10000 train_time:78648ms step_avg:66.59ms +[2025-07-07 12:25:29] [Rank 0] step:1201/10000 train_time:79995ms step_avg:66.61ms +[2025-07-07 12:25:29] [Rank 0] step:1201/10000 train_time:79995ms step_avg:66.61ms +[2025-07-07 12:25:30] [Rank 0] step:1221/10000 train_time:81342ms step_avg:66.62ms +[2025-07-07 12:25:30] [Rank 0] step:1221/10000 train_time:81342ms step_avg:66.62ms 
+[2025-07-07 12:25:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
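The arithmetic behind this warning: 1966080 / 262144 = 7.5, so a validation loop that consumes whole batches runs 7 times and the trailing half batch is skipped (assuming the loop floors the batch count; the script only prints the warning, so this is an inference from the numbers):

    val_tokens = 1966080
    val_batch_size = 262144

    full_batches, leftover = divmod(val_tokens, val_batch_size)
    print(full_batches)  # 7 full validation batches
    print(leftover)      # 131072 tokens that "might be missed"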
+[2025-07-07 12:25:50] [Rank 0] PRINT: step:1500/10000 train_loss:4.7910 val_loss:4.2740 train_time:100937ms step_avg:67.29ms
+[2025-07-07 12:25:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 12:25:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 12:25:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 12:31:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 12:31:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 12:31:16] [Rank 0] Total Loss: 5.5359
+[2025-07-07 12:31:16] [Rank 0] Total FTA: 0.0728
+[2025-07-07 12:31:16] [Rank 0] Group 0 Loss: 5.4001
+[2025-07-07 12:31:16] [Rank 0] Group 1 Loss: 5.5167
+[2025-07-07 12:31:16] [Rank 0] Group 2 Loss: 5.6330
+[2025-07-07 12:31:16] [Rank 0] Group 3 Loss: 5.4692
+[2025-07-07 12:31:16] [Rank 0] Group 4 Loss: 5.6166
+[2025-07-07 12:31:16] [Rank 0] Group 5 Loss: 5.5372
+[2025-07-07 12:31:16] [Rank 0] Group 6 Loss: 5.5814
+[2025-07-07 12:31:16] [Rank 0] Group 7 Loss: 5.5646
+[2025-07-07 12:31:16] [Rank 0] Group 8 Loss: 5.5289
+[2025-07-07 12:31:16] [Rank 0] Group 9 Loss: 5.5705
+[2025-07-07 12:31:16] [Rank 0] Group 10 Loss: 5.5679
+[2025-07-07 12:31:16] [Rank 0] Group 11 Loss: 5.5533
+[2025-07-07 12:31:16] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 12:31:16] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 12:31:16] [Rank 0] Group 2 FTA: 0.0885
+[2025-07-07 12:31:16] [Rank 0] Group 3 FTA: 0.0521
+[2025-07-07 12:31:16] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 12:31:16] [Rank 0] Group 5 FTA: 0.0286
+[2025-07-07 12:31:16] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-07 12:31:16] [Rank 0] Group 7 FTA: 0.0677
+[2025-07-07 12:31:16] [Rank 0] Group 8 FTA: 0.0859
+[2025-07-07 12:31:16] [Rank 0] Group 9 FTA: 0.0430
+[2025-07-07 12:31:16] [Rank 0] Group 10 FTA: 0.0664
+[2025-07-07 12:31:16] [Rank 0] Group 11 FTA: 0.0713
+[2025-07-07 12:31:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 12:31:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 12:31:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 12:31:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
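The per-group block above suggests a simple accumulate-then-average pattern: one loss sum, one first-token-hit count, and one sample count per group, with the totals taken over all samples. A hedged sketch (the excerpt does not show the evaluation code, so the structure, and the reading of FTA as first-token accuracy, are assumptions):

    from collections import defaultdict

    loss_sum = defaultdict(float)  # group id -> summed per-sample loss
    fta_hits = defaultdict(int)    # group id -> samples whose first token was correct
    counts = defaultdict(int)      # group id -> number of samples seen

    def accumulate(group: int, sample_loss: float, first_token_correct: bool) -> None:
        loss_sum[group] += sample_loss
        fta_hits[group] += int(first_token_correct)
        counts[group] += 1

    def report() -> None:
        n = sum(counts.values())
        print(f"Total Loss: {sum(loss_sum.values()) / n:.4f}")
        print(f"Total FTA: {sum(fta_hits.values()) / n:.4f}")
        for g in sorted(counts):
            print(f"Group {g} Loss: {loss_sum[g] / counts[g]:.4f}")
        for g in sorted(counts):
            print(f"Group {g} FTA: {fta_hits[g] / counts[g]:.4f}")

Under this reading, a Total FTA of 0.0728 alongside Group 0 at 0.1664 just means the strong group accounts for a minority of the 5633 samples.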
+[2025-07-07 12:31:18] [Rank 0] step:1501/10000 train_time:100947ms step_avg:67.25ms
+[2025-07-07 12:31:19] [Rank 0] step:1521/10000 train_time:101690ms step_avg:66.86ms
+[2025-07-07 12:31:20] [Rank 0] step:1541/10000 train_time:103034ms step_avg:66.86ms
+[2025-07-07 12:31:22] [Rank 0] step:1561/10000 train_time:104379ms step_avg:66.87ms
+[2025-07-07 12:31:23] [Rank 0] step:1581/10000 train_time:105725ms step_avg:66.87ms
+[2025-07-07 12:31:24] [Rank 0] step:1601/10000 train_time:107072ms step_avg:66.88ms
+[2025-07-07 12:31:26] [Rank 0] step:1621/10000 train_time:109086ms step_avg:67.30ms
+[2025-07-07 12:31:27] [Rank 0] step:1641/10000 train_time:109812ms step_avg:66.92ms
+[2025-07-07 12:31:28] [Rank 0] step:1661/10000 train_time:111160ms step_avg:66.92ms
+[2025-07-07 12:31:30] [Rank 0] step:1681/10000 train_time:112509ms step_avg:66.93ms
+[2025-07-07 12:31:31] [Rank 0] step:1701/10000 train_time:113858ms step_avg:66.94ms
+[2025-07-07 12:31:33] [Rank 0] step:1721/10000 train_time:115208ms step_avg:66.94ms
+[2025-07-07 12:31:34] [Rank 0] step:1741/10000 train_time:116559ms step_avg:66.95ms
+[2025-07-07 12:31:35] [Rank 0] step:1761/10000 train_time:117909ms step_avg:66.96ms
+[2025-07-07 12:31:37] [Rank 0] step:1781/10000 train_time:119259ms step_avg:66.96ms
+[2025-07-07 12:31:38] [Rank 0] step:1801/10000 train_time:120857ms step_avg:67.11ms
+[2025-07-07 12:31:39] [Rank 0] step:1821/10000 train_time:122005ms step_avg:67.00ms
+[2025-07-07 12:31:41] [Rank 0] step:1841/10000 train_time:123354ms step_avg:67.00ms
+[2025-07-07 12:31:42] [Rank 0] step:1861/10000 train_time:124704ms step_avg:67.01ms
+[2025-07-07 12:31:43] [Rank 0] step:1881/10000 train_time:126054ms step_avg:67.01ms
+[2025-07-07 12:31:45] [Rank 0] step:1901/10000 train_time:127405ms step_avg:67.02ms
+[2025-07-07 12:31:46] [Rank 0] step:1921/10000 train_time:128754ms step_avg:67.02ms
+[2025-07-07 12:31:47] [Rank 0] step:1941/10000 train_time:130104ms step_avg:67.03ms
+[2025-07-07 12:31:49] [Rank 0] step:1961/10000 train_time:131453ms step_avg:67.03ms
+[2025-07-07 12:31:50] [Rank 0] step:1981/10000 train_time:133054ms step_avg:67.17ms
+[2025-07-07 12:31:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 12:31:52] [Rank 0] PRINT: step:2000/10000 train_loss:3.8009 val_loss:3.3638 train_time:134811ms step_avg:67.41ms
+[2025-07-07 12:31:52] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 12:31:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 12:31:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 12:37:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 12:37:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 12:37:18] [Rank 0] Total Loss: 4.9619
+[2025-07-07 12:37:18] [Rank 0] Total FTA: 0.0696
+[2025-07-07 12:37:18] [Rank 0] Group 0 Loss: 4.8467
+[2025-07-07 12:37:18] [Rank 0] Group 1 Loss: 4.9501
+[2025-07-07 12:37:18] [Rank 0] Group 2 Loss: 5.0299
+[2025-07-07 12:37:18] [Rank 0] Group 3 Loss: 4.9049
+[2025-07-07 12:37:18] [Rank 0] Group 4 Loss: 5.0341
+[2025-07-07 12:37:18] [Rank 0] Group 5 Loss: 4.9498
+[2025-07-07 12:37:18] [Rank 0] Group 6 Loss: 4.9968
+[2025-07-07 12:37:18] [Rank 0] Group 7 Loss: 5.0040
+[2025-07-07 12:37:18] [Rank 0] Group 8 Loss: 4.9592
+[2025-07-07 12:37:18] [Rank 0] Group 9 Loss: 4.9784
+[2025-07-07 12:37:18] [Rank 0] Group 10 Loss: 4.9841
+[2025-07-07 12:37:18] [Rank 0] Group 11 Loss: 4.9830
+[2025-07-07 12:37:18] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 12:37:18] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 12:37:18] [Rank 0] Group 2 FTA: 0.0964
+[2025-07-07 12:37:18] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 12:37:18] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 12:37:18] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-07 12:37:18] [Rank 0] Group 6 FTA: 0.0651
+[2025-07-07 12:37:18] [Rank 0] Group 7 FTA: 0.0625
+[2025-07-07 12:37:18] [Rank 0] Group 8 FTA: 0.0469
+[2025-07-07 12:37:18] [Rank 0] Group 9 FTA: 0.0625
+[2025-07-07 12:37:18] [Rank 0] Group 10 FTA: 0.0742
+[2025-07-07 12:37:18] [Rank 0] Group 11 FTA: 0.0674
+[2025-07-07 12:37:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 12:37:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 12:37:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 12:37:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
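The four "[✓] ... updated and saved" lines repeat after every detailed evaluation with the same file names, i.e. the PNGs are redrawn from the full history each time rather than appended to. A minimal sketch of that pattern with matplotlib (hypothetical code; the actual plotting routine is not in this excerpt):

    import matplotlib
    matplotlib.use("Agg")  # headless backend: write PNGs without a display
    import matplotlib.pyplot as plt

    history_steps, history_loss = [], []  # extended after each detailed eval

    def update_total_loss_curve(step: int, total_loss: float,
                                path: str = "total_loss_curve.png") -> None:
        history_steps.append(step)
        history_loss.append(total_loss)
        fig, ax = plt.subplots()
        ax.plot(history_steps, history_loss, marker="o")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval loss")
        fig.savefig(path)  # overwrite in place, as the repeated log lines suggest
        plt.close(fig)     # avoid leaking figures when called every 500 steps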
+[2025-07-07 12:37:20] [Rank 0] step:2001/10000 train_time:134821ms step_avg:67.38ms
+[2025-07-07 12:37:21] [Rank 0] step:2021/10000 train_time:135564ms step_avg:67.08ms
+[2025-07-07 12:37:22] [Rank 0] step:2041/10000 train_time:136915ms step_avg:67.08ms
+[2025-07-07 12:37:24] [Rank 0] step:2061/10000 train_time:138260ms step_avg:67.08ms
+[2025-07-07 12:37:25] [Rank 0] step:2081/10000 train_time:139605ms step_avg:67.09ms
+[2025-07-07 12:37:26] [Rank 0] step:2101/10000 train_time:140951ms step_avg:67.09ms
+[2025-07-07 12:37:28] [Rank 0] step:2121/10000 train_time:142297ms step_avg:67.09ms
+[2025-07-07 12:37:29] [Rank 0] step:2141/10000 train_time:143643ms step_avg:67.09ms
+[2025-07-07 12:37:30] [Rank 0] step:2161/10000 train_time:145652ms step_avg:67.40ms
+[2025-07-07 12:37:32] [Rank 0] step:2181/10000 train_time:146378ms step_avg:67.12ms
+[2025-07-07 12:37:33] [Rank 0] step:2201/10000 train_time:147726ms step_avg:67.12ms
+[2025-07-07 12:37:35] [Rank 0] step:2221/10000 train_time:149074ms step_avg:67.12ms
+[2025-07-07 12:37:36] [Rank 0] step:2241/10000 train_time:150432ms step_avg:67.13ms
+[2025-07-07 12:37:37] [Rank 0] step:2261/10000 train_time:151806ms step_avg:67.14ms
+[2025-07-07 12:37:39] [Rank 0] step:2281/10000 train_time:153179ms step_avg:67.15ms
+[2025-07-07 12:37:40] [Rank 0] step:2301/10000 train_time:154552ms step_avg:67.17ms
+[2025-07-07 12:37:41] [Rank 0] step:2321/10000 train_time:155927ms step_avg:67.18ms
+[2025-07-07 12:37:43] [Rank 0] step:2341/10000 train_time:157974ms step_avg:67.48ms
+[2025-07-07 12:37:44] [Rank 0] step:2361/10000 train_time:158713ms step_avg:67.22ms
+[2025-07-07 12:37:46] [Rank 0] step:2381/10000 train_time:160087ms step_avg:67.24ms
+[2025-07-07 12:37:47] [Rank 0] step:2401/10000 train_time:161462ms step_avg:67.25ms
+[2025-07-07 12:37:48] [Rank 0] step:2421/10000 train_time:162837ms step_avg:67.26ms
+[2025-07-07 12:37:50] [Rank 0] step:2441/10000 train_time:164214ms step_avg:67.27ms
+[2025-07-07 12:37:51] [Rank 0] step:2461/10000 train_time:165590ms step_avg:67.29ms
+[2025-07-07 12:37:52] [Rank 0] step:2481/10000 train_time:166966ms step_avg:67.30ms
+[2025-07-07 12:37:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 12:37:55] [Rank 0] PRINT: step:2500/10000 train_loss:2.9945 val_loss:2.6724 train_time:168965ms step_avg:67.59ms
+[2025-07-07 12:37:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 12:37:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 12:37:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 12:43:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 12:43:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 12:43:24] [Rank 0] Total Loss: 4.6431
+[2025-07-07 12:43:24] [Rank 0] Total FTA: 0.0859
+[2025-07-07 12:43:24] [Rank 0] Group 0 Loss: 4.6396
+[2025-07-07 12:43:24] [Rank 0] Group 1 Loss: 4.6108
+[2025-07-07 12:43:24] [Rank 0] Group 2 Loss: 4.5974
+[2025-07-07 12:43:24] [Rank 0] Group 3 Loss: 4.6351
+[2025-07-07 12:43:24] [Rank 0] Group 4 Loss: 4.7133
+[2025-07-07 12:43:24] [Rank 0] Group 5 Loss: 4.5874
+[2025-07-07 12:43:24] [Rank 0] Group 6 Loss: 4.6405
+[2025-07-07 12:43:24] [Rank 0] Group 7 Loss: 4.6599
+[2025-07-07 12:43:24] [Rank 0] Group 8 Loss: 4.6662
+[2025-07-07 12:43:24] [Rank 0] Group 9 Loss: 4.6306
+[2025-07-07 12:43:24] [Rank 0] Group 10 Loss: 4.6474
+[2025-07-07 12:43:24] [Rank 0] Group 11 Loss: 4.6592
+[2025-07-07 12:43:24] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-07 12:43:24] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 12:43:24] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-07 12:43:24] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 12:43:24] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 12:43:24] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-07 12:43:24] [Rank 0] Group 6 FTA: 0.0859
+[2025-07-07 12:43:24] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-07 12:43:24] [Rank 0] Group 8 FTA: 0.1042
+[2025-07-07 12:43:24] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 12:43:24] [Rank 0] Group 10 FTA: 0.1211
+[2025-07-07 12:43:24] [Rank 0] Group 11 FTA: 0.0869
+[2025-07-07 12:43:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 12:43:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 12:43:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 12:43:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
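One thing the timestamps make plain: each detailed evaluation costs about 5.5 minutes of wall clock (12:37:55 to 12:43:24 above, 329 s), while 500 training steps cost about 34 s (500 x ~67 ms), so this run's elapsed time is dominated by evaluation rather than training. A small helper for pulling such durations out of lines like these (hypothetical log-analysis code, not part of the training script):

    from datetime import datetime

    def seconds_between(start_line: str, end_line: str) -> float:
        # Timestamps look like "+[2025-07-07 12:37:55] [Rank 0] ..."
        fmt = "%Y-%m-%d %H:%M:%S"
        ts = lambda line: datetime.strptime(line.split("]")[0].lstrip("+["), fmt)
        return (ts(end_line) - ts(start_line)).total_seconds()

    print(seconds_between(
        "+[2025-07-07 12:37:55] [Rank 0] Evaluation set size after sampling: 5633",
        "+[2025-07-07 12:43:24] [Rank 0] --- Detailed Evaluation Complete ---",
    ))  # 329.0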
+[2025-07-07 12:43:25] [Rank 0] step:2501/10000 train_time:168976ms step_avg:67.56ms
+[2025-07-07 12:43:27] [Rank 0] step:2521/10000 train_time:169783ms step_avg:67.35ms
+[2025-07-07 12:43:28] [Rank 0] step:2541/10000 train_time:171152ms step_avg:67.36ms
+[2025-07-07 12:43:30] [Rank 0] step:2561/10000 train_time:172521ms step_avg:67.36ms
+[2025-07-07 12:43:31] [Rank 0] step:2581/10000 train_time:173891ms step_avg:67.37ms
+[2025-07-07 12:43:32] [Rank 0] step:2601/10000 train_time:175261ms step_avg:67.38ms
+[2025-07-07 12:43:34] [Rank 0] step:2621/10000 train_time:176630ms step_avg:67.39ms
+[2025-07-07 12:43:35] [Rank 0] step:2641/10000 train_time:178001ms step_avg:67.40ms
+[2025-07-07 12:43:36] [Rank 0] step:2661/10000 train_time:179371ms step_avg:67.41ms
+[2025-07-07 12:43:38] [Rank 0] step:2681/10000 train_time:180742ms step_avg:67.42ms
+[2025-07-07 12:43:39] [Rank 0] step:2701/10000 train_time:182183ms step_avg:67.45ms
+[2025-07-07 12:43:41] [Rank 0] step:2721/10000 train_time:183509ms step_avg:67.44ms
+[2025-07-07 12:43:42] [Rank 0] step:2741/10000 train_time:184882ms step_avg:67.45ms
+[2025-07-07 12:43:43] [Rank 0] step:2761/10000 train_time:186255ms step_avg:67.46ms
+[2025-07-07 12:43:45] [Rank 0] step:2781/10000 train_time:187627ms step_avg:67.47ms
+[2025-07-07 12:43:46] [Rank 0] step:2801/10000 train_time:189001ms step_avg:67.48ms
+[2025-07-07 12:43:47] [Rank 0] step:2821/10000 train_time:190375ms step_avg:67.48ms
+[2025-07-07 12:43:49] [Rank 0] step:2841/10000 train_time:191749ms step_avg:67.49ms
+[2025-07-07 12:43:50] [Rank 0] step:2861/10000 train_time:193123ms step_avg:67.50ms
+[2025-07-07 12:43:52] [Rank 0] step:2881/10000 train_time:195151ms step_avg:67.74ms
+[2025-07-07 12:43:53] [Rank 0] step:2901/10000 train_time:195892ms step_avg:67.53ms
+[2025-07-07 12:43:54] [Rank 0] step:2921/10000 train_time:197265ms step_avg:67.53ms
+[2025-07-07 12:43:56] [Rank 0] step:2941/10000 train_time:198640ms step_avg:67.54ms
+[2025-07-07 12:43:57] [Rank 0] step:2961/10000 train_time:200013ms step_avg:67.55ms
+[2025-07-07 12:43:59] [Rank 0] step:2981/10000 train_time:201388ms step_avg:67.56ms
+[2025-07-07 12:44:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 12:44:01] [Rank 0] PRINT: step:3000/10000 train_loss:2.4360 val_loss:2.2410 train_time:203388ms step_avg:67.80ms
+[2025-07-07 12:44:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 12:44:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 12:44:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 12:49:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 12:49:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 12:49:27] [Rank 0] Total Loss: 4.5058
+[2025-07-07 12:49:27] [Rank 0] Total FTA: 0.0888
+[2025-07-07 12:49:27] [Rank 0] Group 0 Loss: 4.6102
+[2025-07-07 12:49:27] [Rank 0] Group 1 Loss: 4.4393
+[2025-07-07 12:49:27] [Rank 0] Group 2 Loss: 4.4949
+[2025-07-07 12:49:27] [Rank 0] Group 3 Loss: 4.5184
+[2025-07-07 12:49:27] [Rank 0] Group 4 Loss: 4.4818
+[2025-07-07 12:49:27] [Rank 0] Group 5 Loss: 4.4436
+[2025-07-07 12:49:27] [Rank 0] Group 6 Loss: 4.4539
+[2025-07-07 12:49:27] [Rank 0] Group 7 Loss: 4.5343
+[2025-07-07 12:49:27] [Rank 0] Group 8 Loss: 4.4882
+[2025-07-07 12:49:27] [Rank 0] Group 9 Loss: 4.4839
+[2025-07-07 12:49:27] [Rank 0] Group 10 Loss: 4.4990
+[2025-07-07 12:49:27] [Rank 0] Group 11 Loss: 4.5082
+[2025-07-07 12:49:27] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 12:49:27] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 12:49:27] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 12:49:27] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 12:49:27] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 12:49:27] [Rank 0] Group 5 FTA: 0.0729
+[2025-07-07 12:49:27] [Rank 0] Group 6 FTA: 0.0859
+[2025-07-07 12:49:27] [Rank 0] Group 7 FTA: 0.0859
+[2025-07-07 12:49:27] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-07 12:49:27] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 12:49:27] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-07 12:49:27] [Rank 0] Group 11 FTA: 0.1084
+[2025-07-07 12:49:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 12:49:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 12:49:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 12:49:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-07 12:49:28] [Rank 0] step:3001/10000 train_time:203398ms step_avg:67.78ms
+[2025-07-07 12:49:30] [Rank 0] step:3021/10000 train_time:204172ms step_avg:67.58ms
+[2025-07-07 12:49:31] [Rank 0] step:3041/10000 train_time:205538ms step_avg:67.59ms
+[2025-07-07 12:49:33] [Rank 0] step:3061/10000 train_time:207586ms step_avg:67.82ms
+[2025-07-07 12:49:34] [Rank 0] step:3081/10000 train_time:208324ms step_avg:67.62ms
+[2025-07-07 12:49:35] [Rank 0] step:3101/10000 train_time:209696ms step_avg:67.62ms
+[2025-07-07 12:49:37] [Rank 0] step:3121/10000 train_time:211067ms step_avg:67.63ms
+[2025-07-07 12:49:38] [Rank 0] step:3141/10000 train_time:212437ms step_avg:67.63ms
+[2025-07-07 12:49:40] [Rank 0] step:3161/10000 train_time:213809ms step_avg:67.64ms
+[2025-07-07 12:49:41] [Rank 0] step:3181/10000 train_time:215181ms step_avg:67.65ms
+[2025-07-07 12:49:42] [Rank 0] step:3201/10000 train_time:216554ms step_avg:67.65ms
+[2025-07-07 12:49:44] [Rank 0] step:3221/10000 train_time:217926ms step_avg:67.66ms
+[2025-07-07 12:49:45] [Rank 0] step:3241/10000 train_time:219971ms step_avg:67.87ms
+[2025-07-07 12:49:46] [Rank 0] step:3261/10000 train_time:220711ms step_avg:67.68ms
+[2025-07-07 12:49:48] [Rank 0] step:3281/10000 train_time:222083ms step_avg:67.69ms
+[2025-07-07 12:49:49] [Rank 0] step:3301/10000 train_time:223457ms step_avg:67.69ms
+[2025-07-07 12:49:51] [Rank 0] step:3321/10000 train_time:224828ms step_avg:67.70ms
+[2025-07-07 12:49:52] [Rank 0] step:3341/10000 train_time:226201ms step_avg:67.70ms
+[2025-07-07 12:49:53] [Rank 0] step:3361/10000 train_time:227574ms step_avg:67.71ms
+[2025-07-07 12:49:55] [Rank 0] step:3381/10000 train_time:228946ms step_avg:67.72ms
+[2025-07-07 12:49:56] [Rank 0] step:3401/10000 train_time:230320ms step_avg:67.72ms
+[2025-07-07 12:49:57] [Rank 0] step:3421/10000 train_time:231738ms step_avg:67.74ms
+[2025-07-07 12:49:59] [Rank 0] step:3441/10000 train_time:233096ms step_avg:67.74ms
+[2025-07-07 12:50:00] [Rank 0] step:3461/10000 train_time:234470ms step_avg:67.75ms
+[2025-07-07 12:50:02] [Rank 0] step:3481/10000 train_time:235867ms step_avg:67.76ms
+[2025-07-07 12:50:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 12:50:04] [Rank 0] PRINT: step:3500/10000 train_loss:2.1025 val_loss:1.9877 train_time:237865ms step_avg:67.96ms
+[2025-07-07 12:50:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 12:50:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 12:50:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 12:55:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 12:55:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 12:55:30] [Rank 0] Total Loss: 4.4595
+[2025-07-07 12:55:30] [Rank 0] Total FTA: 0.0976
+[2025-07-07 12:55:30] [Rank 0] Group 0 Loss: 4.7104
+[2025-07-07 12:55:30] [Rank 0] Group 1 Loss: 4.2683
+[2025-07-07 12:55:30] [Rank 0] Group 2 Loss: 4.3917
+[2025-07-07 12:55:30] [Rank 0] Group 3 Loss: 4.5659
+[2025-07-07 12:55:30] [Rank 0] Group 4 Loss: 4.4146
+[2025-07-07 12:55:30] [Rank 0] Group 5 Loss: 4.3451
+[2025-07-07 12:55:30] [Rank 0] Group 6 Loss: 4.3881
+[2025-07-07 12:55:30] [Rank 0] Group 7 Loss: 4.4740
+[2025-07-07 12:55:30] [Rank 0] Group 8 Loss: 4.4881
+[2025-07-07 12:55:30] [Rank 0] Group 9 Loss: 4.3946
+[2025-07-07 12:55:30] [Rank 0] Group 10 Loss: 4.4209
+[2025-07-07 12:55:30] [Rank 0] Group 11 Loss: 4.4344
+[2025-07-07 12:55:30] [Rank 0] Group 0 FTA: 0.1821
+[2025-07-07 12:55:30] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 12:55:30] [Rank 0] Group 2 FTA: 0.1094
+[2025-07-07 12:55:30] [Rank 0] Group 3 FTA: 0.0573
+[2025-07-07 12:55:30] [Rank 0] Group 4 FTA: 0.0755
+[2025-07-07 12:55:30] [Rank 0] Group 5 FTA: 0.1068
+[2025-07-07 12:55:30] [Rank 0] Group 6 FTA: 0.0859
+[2025-07-07 12:55:30] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-07 12:55:30] [Rank 0] Group 8 FTA: 0.0938
+[2025-07-07 12:55:30] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-07 12:55:30] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-07 12:55:30] [Rank 0] Group 11 FTA: 0.0967
+[2025-07-07 12:55:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 12:55:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 12:55:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 12:55:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-07 12:55:32] [Rank 0] step:3501/10000 train_time:237875ms step_avg:67.94ms
+[2025-07-07 12:55:33] [Rank 0] step:3521/10000 train_time:238634ms step_avg:67.77ms
+[2025-07-07 12:55:34] [Rank 0] step:3541/10000 train_time:240003ms step_avg:67.78ms
+[2025-07-07 12:55:36] [Rank 0] step:3561/10000 train_time:241372ms step_avg:67.78ms
+[2025-07-07 12:55:37] [Rank 0] step:3581/10000 train_time:242742ms step_avg:67.79ms
+[2025-07-07 12:55:39] [Rank 0] step:3601/10000 train_time:244795ms step_avg:67.98ms
+[2025-07-07 12:55:40] [Rank 0] step:3621/10000 train_time:245533ms step_avg:67.81ms
+[2025-07-07 12:55:41] [Rank 0] step:3641/10000 train_time:246905ms step_avg:67.81ms
+[2025-07-07 12:55:43] [Rank 0] step:3661/10000 train_time:248277ms step_avg:67.82ms
+[2025-07-07 12:55:44] [Rank 0] step:3681/10000 train_time:249651ms step_avg:67.82ms
+[2025-07-07 12:55:46] [Rank 0] step:3701/10000 train_time:251023ms step_avg:67.83ms
+[2025-07-07 12:55:47] [Rank 0] step:3721/10000 train_time:252394ms step_avg:67.83ms
+[2025-07-07 12:55:48] [Rank 0] step:3741/10000 train_time:253768ms step_avg:67.83ms
+[2025-07-07 12:55:50] [Rank 0] step:3761/10000 train_time:255140ms step_avg:67.84ms
+[2025-07-07 12:55:51] [Rank 0] step:3781/10000 train_time:257178ms step_avg:68.02ms
+[2025-07-07 12:55:52] [Rank 0] step:3801/10000 train_time:257918ms step_avg:67.86ms
+[2025-07-07 12:55:54] [Rank 0] step:3821/10000 train_time:259292ms step_avg:67.86ms
+[2025-07-07 12:55:55] [Rank 0] step:3841/10000 train_time:260666ms step_avg:67.86ms
+[2025-07-07 12:55:57] [Rank 0] step:3861/10000 train_time:262041ms step_avg:67.87ms
+[2025-07-07 12:55:58] [Rank 0] step:3881/10000 train_time:263415ms step_avg:67.87ms
+[2025-07-07 12:55:59] [Rank 0] step:3901/10000 train_time:264789ms step_avg:67.88ms
+[2025-07-07 12:56:01] [Rank 0] step:3921/10000 train_time:266164ms step_avg:67.88ms
+[2025-07-07 12:56:02] [Rank 0] step:3941/10000 train_time:267538ms step_avg:67.89ms
+[2025-07-07 12:56:03] [Rank 0] step:3961/10000 train_time:268959ms step_avg:67.90ms
+[2025-07-07 12:56:05] [Rank 0] step:3981/10000 train_time:270335ms step_avg:67.91ms
+[2025-07-07 12:56:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 12:56:07] [Rank 0] PRINT: step:4000/10000 train_loss:1.8946 val_loss:1.8134 train_time:272335ms step_avg:68.08ms
+[2025-07-07 12:56:07] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 12:56:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 12:56:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 13:01:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 13:01:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 13:01:34] [Rank 0] Total Loss: 4.4801
+[2025-07-07 13:01:34] [Rank 0] Total FTA: 0.1021
+[2025-07-07 13:01:34] [Rank 0] Group 0 Loss: 4.7581
+[2025-07-07 13:01:34] [Rank 0] Group 1 Loss: 4.3098
+[2025-07-07 13:01:34] [Rank 0] Group 2 Loss: 4.2997
+[2025-07-07 13:01:34] [Rank 0] Group 3 Loss: 4.6176
+[2025-07-07 13:01:34] [Rank 0] Group 4 Loss: 4.4721
+[2025-07-07 13:01:34] [Rank 0] Group 5 Loss: 4.3594
+[2025-07-07 13:01:34] [Rank 0] Group 6 Loss: 4.3518
+[2025-07-07 13:01:34] [Rank 0] Group 7 Loss: 4.5001
+[2025-07-07 13:01:34] [Rank 0] Group 8 Loss: 4.4770
+[2025-07-07 13:01:34] [Rank 0] Group 9 Loss: 4.4404
+[2025-07-07 13:01:34] [Rank 0] Group 10 Loss: 4.4737
+[2025-07-07 13:01:34] [Rank 0] Group 11 Loss: 4.4545
+[2025-07-07 13:01:34] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 13:01:34] [Rank 0] Group 1 FTA: 0.1641
+[2025-07-07 13:01:34] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-07 13:01:34] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-07 13:01:34] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-07 13:01:34] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 13:01:34] [Rank 0] Group 6 FTA: 0.1172
+[2025-07-07 13:01:34] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-07 13:01:34] [Rank 0] Group 8 FTA: 0.1016
+[2025-07-07 13:01:34] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-07 13:01:34] [Rank 0] Group 10 FTA: 0.1133
+[2025-07-07 13:01:34] [Rank 0] Group 11 FTA: 0.0811
+[2025-07-07 13:01:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 13:01:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 13:01:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 13:01:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-07 13:01:36] [Rank 0] step:4001/10000 train_time:272344ms step_avg:68.07ms
+[2025-07-07 13:01:37] [Rank 0] step:4021/10000 train_time:273107ms step_avg:67.92ms
+[2025-07-07 13:01:38] [Rank 0] step:4041/10000 train_time:274474ms step_avg:67.92ms
+[2025-07-07 13:01:40] [Rank 0] step:4061/10000 train_time:275842ms step_avg:67.92ms
+[2025-07-07 13:01:41] [Rank 0] step:4081/10000 train_time:277213ms step_avg:67.93ms
+[2025-07-07 13:01:42] [Rank 0] step:4101/10000 train_time:278582ms step_avg:67.93ms
+[2025-07-07 13:01:44] [Rank 0] step:4121/10000 train_time:279950ms step_avg:67.93ms
+[2025-07-07 13:01:45] [Rank 0] step:4141/10000 train_time:281569ms step_avg:68.00ms
+[2025-07-07 13:01:47] [Rank 0] step:4161/10000 train_time:282735ms step_avg:67.95ms
+[2025-07-07 13:01:48] [Rank 0] step:4181/10000 train_time:284106ms step_avg:67.95ms
+[2025-07-07 13:01:49] [Rank 0] step:4201/10000 train_time:285477ms step_avg:67.95ms
+[2025-07-07 13:01:51] [Rank 0] step:4221/10000 train_time:286849ms step_avg:67.96ms
+[2025-07-07 13:01:52] [Rank 0] step:4241/10000 train_time:288223ms step_avg:67.96ms
+[2025-07-07 13:01:53] [Rank 0] step:4261/10000 train_time:289597ms step_avg:67.96ms
+[2025-07-07 13:01:55] [Rank 0] step:4281/10000 train_time:290970ms step_avg:67.97ms
+[2025-07-07 13:01:56] [Rank 0] step:4301/10000 train_time:292345ms step_avg:67.97ms
+[2025-07-07 13:01:58] [Rank 0] step:4321/10000 train_time:294382ms step_avg:68.13ms
+[2025-07-07 13:01:59] [Rank 0] step:4341/10000 train_time:295122ms step_avg:67.98ms
+[2025-07-07 13:02:00] [Rank 0] step:4361/10000 train_time:296497ms step_avg:67.99ms
+[2025-07-07 13:02:02] [Rank 0] step:4381/10000 train_time:297872ms step_avg:67.99ms
+[2025-07-07 13:02:03] [Rank 0] step:4401/10000 train_time:299247ms step_avg:68.00ms
+[2025-07-07 13:02:04] [Rank 0] step:4421/10000 train_time:300622ms step_avg:68.00ms
+[2025-07-07 13:02:06] [Rank 0] step:4441/10000 train_time:301998ms step_avg:68.00ms
+[2025-07-07 13:02:07] [Rank 0] step:4461/10000 train_time:303374ms step_avg:68.01ms
+[2025-07-07 13:02:09] [Rank 0] step:4481/10000 train_time:304748ms step_avg:68.01ms
+[2025-07-07 13:02:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 13:02:11] [Rank 0] PRINT: step:4500/10000 train_loss:1.7541 val_loss:1.7058 train_time:306750ms step_avg:68.17ms
+[2025-07-07 13:01:58] [Rank 0] step:4321/10000 train_time:294382ms step_avg:68.13ms
+[2025-07-07 13:01:59] [Rank 0] step:4341/10000 train_time:295122ms step_avg:67.98ms
+[2025-07-07 13:02:00] [Rank 0] step:4361/10000 train_time:296497ms step_avg:67.99ms
+[2025-07-07 13:02:02] [Rank 0] step:4381/10000 train_time:297872ms step_avg:67.99ms
+[2025-07-07 13:02:03] [Rank 0] step:4401/10000 train_time:299247ms step_avg:68.00ms
+[2025-07-07 13:02:04] [Rank 0] step:4421/10000 train_time:300622ms step_avg:68.00ms
+[2025-07-07 13:02:06] [Rank 0] step:4441/10000 train_time:301998ms step_avg:68.00ms
+[2025-07-07 13:02:07] [Rank 0] step:4461/10000 train_time:303374ms step_avg:68.01ms
+[2025-07-07 13:02:09] [Rank 0] step:4481/10000 train_time:304748ms step_avg:68.01ms
+[2025-07-07 13:02:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 13:02:11] [Rank 0] PRINT: step:4500/10000 train_loss:1.7541 val_loss:1.7058 train_time:306750ms step_avg:68.17ms
+[2025-07-07 13:02:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 13:02:11] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
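The stratified sampling announced above is, presumably, proportional per-class sampling so every group stays represented in the ~5000-sample evaluation set; the slight overshoot to 5633 reported next is consistent with per-class counts being rounded up. A sketch under that assumption (function and variable names are hypothetical, not the script's):

```python
import math
import random
from collections import defaultdict

def stratified_sample(samples, labels, target=5000, seed=42):
    """Draw ~target items, sampling each class in proportion to its size."""
    by_class = defaultdict(list)
    for sample, label in zip(samples, labels):
        by_class[label].append(sample)
    rng = random.Random(seed)
    frac = target / len(samples)
    picked = []
    for group in by_class.values():
        k = min(len(group), math.ceil(len(group) * frac))  # ceil -> small overshoot
        picked.extend(rng.sample(group, k))
    return picked
```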
+[2025-07-07 13:02:11] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 13:07:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 13:07:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 13:07:40] [Rank 0] Total Loss: 4.5297
+[2025-07-07 13:07:40] [Rank 0] Total FTA: 0.0889
+[2025-07-07 13:07:40] [Rank 0] Group 0 Loss: 4.8068
+[2025-07-07 13:07:40] [Rank 0] Group 1 Loss: 4.2624
+[2025-07-07 13:07:40] [Rank 0] Group 2 Loss: 4.3516
+[2025-07-07 13:07:40] [Rank 0] Group 3 Loss: 4.7341
+[2025-07-07 13:07:40] [Rank 0] Group 4 Loss: 4.4791
+[2025-07-07 13:07:40] [Rank 0] Group 5 Loss: 4.4261
+[2025-07-07 13:07:40] [Rank 0] Group 6 Loss: 4.4129
+[2025-07-07 13:07:40] [Rank 0] Group 7 Loss: 4.5320
+[2025-07-07 13:07:40] [Rank 0] Group 8 Loss: 4.5220
+[2025-07-07 13:07:40] [Rank 0] Group 9 Loss: 4.4807
+[2025-07-07 13:07:40] [Rank 0] Group 10 Loss: 4.5047
+[2025-07-07 13:07:40] [Rank 0] Group 11 Loss: 4.5404
+[2025-07-07 13:07:40] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 13:07:40] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 13:07:40] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-07 13:07:40] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-07 13:07:40] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 13:07:40] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 13:07:40] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-07 13:07:40] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 13:07:40] [Rank 0] Group 8 FTA: 0.1406
+[2025-07-07 13:07:40] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-07 13:07:40] [Rank 0] Group 10 FTA: 0.0820
+[2025-07-07 13:07:40] [Rank 0] Group 11 FTA: 0.0771
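The log never expands "FTA"; given the per-group values between 0 and 1 it plausibly means first-token accuracy, i.e. whether the greedy next-token prediction at the answer position matches the answer's first token. A sketch of that assumed metric (the helper and its arguments are illustrative):

```python
import torch

def first_token_accuracy(logits, first_answer_ids, answer_start):
    """Assumed FTA: the greedy prediction made at the position just before
    the answer span must equal the answer's first token.

    logits:           (batch, seq_len, vocab_size) model outputs
    first_answer_ids: (batch,) gold first token of each answer
    answer_start:     (batch,) index of the first answer token per sequence
    """
    rows = torch.arange(logits.size(0))
    preds = logits[rows, answer_start - 1].argmax(dim=-1)
    return (preds == first_answer_ids).float().mean().item()
```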
+[2025-07-07 13:07:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 13:07:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 13:07:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 13:07:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
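Each detailed evaluation ends by redrawing all four PNGs under the same filenames, so the files always hold the full history up to the current step. A sketch of that overwrite-style update with matplotlib (the helper name and history arguments are illustrative):

```python
import matplotlib.pyplot as plt

def update_total_loss_curve(steps, losses, out_path):
    """Redraw the running curve and overwrite the PNG in place."""
    fig, ax = plt.subplots()
    ax.plot(steps, losses, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel("total detailed loss")
    fig.savefig(out_path)  # same filename every time -> file always holds full history
    plt.close(fig)         # release the figure between evaluations
    print(f"[✓] Total Detailed Loss curve updated and saved to: {out_path}")
```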
+[2025-07-07 13:07:42] [Rank 0] step:4501/10000 train_time:306767ms step_avg:68.16ms
+[2025-07-07 13:07:44] [Rank 0] step:4521/10000 train_time:308232ms step_avg:68.18ms
+[2025-07-07 13:07:45] [Rank 0] step:4541/10000 train_time:309598ms step_avg:68.18ms
+[2025-07-07 13:07:46] [Rank 0] step:4561/10000 train_time:310966ms step_avg:68.18ms
+[2025-07-07 13:07:48] [Rank 0] step:4581/10000 train_time:312334ms step_avg:68.18ms
+[2025-07-07 13:07:49] [Rank 0] step:4601/10000 train_time:313703ms step_avg:68.18ms
+[2025-07-07 13:07:50] [Rank 0] step:4621/10000 train_time:315073ms step_avg:68.18ms
+[2025-07-07 13:07:52] [Rank 0] step:4641/10000 train_time:316444ms step_avg:68.18ms
+[2025-07-07 13:07:53] [Rank 0] step:4661/10000 train_time:317814ms step_avg:68.19ms
+[2025-07-07 13:07:55] [Rank 0] step:4681/10000 train_time:319230ms step_avg:68.20ms
+[2025-07-07 13:07:56] [Rank 0] step:4701/10000 train_time:320585ms step_avg:68.20ms
+[2025-07-07 13:07:57] [Rank 0] step:4721/10000 train_time:321956ms step_avg:68.20ms
+[2025-07-07 13:07:59] [Rank 0] step:4741/10000 train_time:323329ms step_avg:68.20ms
+[2025-07-07 13:08:00] [Rank 0] step:4761/10000 train_time:324701ms step_avg:68.20ms
+[2025-07-07 13:08:01] [Rank 0] step:4781/10000 train_time:326073ms step_avg:68.20ms
+[2025-07-07 13:08:03] [Rank 0] step:4801/10000 train_time:327446ms step_avg:68.20ms
+[2025-07-07 13:08:04] [Rank 0] step:4821/10000 train_time:328819ms step_avg:68.21ms
+[2025-07-07 13:08:06] [Rank 0] step:4841/10000 train_time:330193ms step_avg:68.21ms
+[2025-07-07 13:08:07] [Rank 0] step:4861/10000 train_time:331613ms step_avg:68.22ms
+[2025-07-07 13:08:08] [Rank 0] step:4881/10000 train_time:332970ms step_avg:68.22ms
+[2025-07-07 13:08:10] [Rank 0] step:4901/10000 train_time:334344ms step_avg:68.22ms
+[2025-07-07 13:08:11] [Rank 0] step:4921/10000 train_time:335718ms step_avg:68.22ms
+[2025-07-07 13:08:12] [Rank 0] step:4941/10000 train_time:337091ms step_avg:68.22ms
+[2025-07-07 13:08:14] [Rank 0] step:4961/10000 train_time:338467ms step_avg:68.23ms
+[2025-07-07 13:08:15] [Rank 0] step:4981/10000 train_time:339840ms step_avg:68.23ms
+[2025-07-07 13:08:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
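The recurring warning above is plain integer arithmetic: 1966080 validation tokens hold only 7 full 262144-token batches, so the last 131072 tokens are skipped on every validation pass. A sketch of the check:

```python
val_tokens = 1_966_080
val_batch_size = 262_144

num_val_batches = val_tokens // val_batch_size          # 7 full batches
missed = val_tokens - num_val_batches * val_batch_size  # 131072 tokens skipped

if val_tokens % val_batch_size != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
```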
+[2025-07-07 13:08:18] [Rank 0] PRINT: step:5000/10000 train_loss:1.6669 val_loss:1.6373 train_time:341838ms step_avg:68.37ms
+[2025-07-07 13:08:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 13:08:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 13:08:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 13:13:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 13:13:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 13:13:46] [Rank 0] Total Loss: 4.6083
+[2025-07-07 13:13:46] [Rank 0] Total FTA: 0.1037
+[2025-07-07 13:13:46] [Rank 0] Group 0 Loss: 4.8839
+[2025-07-07 13:13:46] [Rank 0] Group 1 Loss: 4.3655
+[2025-07-07 13:13:46] [Rank 0] Group 2 Loss: 4.4917
+[2025-07-07 13:13:46] [Rank 0] Group 3 Loss: 4.8654
+[2025-07-07 13:13:46] [Rank 0] Group 4 Loss: 4.5441
+[2025-07-07 13:13:46] [Rank 0] Group 5 Loss: 4.5087
+[2025-07-07 13:13:46] [Rank 0] Group 6 Loss: 4.4682
+[2025-07-07 13:13:46] [Rank 0] Group 7 Loss: 4.5901
+[2025-07-07 13:13:46] [Rank 0] Group 8 Loss: 4.5979
+[2025-07-07 13:13:46] [Rank 0] Group 9 Loss: 4.5551
+[2025-07-07 13:13:46] [Rank 0] Group 10 Loss: 4.6037
+[2025-07-07 13:13:46] [Rank 0] Group 11 Loss: 4.5798
+[2025-07-07 13:13:46] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 13:13:46] [Rank 0] Group 1 FTA: 0.1432
+[2025-07-07 13:13:46] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 13:13:46] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 13:13:46] [Rank 0] Group 4 FTA: 0.0885
+[2025-07-07 13:13:46] [Rank 0] Group 5 FTA: 0.0807
+[2025-07-07 13:13:46] [Rank 0] Group 6 FTA: 0.1250
+[2025-07-07 13:13:46] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-07 13:13:46] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 13:13:46] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 13:13:46] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-07 13:13:46] [Rank 0] Group 11 FTA: 0.0869
+[2025-07-07 13:13:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 13:13:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 13:13:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 13:13:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-07 13:13:48] [Rank 0] step:5001/10000 train_time:341847ms step_avg:68.36ms
+[2025-07-07 13:13:49] [Rank 0] step:5021/10000 train_time:342620ms step_avg:68.24ms
+[2025-07-07 13:13:50] [Rank 0] step:5041/10000 train_time:344676ms step_avg:68.37ms
+[2025-07-07 13:13:52] [Rank 0] step:5061/10000 train_time:345414ms step_avg:68.25ms
+[2025-07-07 13:13:53] [Rank 0] step:5081/10000 train_time:346783ms step_avg:68.25ms
+[2025-07-07 13:13:54] [Rank 0] step:5101/10000 train_time:348153ms step_avg:68.25ms
+[2025-07-07 13:13:56] [Rank 0] step:5121/10000 train_time:349523ms step_avg:68.25ms
+[2025-07-07 13:13:57] [Rank 0] step:5141/10000 train_time:350893ms step_avg:68.25ms
+[2025-07-07 13:13:59] [Rank 0] step:5161/10000 train_time:352264ms step_avg:68.25ms
+[2025-07-07 13:14:00] [Rank 0] step:5181/10000 train_time:353635ms step_avg:68.26ms
+[2025-07-07 13:14:01] [Rank 0] step:5201/10000 train_time:355006ms step_avg:68.26ms
+[2025-07-07 13:14:03] [Rank 0] step:5221/10000 train_time:357059ms step_avg:68.39ms
+[2025-07-07 13:14:04] [Rank 0] step:5241/10000 train_time:357798ms step_avg:68.27ms
+[2025-07-07 13:14:05] [Rank 0] step:5261/10000 train_time:359172ms step_avg:68.27ms
+[2025-07-07 13:14:07] [Rank 0] step:5281/10000 train_time:360545ms step_avg:68.27ms
+[2025-07-07 13:14:08] [Rank 0] step:5301/10000 train_time:361919ms step_avg:68.27ms
+[2025-07-07 13:14:10] [Rank 0] step:5321/10000 train_time:363292ms step_avg:68.28ms
+[2025-07-07 13:14:11] [Rank 0] step:5341/10000 train_time:364667ms step_avg:68.28ms
+[2025-07-07 13:14:12] [Rank 0] step:5361/10000 train_time:366041ms step_avg:68.28ms
+[2025-07-07 13:14:14] [Rank 0] step:5381/10000 train_time:367460ms step_avg:68.29ms
+[2025-07-07 13:14:15] [Rank 0] step:5401/10000 train_time:368879ms step_avg:68.30ms
+[2025-07-07 13:14:17] [Rank 0] step:5421/10000 train_time:370211ms step_avg:68.29ms
+[2025-07-07 13:14:18] [Rank 0] step:5441/10000 train_time:371584ms step_avg:68.29ms
+[2025-07-07 13:14:19] [Rank 0] step:5461/10000 train_time:372959ms step_avg:68.29ms
+[2025-07-07 13:14:21] [Rank 0] step:5481/10000 train_time:374333ms step_avg:68.30ms
+[2025-07-07 13:14:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 13:14:23] [Rank 0] PRINT: step:5500/10000 train_loss:1.6072 val_loss:1.5867 train_time:376332ms step_avg:68.42ms
+[2025-07-07 13:14:23] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 13:14:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 13:14:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 13:19:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 13:19:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 13:19:49] [Rank 0] Total Loss: 4.6787
+[2025-07-07 13:19:49] [Rank 0] Total FTA: 0.1243
+[2025-07-07 13:19:49] [Rank 0] Group 0 Loss: 4.9977
+[2025-07-07 13:19:49] [Rank 0] Group 1 Loss: 4.4193
+[2025-07-07 13:19:49] [Rank 0] Group 2 Loss: 4.5441
+[2025-07-07 13:19:49] [Rank 0] Group 3 Loss: 4.8885
+[2025-07-07 13:19:49] [Rank 0] Group 4 Loss: 4.5982
+[2025-07-07 13:19:49] [Rank 0] Group 5 Loss: 4.5730
+[2025-07-07 13:19:49] [Rank 0] Group 6 Loss: 4.5528
+[2025-07-07 13:19:49] [Rank 0] Group 7 Loss: 4.7046
+[2025-07-07 13:19:49] [Rank 0] Group 8 Loss: 4.6514
+[2025-07-07 13:19:49] [Rank 0] Group 9 Loss: 4.5949
+[2025-07-07 13:19:49] [Rank 0] Group 10 Loss: 4.6398
+[2025-07-07 13:19:49] [Rank 0] Group 11 Loss: 4.6661
+[2025-07-07 13:19:49] [Rank 0] Group 0 FTA: 0.1912
+[2025-07-07 13:19:49] [Rank 0] Group 1 FTA: 0.3594
+[2025-07-07 13:19:49] [Rank 0] Group 2 FTA: 0.1641
+[2025-07-07 13:19:49] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-07 13:19:49] [Rank 0] Group 4 FTA: 0.0703
+[2025-07-07 13:19:49] [Rank 0] Group 5 FTA: 0.0677
+[2025-07-07 13:19:49] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-07 13:19:49] [Rank 0] Group 7 FTA: 0.1224
+[2025-07-07 13:19:49] [Rank 0] Group 8 FTA: 0.1120
+[2025-07-07 13:19:49] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 13:19:49] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-07 13:19:49] [Rank 0] Group 11 FTA: 0.0908
+[2025-07-07 13:19:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 13:19:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 13:19:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 13:19:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-07 13:19:50] [Rank 0] step:5501/10000 train_time:376343ms step_avg:68.41ms
+[2025-07-07 13:19:52] [Rank 0] step:5521/10000 train_time:377114ms step_avg:68.31ms
+[2025-07-07 13:19:53] [Rank 0] step:5541/10000 train_time:378483ms step_avg:68.31ms
+[2025-07-07 13:19:54] [Rank 0] step:5561/10000 train_time:379852ms step_avg:68.31ms
+[2025-07-07 13:19:56] [Rank 0] step:5581/10000 train_time:381269ms step_avg:68.32ms
+[2025-07-07 13:19:57] [Rank 0] step:5601/10000 train_time:382623ms step_avg:68.31ms
+[2025-07-07 13:19:59] [Rank 0] step:5621/10000 train_time:383994ms step_avg:68.31ms
+[2025-07-07 13:20:00] [Rank 0] step:5641/10000 train_time:385365ms step_avg:68.32ms
+[2025-07-07 13:20:01] [Rank 0] step:5661/10000 train_time:386739ms step_avg:68.32ms
+[2025-07-07 13:20:03] [Rank 0] step:5681/10000 train_time:388111ms step_avg:68.32ms
+[2025-07-07 13:20:04] [Rank 0] step:5701/10000 train_time:389483ms step_avg:68.32ms
+[2025-07-07 13:20:05] [Rank 0] step:5721/10000 train_time:390855ms step_avg:68.32ms
+[2025-07-07 13:20:07] [Rank 0] step:5741/10000 train_time:392228ms step_avg:68.32ms
+[2025-07-07 13:20:08] [Rank 0] step:5761/10000 train_time:393853ms step_avg:68.37ms
+[2025-07-07 13:20:10] [Rank 0] step:5781/10000 train_time:395007ms step_avg:68.33ms
+[2025-07-07 13:20:11] [Rank 0] step:5801/10000 train_time:396380ms step_avg:68.33ms
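Because the step and validation lines follow one fixed format, the timing and loss history can be recovered from this log with a short regex pass; a sketch (the field grouping is illustrative):

```python
import re

STEP_RE = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")
VAL_RE = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_log(path):
    """Collect (step, train_time_ms, step_avg_ms) and (step, train_loss, val_loss)."""
    steps, evals = [], []
    with open(path) as f:
        for line in f:
            if m := VAL_RE.search(line):
                evals.append((int(m[1]), float(m[2]), float(m[3])))
            elif m := STEP_RE.search(line):
                steps.append((int(m[1]), int(m[2]), float(m[3])))
    return steps, evals
```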
+[2025-07-07 13:20:12] [Rank 0] step:5821/10000 train_time:397754ms step_avg:68.33ms
+[2025-07-07 13:20:14] [Rank 0] step:5841/10000 train_time:399128ms step_avg:68.33ms
+[2025-07-07 13:20:15] [Rank 0] step:5861/10000 train_time:400503ms step_avg:68.33ms
+[2025-07-07 13:20:16] [Rank 0] step:5881/10000 train_time:401877ms step_avg:68.33ms
+[2025-07-07 13:20:18] [Rank 0] step:5901/10000 train_time:403252ms step_avg:68.34ms
+[2025-07-07 13:20:19] [Rank 0] step:5921/10000 train_time:404626ms step_avg:68.34ms
+[2025-07-07 13:20:21] [Rank 0] step:5941/10000 train_time:406685ms step_avg:68.45ms
+[2025-07-07 13:20:22] [Rank 0] step:5961/10000 train_time:407424ms step_avg:68.35ms
+[2025-07-07 13:20:23] [Rank 0] step:5981/10000 train_time:408798ms step_avg:68.35ms
+[2025-07-07 13:20:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 13:20:26] [Rank 0] PRINT: step:6000/10000 train_loss:1.5628 val_loss:1.5510 train_time:410797ms step_avg:68.47ms
+[2025-07-07 13:20:26] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 13:20:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 13:20:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 13:25:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 13:25:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 13:25:54] [Rank 0] Total Loss: 4.6772
+[2025-07-07 13:25:54] [Rank 0] Total FTA: 0.0982
+[2025-07-07 13:25:54] [Rank 0] Group 0 Loss: 4.9455
+[2025-07-07 13:25:54] [Rank 0] Group 1 Loss: 4.3822
+[2025-07-07 13:25:54] [Rank 0] Group 2 Loss: 4.4648
+[2025-07-07 13:25:54] [Rank 0] Group 3 Loss: 4.8997
+[2025-07-07 13:25:54] [Rank 0] Group 4 Loss: 4.5809
+[2025-07-07 13:25:54] [Rank 0] Group 5 Loss: 4.5931
+[2025-07-07 13:25:54] [Rank 0] Group 6 Loss: 4.5499
+[2025-07-07 13:25:54] [Rank 0] Group 7 Loss: 4.7131
+[2025-07-07 13:25:54] [Rank 0] Group 8 Loss: 4.7280
+[2025-07-07 13:25:54] [Rank 0] Group 9 Loss: 4.6243
+[2025-07-07 13:25:54] [Rank 0] Group 10 Loss: 4.6829
+[2025-07-07 13:25:54] [Rank 0] Group 11 Loss: 4.6758
+[2025-07-07 13:25:54] [Rank 0] Group 0 FTA: 0.1808
+[2025-07-07 13:25:54] [Rank 0] Group 1 FTA: 0.1328
+[2025-07-07 13:25:54] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 13:25:54] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-07 13:25:54] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-07 13:25:54] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 13:25:54] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-07 13:25:54] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-07 13:25:54] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 13:25:54] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-07 13:25:54] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-07 13:25:54] [Rank 0] Group 11 FTA: 0.0898
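"Group N Loss" above is presumably the mean per-sample loss over evaluation samples labelled with class N, with "Total Loss" the mean across all 5633 sampled examples. A sketch of that assumed aggregation (tensor names are hypothetical):

```python
import torch

def per_group_mean_loss(sample_losses, group_ids, num_groups=12):
    """Mean of per-sample losses within each group label.

    sample_losses: (n,) float tensor of per-sample loss values
    group_ids:     (n,) long tensor of class labels in [0, num_groups)
    """
    totals = torch.zeros(num_groups).scatter_add_(0, group_ids, sample_losses)
    counts = torch.bincount(group_ids, minlength=num_groups).clamp(min=1)
    return totals / counts  # (num_groups,) mean loss per group
```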
+[2025-07-07 13:25:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 13:25:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 13:25:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 13:25:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-07 13:25:55] [Rank 0] step:6001/10000 train_time:410807ms step_avg:68.46ms
+[2025-07-07 13:25:57] [Rank 0] step:6021/10000 train_time:411581ms step_avg:68.36ms
+[2025-07-07 13:25:58] [Rank 0] step:6041/10000 train_time:412949ms step_avg:68.36ms
+[2025-07-07 13:26:00] [Rank 0] step:6061/10000 train_time:414315ms step_avg:68.36ms
+[2025-07-07 13:26:01] [Rank 0] step:6081/10000 train_time:415684ms step_avg:68.36ms
+[2025-07-07 13:26:02] [Rank 0] step:6101/10000 train_time:417052ms step_avg:68.36ms
+[2025-07-07 13:26:04] [Rank 0] step:6121/10000 train_time:418673ms step_avg:68.40ms
+[2025-07-07 13:26:05] [Rank 0] step:6141/10000 train_time:419842ms step_avg:68.37ms
+[2025-07-07 13:26:06] [Rank 0] step:6161/10000 train_time:421211ms step_avg:68.37ms
+[2025-07-07 13:26:08] [Rank 0] step:6181/10000 train_time:422581ms step_avg:68.37ms
+[2025-07-07 13:26:09] [Rank 0] step:6201/10000 train_time:423953ms step_avg:68.37ms
+[2025-07-07 13:26:11] [Rank 0] step:6221/10000 train_time:425325ms step_avg:68.37ms
+[2025-07-07 13:26:12] [Rank 0] step:6241/10000 train_time:426697ms step_avg:68.37ms
+[2025-07-07 13:26:13] [Rank 0] step:6261/10000 train_time:428069ms step_avg:68.37ms
+[2025-07-07 13:26:15] [Rank 0] step:6281/10000 train_time:429441ms step_avg:68.37ms
+[2025-07-07 13:26:16] [Rank 0] step:6301/10000 train_time:431489ms step_avg:68.48ms
+[2025-07-07 13:26:17] [Rank 0] step:6321/10000 train_time:432228ms step_avg:68.38ms
+[2025-07-07 13:26:19] [Rank 0] step:6341/10000 train_time:433600ms step_avg:68.38ms
+[2025-07-07 13:26:20] [Rank 0] step:6361/10000 train_time:434973ms step_avg:68.38ms
+[2025-07-07 13:26:22] [Rank 0] step:6381/10000 train_time:436346ms step_avg:68.38ms
+[2025-07-07 13:26:23] [Rank 0] step:6401/10000 train_time:437719ms step_avg:68.38ms
+[2025-07-07 13:26:24] [Rank 0] step:6421/10000 train_time:439093ms step_avg:68.38ms
+[2025-07-07 13:26:26] [Rank 0] step:6441/10000 train_time:440468ms step_avg:68.38ms
+[2025-07-07 13:26:27] [Rank 0] step:6461/10000 train_time:441841ms step_avg:68.39ms
+[2025-07-07 13:26:29] [Rank 0] step:6481/10000 train_time:443464ms step_avg:68.43ms
+[2025-07-07 13:26:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 13:26:31] [Rank 0] PRINT: step:6500/10000 train_loss:1.5270 val_loss:1.5222 train_time:445238ms step_avg:68.50ms
+[2025-07-07 13:26:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 13:26:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 13:26:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 13:31:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 13:31:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 13:31:58] [Rank 0] Total Loss: 4.6506
+[2025-07-07 13:31:58] [Rank 0] Total FTA: 0.1259
+[2025-07-07 13:31:58] [Rank 0] Group 0 Loss: 4.9232
+[2025-07-07 13:31:58] [Rank 0] Group 1 Loss: 4.4010
+[2025-07-07 13:31:58] [Rank 0] Group 2 Loss: 4.3759
+[2025-07-07 13:31:58] [Rank 0] Group 3 Loss: 4.9843
+[2025-07-07 13:31:58] [Rank 0] Group 4 Loss: 4.5690
+[2025-07-07 13:31:58] [Rank 0] Group 5 Loss: 4.5048
+[2025-07-07 13:31:58] [Rank 0] Group 6 Loss: 4.5546
+[2025-07-07 13:31:58] [Rank 0] Group 7 Loss: 4.6736
+[2025-07-07 13:31:58] [Rank 0] Group 8 Loss: 4.6374
+[2025-07-07 13:31:58] [Rank 0] Group 9 Loss: 4.6050
+[2025-07-07 13:31:58] [Rank 0] Group 10 Loss: 4.6370
+[2025-07-07 13:31:58] [Rank 0] Group 11 Loss: 4.6529
+[2025-07-07 13:31:58] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 13:31:58] [Rank 0] Group 1 FTA: 0.3125
+[2025-07-07 13:31:58] [Rank 0] Group 2 FTA: 0.1771
+[2025-07-07 13:31:58] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-07 13:31:58] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-07 13:31:58] [Rank 0] Group 5 FTA: 0.1016
+[2025-07-07 13:31:58] [Rank 0] Group 6 FTA: 0.1094
+[2025-07-07 13:31:58] [Rank 0] Group 7 FTA: 0.1328
+[2025-07-07 13:31:58] [Rank 0] Group 8 FTA: 0.1328
+[2025-07-07 13:31:58] [Rank 0] Group 9 FTA: 0.1172
+[2025-07-07 13:31:58] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-07 13:31:58] [Rank 0] Group 11 FTA: 0.1123
+[2025-07-07 13:31:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 13:32:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 13:32:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 13:32:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-07 13:32:01] [Rank 0] step:6501/10000 train_time:445247ms step_avg:68.49ms
+[2025-07-07 13:32:02] [Rank 0] step:6521/10000 train_time:446007ms step_avg:68.40ms
+[2025-07-07 13:32:03] [Rank 0] step:6541/10000 train_time:447375ms step_avg:68.40ms
+[2025-07-07 13:32:05] [Rank 0] step:6561/10000 train_time:448744ms step_avg:68.40ms
+[2025-07-07 13:32:06] [Rank 0] step:6581/10000 train_time:450111ms step_avg:68.40ms
+[2025-07-07 13:32:07] [Rank 0] step:6601/10000 train_time:451480ms step_avg:68.40ms
+[2025-07-07 13:32:09] [Rank 0] step:6621/10000 train_time:452880ms step_avg:68.40ms
+[2025-07-07 13:32:10] [Rank 0] step:6641/10000 train_time:454250ms step_avg:68.40ms
+[2025-07-07 13:32:12] [Rank 0] step:6661/10000 train_time:455619ms step_avg:68.40ms
+[2025-07-07 13:32:13] [Rank 0] step:6681/10000 train_time:457024ms step_avg:68.41ms
+[2025-07-07 13:32:14] [Rank 0] step:6701/10000 train_time:458395ms step_avg:68.41ms
+[2025-07-07 13:32:16] [Rank 0] step:6721/10000 train_time:459766ms step_avg:68.41ms
+[2025-07-07 13:32:17] [Rank 0] step:6741/10000 train_time:461137ms step_avg:68.41ms
+[2025-07-07 13:32:18] [Rank 0] step:6761/10000 train_time:462509ms step_avg:68.41ms
+[2025-07-07 13:32:20] [Rank 0] step:6781/10000 train_time:463882ms step_avg:68.41ms
+[2025-07-07 13:32:21] [Rank 0] step:6801/10000 train_time:465253ms step_avg:68.41ms
+[2025-07-07 13:32:23] [Rank 0] step:6821/10000 train_time:466626ms step_avg:68.41ms
+[2025-07-07 13:32:24] [Rank 0] step:6841/10000 train_time:468046ms step_avg:68.42ms
+[2025-07-07 13:32:25] [Rank 0] step:6861/10000 train_time:469408ms step_avg:68.42ms
+[2025-07-07 13:32:27] [Rank 0] step:6881/10000 train_time:470781ms step_avg:68.42ms
+[2025-07-07 13:32:28] [Rank 0] step:6901/10000 train_time:472157ms step_avg:68.42ms
+[2025-07-07 13:32:29] [Rank 0] step:6921/10000 train_time:473531ms step_avg:68.42ms
+[2025-07-07 13:32:31] [Rank 0] step:6941/10000 train_time:474906ms step_avg:68.42ms
+[2025-07-07 13:32:32] [Rank 0] step:6961/10000 train_time:476280ms step_avg:68.42ms
+[2025-07-07 13:32:34] [Rank 0] step:6981/10000 train_time:477656ms step_avg:68.42ms
+[2025-07-07 13:32:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 13:32:36] [Rank 0] PRINT: step:7000/10000 train_loss:1.4974 val_loss:1.4933 train_time:479656ms step_avg:68.52ms
+[2025-07-07 13:32:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 13:32:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 13:32:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 13:38:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 13:38:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 13:38:06] [Rank 0] Total Loss: 4.6878
+[2025-07-07 13:38:06] [Rank 0] Total FTA: 0.1269
+[2025-07-07 13:38:06] [Rank 0] Group 0 Loss: 4.9622
+[2025-07-07 13:38:06] [Rank 0] Group 1 Loss: 4.3775
+[2025-07-07 13:38:06] [Rank 0] Group 2 Loss: 4.3886
+[2025-07-07 13:38:06] [Rank 0] Group 3 Loss: 4.9721
+[2025-07-07 13:38:06] [Rank 0] Group 4 Loss: 4.6575
+[2025-07-07 13:38:06] [Rank 0] Group 5 Loss: 4.5580
+[2025-07-07 13:38:06] [Rank 0] Group 6 Loss: 4.5538
+[2025-07-07 13:38:06] [Rank 0] Group 7 Loss: 4.7306
+[2025-07-07 13:38:06] [Rank 0] Group 8 Loss: 4.6829
+[2025-07-07 13:38:06] [Rank 0] Group 9 Loss: 4.6806
+[2025-07-07 13:38:06] [Rank 0] Group 10 Loss: 4.6966
+[2025-07-07 13:38:06] [Rank 0] Group 11 Loss: 4.6970
+[2025-07-07 13:38:06] [Rank 0] Group 0 FTA: 0.1808
+[2025-07-07 13:38:06] [Rank 0] Group 1 FTA: 0.3385
+[2025-07-07 13:38:06] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 13:38:06] [Rank 0] Group 3 FTA: 0.1068
+[2025-07-07 13:38:06] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 13:38:06] [Rank 0] Group 5 FTA: 0.0885
+[2025-07-07 13:38:06] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-07 13:38:06] [Rank 0] Group 7 FTA: 0.1250
+[2025-07-07 13:38:06] [Rank 0] Group 8 FTA: 0.1250
+[2025-07-07 13:38:06] [Rank 0] Group 9 FTA: 0.1133
+[2025-07-07 13:38:06] [Rank 0] Group 10 FTA: 0.1133
+[2025-07-07 13:38:06] [Rank 0] Group 11 FTA: 0.1113
+[2025-07-07 13:38:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png
+[2025-07-07 13:38:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png
+[2025-07-07 13:38:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png
+[2025-07-07 13:38:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png
+[2025-07-07 13:38:07] [Rank 0] step:7001/10000 train_time:479668ms step_avg:68.51ms
+[2025-07-07 13:38:09] [Rank 0] step:7021/10000 train_time:481099ms step_avg:68.52ms
+[2025-07-07 13:38:10] [Rank 0] step:7041/10000 train_time:481836ms step_avg:68.43ms
+[2025-07-07 13:38:12] [Rank 0] step:7061/10000 train_time:483204ms step_avg:68.43ms
+[2025-07-07 13:38:13] [Rank 0] step:7081/10000 train_time:484572ms step_avg:68.43ms
+[2025-07-07 13:38:14] [Rank 0] step:7101/10000 train_time:485941ms step_avg:68.43ms
+[2025-07-07 13:38:16] [Rank 0] step:7121/10000 train_time:487311ms step_avg:68.43ms
+[2025-07-07 13:38:17] [Rank 0] step:7141/10000 train_time:488681ms step_avg:68.43ms
+[2025-07-07 13:38:18] [Rank 0] step:7161/10000 train_time:490051ms step_avg:68.43ms
+[2025-07-07 13:38:20] [Rank 0] step:7181/10000 train_time:491421ms step_avg:68.43ms
+[2025-07-07 13:38:21] [Rank 0] step:7201/10000 train_time:492839ms step_avg:68.44ms
+[2025-07-07 13:38:23] [Rank 0] step:7221/10000 train_time:494191ms step_avg:68.44ms
+[2025-07-07 13:38:24] [Rank 0] step:7241/10000 train_time:495563ms step_avg:68.44ms
+[2025-07-07 13:38:25] [Rank 0] step:7261/10000 train_time:496935ms step_avg:68.44ms
+[2025-07-07 13:38:27] [Rank 0] step:7281/10000 train_time:498309ms step_avg:68.44ms
+[2025-07-07 13:38:28] [Rank 0] step:7301/10000 train_time:499695ms step_avg:68.44ms
0] step:7301/10000 train_time:499695ms step_avg:68.44ms +[2025-07-07 13:38:29] [Rank 0] step:7321/10000 train_time:501068ms step_avg:68.44ms +[2025-07-07 13:38:29] [Rank 0] step:7321/10000 train_time:501068ms step_avg:68.44ms +[2025-07-07 13:38:31] [Rank 0] step:7341/10000 train_time:502443ms step_avg:68.44ms +[2025-07-07 13:38:31] [Rank 0] step:7341/10000 train_time:502443ms step_avg:68.44ms +[2025-07-07 13:38:32] [Rank 0] step:7361/10000 train_time:503817ms step_avg:68.44ms +[2025-07-07 13:38:32] [Rank 0] step:7361/10000 train_time:503817ms step_avg:68.44ms +[2025-07-07 13:38:34] [Rank 0] step:7381/10000 train_time:505868ms step_avg:68.54ms +[2025-07-07 13:38:34] [Rank 0] step:7381/10000 train_time:505868ms step_avg:68.54ms +[2025-07-07 13:38:35] [Rank 0] step:7401/10000 train_time:506609ms step_avg:68.45ms +[2025-07-07 13:38:35] [Rank 0] step:7401/10000 train_time:506609ms step_avg:68.45ms +[2025-07-07 13:38:36] [Rank 0] step:7421/10000 train_time:507983ms step_avg:68.45ms +[2025-07-07 13:38:36] [Rank 0] step:7421/10000 train_time:507983ms step_avg:68.45ms +[2025-07-07 13:38:38] [Rank 0] step:7441/10000 train_time:509357ms step_avg:68.45ms +[2025-07-07 13:38:38] [Rank 0] step:7441/10000 train_time:509357ms step_avg:68.45ms +[2025-07-07 13:38:39] [Rank 0] step:7461/10000 train_time:510731ms step_avg:68.45ms +[2025-07-07 13:38:39] [Rank 0] step:7461/10000 train_time:510731ms step_avg:68.45ms +[2025-07-07 13:38:40] [Rank 0] step:7481/10000 train_time:512104ms step_avg:68.45ms +[2025-07-07 13:38:40] [Rank 0] step:7481/10000 train_time:512104ms step_avg:68.45ms +[2025-07-07 13:38:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 13:38:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 13:38:43] [Rank 0] PRINT: step:7500/10000 train_loss:1.4708 val_loss:1.4709 train_time:514104ms step_avg:68.55ms +[2025-07-07 13:38:43] [Rank 0] PRINT: step:7500/10000 train_loss:1.4708 val_loss:1.4709 train_time:514104ms step_avg:68.55ms +[2025-07-07 13:38:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 13:38:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 13:38:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 13:38:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
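The warning above is a plain divisibility check: 1966080 / 262144 = 7.5, so a validation loop that only consumes whole batches covers 7 of them and leaves 131072 tokens unevaluated. A minimal sketch of that check (hypothetical helper, not the script's exact code):

    def plan_val_batches(val_tokens: int, val_batch_size: int) -> int:
        # Only whole batches are evaluated; any remainder is silently skipped.
        if val_tokens % val_batch_size != 0:
            print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
                  f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
        return val_tokens // val_batch_size

    num_val_batches = plan_val_batches(1966080, 262144)  # -> 7 full batches; 131072 tokens dropped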
+[2025-07-07 13:38:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 13:44:15] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 13:44:15] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 13:44:15] [Rank 0] Total Loss: 4.6862 +[2025-07-07 13:44:15] [Rank 0] Total FTA: 0.1076 +[2025-07-07 13:44:15] [Rank 0] Group 0 Loss: 4.9647 +[2025-07-07 13:44:15] [Rank 0] Group 1 Loss: 4.3348 +[2025-07-07 13:44:15] [Rank 0] Group 2 Loss: 4.3545 +[2025-07-07 13:44:15] [Rank 0] Group 3 Loss: 5.0586 +[2025-07-07 13:44:15] [Rank 0] Group 4 Loss: 4.6341 +[2025-07-07 13:44:15] [Rank 0] Group 5 Loss: 4.5754 +[2025-07-07 13:44:15] [Rank 0] Group 6 Loss: 4.5615 +[2025-07-07 13:44:15] [Rank 0] Group 7 Loss: 4.7155 +[2025-07-07 13:44:15] [Rank 0] Group 8 Loss: 4.6746 +[2025-07-07 13:44:15] [Rank 0] Group 9 Loss: 4.6807 +[2025-07-07 13:44:15] [Rank 0] Group 10 Loss: 4.7260 +[2025-07-07 13:44:15] [Rank 0] Group 11 Loss: 4.6760 +[2025-07-07 13:44:15] [Rank 0] Group 0 FTA: 0.1612 +[2025-07-07 13:44:15] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 13:44:15] [Rank 0] Group 2 FTA: 0.1719 +[2025-07-07 13:44:15] [Rank 0] Group 3 FTA: 0.0417 +[2025-07-07 13:44:15] [Rank 0] Group 4 FTA: 0.0729 +[2025-07-07 13:44:15] [Rank 0] Group 5 FTA: 0.1016 +[2025-07-07 13:44:15] [Rank 0] Group 6 FTA: 0.0938 +[2025-07-07 13:44:15] [Rank 0] Group 7 FTA: 0.1120 +[2025-07-07 13:44:15] [Rank 0] Group 8 FTA: 0.1120 +[2025-07-07 13:44:15] [Rank 0] Group 9 FTA: 0.0938 +[2025-07-07 13:44:15] [Rank 0] Group 10 FTA: 0.1465 +[2025-07-07 13:44:15] [Rank 0] Group 11 FTA: 0.1094 +[2025-07-07 13:44:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 13:44:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 13:44:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 13:44:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 13:44:16] [Rank 0] step:7501/10000 train_time:514114ms step_avg:68.54ms +[2025-07-07 13:44:18] [Rank 0] step:7521/10000 train_time:514869ms step_avg:68.46ms +[2025-07-07 13:44:19] [Rank 0] step:7541/10000 train_time:516236ms step_avg:68.46ms +[2025-07-07 13:44:21] [Rank 0] step:7561/10000 train_time:517653ms step_avg:68.46ms +[2025-07-07 13:44:22] [Rank 0] step:7581/10000 train_time:519009ms step_avg:68.46ms +[2025-07-07 13:44:23] [Rank 0] step:7601/10000 train_time:520378ms step_avg:68.46ms +[2025-07-07 13:44:25] [Rank 0] step:7621/10000 train_time:521746ms step_avg:68.46ms +[2025-07-07 13:44:26] [Rank 0] step:7641/10000 train_time:523115ms step_avg:68.46ms +[2025-07-07 13:44:27] [Rank 0] step:7661/10000 train_time:524485ms step_avg:68.46ms +[2025-07-07 13:44:29] [Rank 0] step:7681/10000 train_time:525855ms step_avg:68.46ms +[2025-07-07 13:44:30] [Rank 0] step:7701/10000 train_time:527227ms step_avg:68.46ms +[2025-07-07 13:44:31] [Rank 0] step:7721/10000 train_time:528598ms step_avg:68.46ms +[2025-07-07 13:44:33] [Rank 0] step:7741/10000 train_time:530653ms step_avg:68.55ms +[2025-07-07 13:44:34] [Rank 0] step:7761/10000 train_time:531393ms step_avg:68.47ms +[2025-07-07 13:44:36] [Rank 0] step:7781/10000 train_time:532766ms step_avg:68.47ms +[2025-07-07 13:44:37] [Rank 0] step:7801/10000 train_time:534138ms step_avg:68.47ms +[2025-07-07 13:44:38] [Rank 0] step:7821/10000 train_time:535510ms step_avg:68.47ms +[2025-07-07 13:44:40] [Rank 0] step:7841/10000 train_time:536883ms step_avg:68.47ms +[2025-07-07 13:44:41] [Rank 0] step:7861/10000 train_time:538257ms step_avg:68.47ms +[2025-07-07 13:44:42] [Rank 0] step:7881/10000 train_time:539632ms step_avg:68.47ms +[2025-07-07 13:44:44] [Rank 0] step:7901/10000 train_time:541007ms step_avg:68.47ms +[2025-07-07 13:44:45] [Rank 0] step:7921/10000 train_time:542633ms step_avg:68.51ms +[2025-07-07 13:44:47] [Rank 0] step:7941/10000 train_time:543778ms step_avg:68.48ms +[2025-07-07 13:44:48] [Rank 0] step:7961/10000 train_time:545154ms step_avg:68.48ms +[2025-07-07 13:44:49] [Rank 0] step:7981/10000 train_time:546548ms step_avg:68.48ms +[2025-07-07 13:44:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 13:44:52] [Rank 0] PRINT: step:8000/10000 train_loss:1.4468 val_loss:1.4501 train_time:548549ms step_avg:68.57ms +[2025-07-07 13:44:52] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 13:44:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
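Each detailed evaluation re-draws a stratified subset (5633 samples against a ~5000 target; per-class rounding is why the final count need not hit the target exactly). The sampler itself is not shown in this excerpt; a minimal sketch of one plausible implementation:

    import random
    from collections import defaultdict

    def stratified_sample(labels, target_total=5000, seed=42):
        # Bucket sample indices by class, then take an equal, rounded-up quota
        # from each bucket. Rounding up across many classes is one reason the
        # drawn set can overshoot the target.
        rng = random.Random(seed)
        by_class = defaultdict(list)
        for idx, label in enumerate(labels):
            by_class[label].append(idx)
        quota = -(-target_total // len(by_class))  # ceiling division
        picked = []
        for idxs in by_class.values():
            rng.shuffle(idxs)
            picked.extend(idxs[:quota])
        return sorted(picked)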
+[2025-07-07 13:44:52] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 13:50:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 13:50:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 13:50:22] [Rank 0] Total Loss: 4.7289 +[2025-07-07 13:50:22] [Rank 0] Total FTA: 0.1299 +[2025-07-07 13:50:22] [Rank 0] Group 0 Loss: 5.0180 +[2025-07-07 13:50:22] [Rank 0] Group 1 Loss: 4.4161 +[2025-07-07 13:50:22] [Rank 0] Group 2 Loss: 4.3292 +[2025-07-07 13:50:22] [Rank 0] Group 3 Loss: 5.0222 +[2025-07-07 13:50:22] [Rank 0] Group 4 Loss: 4.6585 +[2025-07-07 13:50:22] [Rank 0] Group 5 Loss: 4.6493 +[2025-07-07 13:50:22] [Rank 0] Group 6 Loss: 4.6260 +[2025-07-07 13:50:22] [Rank 0] Group 7 Loss: 4.7665 +[2025-07-07 13:50:22] [Rank 0] Group 8 Loss: 4.7663 +[2025-07-07 13:50:22] [Rank 0] Group 9 Loss: 4.7223 +[2025-07-07 13:50:22] [Rank 0] Group 10 Loss: 4.7258 +[2025-07-07 13:50:22] [Rank 0] Group 11 Loss: 4.7390 +[2025-07-07 13:50:22] [Rank 0] Group 0 FTA: 0.1508 +[2025-07-07 13:50:22] [Rank 0] Group 1 FTA: 0.1667 +[2025-07-07 13:50:22] [Rank 0] Group 2 FTA: 0.1042 +[2025-07-07 13:50:22] [Rank 0] Group 3 FTA: 0.0964 +[2025-07-07 13:50:22] [Rank 0] Group 4 FTA: 0.0625 +[2025-07-07 13:50:22] [Rank 0] Group 5 FTA: 0.1120 +[2025-07-07 13:50:22] [Rank 0] Group 6 FTA: 0.1484 +[2025-07-07 13:50:22] [Rank 0] Group 7 FTA: 0.1484 +[2025-07-07 13:50:22] [Rank 0] Group 8 FTA: 0.1276 +[2025-07-07 13:50:22] [Rank 0] Group 9 FTA: 0.1484 +[2025-07-07 13:50:22] [Rank 0] Group 10 FTA: 0.1250 +[2025-07-07 13:50:22] [Rank 0] Group 11 FTA: 0.1396 +[2025-07-07 13:50:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 13:50:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 13:50:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 13:50:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 13:50:24] [Rank 0] step:8001/10000 train_time:548559ms step_avg:68.56ms +[2025-07-07 13:50:25] [Rank 0] step:8021/10000 train_time:549317ms step_avg:68.48ms +[2025-07-07 13:50:26] [Rank 0] step:8041/10000 train_time:550684ms step_avg:68.48ms +[2025-07-07 13:50:28] [Rank 0] step:8061/10000 train_time:552052ms step_avg:68.48ms +[2025-07-07 13:50:29] [Rank 0] step:8081/10000 train_time:553421ms step_avg:68.48ms +[2025-07-07 13:50:31] [Rank 0] step:8101/10000 train_time:555038ms step_avg:68.51ms +[2025-07-07 13:50:32] [Rank 0] step:8121/10000 train_time:556158ms step_avg:68.48ms +[2025-07-07 13:50:33] [Rank 0] step:8141/10000 train_time:557528ms step_avg:68.48ms +[2025-07-07 13:50:35] [Rank 0] step:8161/10000 train_time:558900ms step_avg:68.48ms +[2025-07-07 13:50:36] [Rank 0] step:8181/10000 train_time:560272ms step_avg:68.48ms +[2025-07-07 13:50:37] [Rank 0] step:8201/10000 train_time:561644ms step_avg:68.48ms +[2025-07-07 13:50:39] [Rank 0] step:8221/10000 train_time:563016ms step_avg:68.49ms +[2025-07-07 13:50:40] [Rank 0] step:8241/10000 train_time:564388ms step_avg:68.49ms +[2025-07-07 13:50:42] [Rank 0] step:8261/10000 train_time:565761ms step_avg:68.49ms +[2025-07-07 13:50:43] [Rank 0] step:8281/10000 train_time:567182ms step_avg:68.49ms +[2025-07-07 13:50:44] [Rank 0] step:8301/10000 train_time:568553ms step_avg:68.49ms +[2025-07-07 13:50:46] [Rank 0] step:8321/10000 train_time:569925ms step_avg:68.49ms +[2025-07-07 13:50:47] [Rank 0] step:8341/10000 train_time:571298ms step_avg:68.49ms +[2025-07-07 13:50:48] [Rank 0] step:8361/10000 train_time:572671ms step_avg:68.49ms +[2025-07-07 13:50:50] [Rank 0] step:8381/10000 train_time:574042ms step_avg:68.49ms +[2025-07-07 13:50:51] [Rank 0] step:8401/10000 train_time:575415ms step_avg:68.49ms +[2025-07-07 13:50:53] [Rank 0] step:8421/10000 train_time:576789ms step_avg:68.49ms +[2025-07-07 13:50:54] [Rank 0] step:8441/10000 train_time:578161ms step_avg:68.49ms +[2025-07-07 13:50:55] [Rank 0] step:8461/10000 train_time:579582ms step_avg:68.50ms +[2025-07-07 13:50:57] [Rank 0] step:8481/10000 train_time:580938ms step_avg:68.50ms +[2025-07-07 13:50:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 13:50:59] [Rank 0] PRINT: step:8500/10000 train_loss:1.4254 val_loss:1.4353 train_time:582937ms step_avg:68.58ms +[2025-07-07 13:50:59] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 13:50:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
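The FTA reported per group alongside loss is not defined in this excerpt; assuming it stands for first-token accuracy (the argmax prediction at the position that should emit each answer's first token), a minimal sketch:

    import torch

    @torch.no_grad()
    def first_token_accuracy(logits: torch.Tensor, first_token_ids: torch.Tensor,
                             positions: torch.Tensor) -> float:
        # logits: [num_samples, seq_len, vocab]; positions[i] is the step whose
        # prediction should produce sample i's first answer token.
        preds = logits[torch.arange(len(positions)), positions].argmax(-1)
        return (preds == first_token_ids).float().mean().item()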
+[2025-07-07 13:50:59] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 13:56:30] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 13:56:30] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 13:56:30] [Rank 0] Total Loss: 4.7452 +[2025-07-07 13:56:30] [Rank 0] Total FTA: 0.1520 +[2025-07-07 13:56:30] [Rank 0] Group 0 Loss: 5.0714 +[2025-07-07 13:56:30] [Rank 0] Group 1 Loss: 4.3795 +[2025-07-07 13:56:30] [Rank 0] Group 2 Loss: 4.3293 +[2025-07-07 13:56:30] [Rank 0] Group 3 Loss: 5.1774 +[2025-07-07 13:56:30] [Rank 0] Group 4 Loss: 4.6420 +[2025-07-07 13:56:30] [Rank 0] Group 5 Loss: 4.6774 +[2025-07-07 13:56:30] [Rank 0] Group 6 Loss: 4.5984 +[2025-07-07 13:56:30] [Rank 0] Group 7 Loss: 4.7846 +[2025-07-07 13:56:30] [Rank 0] Group 8 Loss: 4.7579 +[2025-07-07 13:56:30] [Rank 0] Group 9 Loss: 4.7255 +[2025-07-07 13:56:30] [Rank 0] Group 10 Loss: 4.7045 +[2025-07-07 13:56:30] [Rank 0] Group 11 Loss: 4.7559 +[2025-07-07 13:56:30] [Rank 0] Group 0 FTA: 0.1834 +[2025-07-07 13:56:30] [Rank 0] Group 1 FTA: 0.3333 +[2025-07-07 13:56:30] [Rank 0] Group 2 FTA: 0.1979 +[2025-07-07 13:56:30] [Rank 0] Group 3 FTA: 0.1172 +[2025-07-07 13:56:30] [Rank 0] Group 4 FTA: 0.0938 +[2025-07-07 13:56:30] [Rank 0] Group 5 FTA: 0.1380 +[2025-07-07 13:56:30] [Rank 0] Group 6 FTA: 0.0885 +[2025-07-07 13:56:30] [Rank 0] Group 7 FTA: 0.1458 +[2025-07-07 13:56:30] [Rank 0] Group 8 FTA: 0.1406 +[2025-07-07 13:56:30] [Rank 0] Group 9 FTA: 0.1445 +[2025-07-07 13:56:30] [Rank 0] Group 10 FTA: 0.1504 +[2025-07-07 13:56:30] [Rank 0] Group 11 FTA: 0.1162 +[2025-07-07 13:56:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 13:56:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 13:56:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 13:56:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 13:56:32] [Rank 0] step:8501/10000 train_time:582948ms step_avg:68.57ms +[2025-07-07 13:56:33] [Rank 0] step:8521/10000 train_time:583717ms step_avg:68.50ms +[2025-07-07 13:56:35] [Rank 0] step:8541/10000 train_time:585084ms step_avg:68.50ms +[2025-07-07 13:56:36] [Rank 0] step:8561/10000 train_time:586452ms step_avg:68.50ms +[2025-07-07 13:56:37] [Rank 0] step:8581/10000 train_time:587821ms step_avg:68.50ms +[2025-07-07 13:56:39] [Rank 0] step:8601/10000 train_time:589190ms step_avg:68.50ms +[2025-07-07 13:56:40] [Rank 0] step:8621/10000 train_time:590560ms step_avg:68.50ms +[2025-07-07 13:56:41] [Rank 0] step:8641/10000 train_time:592181ms step_avg:68.53ms +[2025-07-07 13:56:43] [Rank 0] step:8661/10000 train_time:593347ms step_avg:68.51ms +[2025-07-07 13:56:44] [Rank 0] step:8681/10000 train_time:594719ms step_avg:68.51ms +[2025-07-07 13:56:46] [Rank 0] step:8701/10000 train_time:596090ms step_avg:68.51ms +[2025-07-07 13:56:47] [Rank 0] step:8721/10000 train_time:597462ms step_avg:68.51ms +[2025-07-07 13:56:48] [Rank 0] step:8741/10000 train_time:598834ms step_avg:68.51ms +[2025-07-07 13:56:50] [Rank 0] step:8761/10000 train_time:600207ms step_avg:68.51ms +[2025-07-07 13:56:51] [Rank 0] step:8781/10000 train_time:601581ms step_avg:68.51ms +[2025-07-07 13:56:52] [Rank 0] step:8801/10000 train_time:602955ms step_avg:68.51ms +[2025-07-07 13:56:54] [Rank 0] step:8821/10000 train_time:604329ms step_avg:68.51ms +[2025-07-07 13:56:55] [Rank 0] step:8841/10000 train_time:605734ms step_avg:68.51ms +[2025-07-07 13:56:57] [Rank 0] step:8861/10000 train_time:607110ms step_avg:68.51ms +[2025-07-07 13:56:58] [Rank 0] step:8881/10000 train_time:608486ms step_avg:68.52ms +[2025-07-07 13:56:59] [Rank 0] step:8901/10000 train_time:609862ms step_avg:68.52ms +[2025-07-07 13:57:01] [Rank 0] step:8921/10000 train_time:611237ms step_avg:68.52ms +[2025-07-07 13:57:02] [Rank 0] step:8941/10000 train_time:612613ms step_avg:68.52ms +[2025-07-07 13:57:03] [Rank 0] step:8961/10000 train_time:613989ms step_avg:68.52ms +[2025-07-07 13:57:05] [Rank 0] step:8981/10000 train_time:615363ms step_avg:68.52ms +[2025-07-07 13:57:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 13:57:07] [Rank 0] PRINT: step:9000/10000 train_loss:1.4079 val_loss:1.4180 train_time:617366ms step_avg:68.60ms +[2025-07-07 13:57:07] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 13:57:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
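The step_avg printed with every heartbeat is simply cumulative training time divided by the step index, and the timer excludes evaluation: train_time advances only ~17 ms between step 9000 and step 9001 even though a multi-minute detailed evaluation runs in between. A sketch matching the logged numbers:

    def step_avg_ms(train_time_ms: float, step: int) -> float:
        # Cumulative wall-clock training time over completed steps.
        return train_time_ms / max(step, 1)

    assert abs(step_avg_ms(617366, 9000) - 68.60) < 0.01  # step 9000 above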
+[2025-07-07 13:57:07] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 14:02:37] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 14:02:37] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 14:02:37] [Rank 0] Total Loss: 4.7845 +[2025-07-07 14:02:37] [Rank 0] Total FTA: 0.1592 +[2025-07-07 14:02:37] [Rank 0] Group 0 Loss: 5.0116 +[2025-07-07 14:02:37] [Rank 0] Group 1 Loss: 4.4994 +[2025-07-07 14:02:37] [Rank 0] Group 2 Loss: 4.4180 +[2025-07-07 14:02:37] [Rank 0] Group 3 Loss: 5.1582 +[2025-07-07 14:02:37] [Rank 0] Group 4 Loss: 4.6778 +[2025-07-07 14:02:37] [Rank 0] Group 5 Loss: 4.6989 +[2025-07-07 14:02:37] [Rank 0] Group 6 Loss: 4.6974 +[2025-07-07 14:02:37] [Rank 0] Group 7 Loss: 4.7566 +[2025-07-07 14:02:37] [Rank 0] Group 8 Loss: 4.8326 +[2025-07-07 14:02:38] [Rank 0] Group 9 Loss: 4.7795 +[2025-07-07 14:02:38] [Rank 0] Group 10 Loss: 4.8050 +[2025-07-07 14:02:38] [Rank 0] Group 11 Loss: 4.8067 +[2025-07-07 14:02:38] [Rank 0] Group 0 FTA: 0.3212 +[2025-07-07 14:02:38] [Rank 0] Group 1 FTA: 0.2865 +[2025-07-07 14:02:38] [Rank 0] Group 2 FTA: 0.0833 +[2025-07-07 14:02:38] [Rank 0] Group 3 FTA: 0.0807 +[2025-07-07 14:02:38] [Rank 0] Group 4 FTA: 0.0677 +[2025-07-07 14:02:38] [Rank 0] Group 5 FTA: 0.0781 +[2025-07-07 14:02:38] [Rank 0] Group 6 FTA: 0.1328 +[2025-07-07 14:02:38] [Rank 0] Group 7 FTA: 0.1380 +[2025-07-07 14:02:38] [Rank 0] Group 8 FTA: 0.1797 +[2025-07-07 14:02:38] [Rank 0] Group 9 FTA: 0.1328 +[2025-07-07 14:02:38] [Rank 0] Group 10 FTA: 0.1523 +[2025-07-07 14:02:38] [Rank 0] Group 11 FTA: 0.1328 +[2025-07-07 14:02:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 14:02:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 14:02:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 14:02:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 14:02:40] [Rank 0] step:9001/10000 train_time:617383ms step_avg:68.59ms +[2025-07-07 14:02:41] [Rank 0] step:9021/10000 train_time:618846ms step_avg:68.60ms +[2025-07-07 14:02:42] [Rank 0] step:9041/10000 train_time:620204ms step_avg:68.60ms +[2025-07-07 14:02:44] [Rank 0] step:9061/10000 train_time:621574ms step_avg:68.60ms +[2025-07-07 14:02:45] [Rank 0] step:9081/10000 train_time:622944ms step_avg:68.60ms +[2025-07-07 14:02:47] [Rank 0] step:9101/10000 train_time:624316ms step_avg:68.60ms +[2025-07-07 14:02:48] [Rank 0] step:9121/10000 train_time:625686ms step_avg:68.60ms +[2025-07-07 14:02:49] [Rank 0] step:9141/10000 train_time:627059ms step_avg:68.60ms +[2025-07-07 14:02:51] [Rank 0] step:9161/10000 train_time:628431ms step_avg:68.60ms +[2025-07-07 14:02:52] [Rank 0] step:9181/10000 train_time:629849ms step_avg:68.60ms +[2025-07-07 14:02:53] [Rank 0] step:9201/10000 train_time:631221ms step_avg:68.60ms +[2025-07-07 14:02:55] [Rank 0] step:9221/10000 train_time:632595ms step_avg:68.60ms +[2025-07-07 14:02:56] [Rank 0] step:9241/10000 train_time:633970ms step_avg:68.60ms +[2025-07-07 14:02:58] [Rank 0] step:9261/10000 train_time:635346ms step_avg:68.60ms +[2025-07-07 14:02:59] [Rank 0] step:9281/10000 train_time:636721ms step_avg:68.60ms +[2025-07-07 14:03:00] [Rank 0] step:9301/10000 train_time:638096ms step_avg:68.61ms +[2025-07-07 14:03:02] [Rank 0] step:9321/10000 train_time:639472ms step_avg:68.61ms +[2025-07-07 14:03:03] [Rank 0] step:9341/10000 train_time:640849ms step_avg:68.61ms +[2025-07-07 14:03:05] [Rank 0] step:9361/10000 train_time:642274ms step_avg:68.61ms +[2025-07-07 14:03:06] [Rank 0] step:9381/10000 train_time:643638ms step_avg:68.61ms +[2025-07-07 14:03:07] [Rank 0] step:9401/10000 train_time:645022ms step_avg:68.61ms +[2025-07-07 14:03:09] [Rank 0] step:9421/10000 train_time:646399ms step_avg:68.61ms +[2025-07-07 14:03:10] [Rank 0] step:9441/10000 train_time:647775ms step_avg:68.61ms +[2025-07-07 14:03:11] [Rank 0] step:9461/10000 train_time:649153ms step_avg:68.61ms +[2025-07-07 14:03:13] [Rank 0] step:9481/10000 train_time:650530ms step_avg:68.61ms +[2025-07-07 14:03:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 14:03:15] [Rank 0] PRINT: step:9500/10000 train_loss:1.3928 val_loss:1.4044 train_time:652534ms step_avg:68.69ms +[2025-07-07 14:03:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 14:03:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
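The per-group numbers in each results block are plain means over the samples that fall in a group (the group definitions themselves are not shown in this excerpt). A sketch of that bookkeeping:

    from collections import defaultdict

    def aggregate_by_group(losses, groups):
        # losses[i] is sample i's loss; groups[i] its group id (0-11 here).
        sums, counts = defaultdict(float), defaultdict(int)
        for loss, g in zip(losses, groups):
            sums[g] += loss
            counts[g] += 1
        return {g: sums[g] / counts[g] for g in sorted(sums)}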
+[2025-07-07 14:03:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 14:08:45] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 14:08:45] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 14:08:45] [Rank 0] Total Loss: 4.8086 +[2025-07-07 14:08:45] [Rank 0] Total FTA: 0.1482 +[2025-07-07 14:08:45] [Rank 0] Group 0 Loss: 5.0830 +[2025-07-07 14:08:45] [Rank 0] Group 1 Loss: 4.4657 +[2025-07-07 14:08:45] [Rank 0] Group 2 Loss: 4.4645 +[2025-07-07 14:08:45] [Rank 0] Group 3 Loss: 5.2116 +[2025-07-07 14:08:45] [Rank 0] Group 4 Loss: 4.7674 +[2025-07-07 14:08:45] [Rank 0] Group 5 Loss: 4.7398 +[2025-07-07 14:08:45] [Rank 0] Group 6 Loss: 4.6937 +[2025-07-07 14:08:45] [Rank 0] Group 7 Loss: 4.8221 +[2025-07-07 14:08:45] [Rank 0] Group 8 Loss: 4.8025 +[2025-07-07 14:08:45] [Rank 0] Group 9 Loss: 4.7849 +[2025-07-07 14:08:45] [Rank 0] Group 10 Loss: 4.7863 +[2025-07-07 14:08:45] [Rank 0] Group 11 Loss: 4.8074 +[2025-07-07 14:08:45] [Rank 0] Group 0 FTA: 0.1873 +[2025-07-07 14:08:45] [Rank 0] Group 1 FTA: 0.3281 +[2025-07-07 14:08:45] [Rank 0] Group 2 FTA: 0.1536 +[2025-07-07 14:08:45] [Rank 0] Group 3 FTA: 0.0755 +[2025-07-07 14:08:45] [Rank 0] Group 4 FTA: 0.0781 +[2025-07-07 14:08:45] [Rank 0] Group 5 FTA: 0.1354 +[2025-07-07 14:08:45] [Rank 0] Group 6 FTA: 0.1172 +[2025-07-07 14:08:45] [Rank 0] Group 7 FTA: 0.1536 +[2025-07-07 14:08:45] [Rank 0] Group 8 FTA: 0.1276 +[2025-07-07 14:08:45] [Rank 0] Group 9 FTA: 0.1602 +[2025-07-07 14:08:45] [Rank 0] Group 10 FTA: 0.1504 +[2025-07-07 14:08:45] [Rank 0] Group 11 FTA: 0.1211 +[2025-07-07 14:08:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 14:08:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 14:08:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 14:08:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 14:08:47] [Rank 0] step:9501/10000 train_time:652545ms step_avg:68.68ms +[2025-07-07 14:08:48] [Rank 0] step:9521/10000 train_time:653310ms step_avg:68.62ms +[2025-07-07 14:08:49] [Rank 0] step:9541/10000 train_time:654932ms step_avg:68.64ms +[2025-07-07 14:08:51] [Rank 0] step:9561/10000 train_time:656077ms step_avg:68.62ms +[2025-07-07 14:08:52] [Rank 0] step:9581/10000 train_time:657449ms step_avg:68.62ms +[2025-07-07 14:08:54] [Rank 0] step:9601/10000 train_time:658819ms step_avg:68.62ms +[2025-07-07 14:08:55] [Rank 0] step:9621/10000 train_time:660189ms step_avg:68.62ms +[2025-07-07 14:08:56] [Rank 0] step:9641/10000 train_time:661562ms step_avg:68.62ms +[2025-07-07 14:08:58] [Rank 0] step:9661/10000 train_time:662935ms step_avg:68.62ms +[2025-07-07 14:08:59] [Rank 0] step:9681/10000 train_time:664309ms step_avg:68.62ms +[2025-07-07 14:09:00] [Rank 0] step:9701/10000 train_time:665683ms step_avg:68.62ms +[2025-07-07 14:09:02] [Rank 0] step:9721/10000 train_time:667057ms step_avg:68.62ms +[2025-07-07 14:09:03] [Rank 0] step:9741/10000 train_time:668465ms step_avg:68.62ms +[2025-07-07 14:09:05] [Rank 0] step:9761/10000 train_time:669841ms step_avg:68.62ms +[2025-07-07 14:09:06] [Rank 0] step:9781/10000 train_time:671216ms step_avg:68.62ms +[2025-07-07 14:09:07] [Rank 0] step:9801/10000 train_time:672593ms step_avg:68.62ms +[2025-07-07 14:09:09] [Rank 0] step:9821/10000 train_time:673969ms step_avg:68.63ms +[2025-07-07 14:09:10] [Rank 0] step:9841/10000 train_time:675346ms step_avg:68.63ms +[2025-07-07 14:09:11] [Rank 0] step:9861/10000 train_time:676722ms step_avg:68.63ms +[2025-07-07 14:09:13] [Rank 0] step:9881/10000 train_time:678098ms step_avg:68.63ms +[2025-07-07 14:09:14] [Rank 0] step:9901/10000 train_time:680140ms step_avg:68.69ms +[2025-07-07 14:09:16] [Rank 0] step:9921/10000 train_time:680881ms step_avg:68.63ms +[2025-07-07 14:09:17] [Rank 0] step:9941/10000 train_time:682258ms step_avg:68.63ms +[2025-07-07 14:09:18] [Rank 0] step:9961/10000 train_time:683635ms step_avg:68.63ms +[2025-07-07 14:09:20] [Rank 0] step:9981/10000 train_time:685012ms step_avg:68.63ms +[2025-07-07 14:09:21] [Rank 0] step:10000/10000 train_time:686319ms step_avg:68.63ms +[2025-07-07 14:09:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 14:09:22] [Rank 0] PRINT: step:10000/10000 train_loss:1.3804 val_loss:1.3943 train_time:687019ms step_avg:68.70ms +[2025-07-07 14:09:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 14:09:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
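Each run closes by printing its peak GPU memory (see the "Peak memory allocated" line below). Assuming the script reads torch.cuda's peak counters, the readout reduces to:

    import torch

    def report_peak_memory() -> str:
        # Peak bytes since process start (or the last counter reset), in MiB.
        alloc = torch.cuda.max_memory_allocated() // (1024 * 1024)
        reserved = torch.cuda.max_memory_reserved() // (1024 * 1024)
        return f"Peak memory allocated: {alloc} MiB reserved: {reserved} MiB"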
+[2025-07-07 14:09:22] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 14:14:50] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 14:14:50] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 14:14:50] [Rank 0] Total Loss: 4.8121 +[2025-07-07 14:14:50] [Rank 0] Total FTA: 0.1809 +[2025-07-07 14:14:50] [Rank 0] Group 0 Loss: 5.0129 +[2025-07-07 14:14:50] [Rank 0] Group 1 Loss: 4.5017 +[2025-07-07 14:14:50] [Rank 0] Group 2 Loss: 4.4407 +[2025-07-07 14:14:50] [Rank 0] Group 3 Loss: 5.1700 +[2025-07-07 14:14:50] [Rank 0] Group 4 Loss: 4.6858 +[2025-07-07 14:14:50] [Rank 0] Group 5 Loss: 4.7286 +[2025-07-07 14:14:50] [Rank 0] Group 6 Loss: 4.6977 +[2025-07-07 14:14:50] [Rank 0] Group 7 Loss: 4.9065 +[2025-07-07 14:14:50] [Rank 0] Group 8 Loss: 4.8673 +[2025-07-07 14:14:50] [Rank 0] Group 9 Loss: 4.8186 +[2025-07-07 14:14:50] [Rank 0] Group 10 Loss: 4.8486 +[2025-07-07 14:14:50] [Rank 0] Group 11 Loss: 4.8284 +[2025-07-07 14:14:50] [Rank 0] Group 0 FTA: 0.2939 +[2025-07-07 14:14:50] [Rank 0] Group 1 FTA: 0.2995 +[2025-07-07 14:14:50] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 14:14:50] [Rank 0] Group 3 FTA: 0.1328 +[2025-07-07 14:14:50] [Rank 0] Group 4 FTA: 0.1224 +[2025-07-07 14:14:50] [Rank 0] Group 5 FTA: 0.1276 +[2025-07-07 14:14:50] [Rank 0] Group 6 FTA: 0.1823 +[2025-07-07 14:14:50] [Rank 0] Group 7 FTA: 0.1901 +[2025-07-07 14:14:50] [Rank 0] Group 8 FTA: 0.1693 +[2025-07-07 14:14:50] [Rank 0] Group 9 FTA: 0.1836 +[2025-07-07 14:14:50] [Rank 0] Group 10 FTA: 0.1816 +[2025-07-07 14:14:50] [Rank 0] Group 11 FTA: 0.1484 +[2025-07-07 14:14:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_loss_curves.png +[2025-07-07 14:14:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/per_class_acc_curves.png +[2025-07-07 14:14:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_loss_curve.png +[2025-07-07 14:14:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_42/total_acc_curve.png +[2025-07-07 14:14:51] [Rank 0] step:10001/10000 train_time:687030ms step_avg:68.70ms +[2025-07-07 14:14:51] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 14:14:51 2025 --- +[2025-07-07 14:14:51] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10316 MiB diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..68c8c50b75a722793de6171e558294170a0aad67 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "47455a1a-9a84-42e6-a7f1-b01236f1126a", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..d36ec11b9a12862cff8c709a938a7272b45b9643 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dea8694d1a107471bc044a2753e6815ef53fbd0333ff98c2185a3c13f4565cbf +size 295769 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..7407a08b032c667ca6f8bc44e34c7df21ecf90e7 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7e0716cc76b5f7bb5355f095d3f4e59d16043fafc3056c258214af948dfbc38e +size 278630 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..8436392e1d01d4fd6535b1ec9a2ecdbb67c2b690 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:562d3336ed5fa74094402946c626a0709945a3305952add32bff620f21582a68 +size 88592 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..0eed40c14a4bd9d99a5f4de5f360da54a465a43f --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cd49cd36f813024cccf97eb01b77a85ff9fe869253772ed18e3503fdc865373 +size 109905 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/training_log_47455a1a-9a84-42e6-a7f1-b01236f1126a.txt b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/training_log_47455a1a-9a84-42e6-a7f1-b01236f1126a.txt new file mode 100644 index 0000000000000000000000000000000000000000..79f3d91cb40aaeb964424bbc6f1046e8a14e3ac1 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/training_log_47455a1a-9a84-42e6-a7f1-b01236f1126a.txt @@ -0,0 +1,5144 @@ +[2025-07-07 20:41:55] [Rank 0] PRINT: --- Script Start: Mon Jul 7 20:41:55 2025 --- +[2025-07-07 20:41:55] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-07 20:41:55] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 20:41:55] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-07 20:41:55] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45 +[2025-07-07 20:41:55] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, 
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
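+# Worked example: for this run's CLI args (optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002, seed=45), +# the folder-name f-string resolves to mode_5_param_qkvo_lr_0.0002_seed_45 under logs_bios/qa_0704/, +# the run directory that holds the config.json and curve PNGs added in this diff. +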
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the log file exactly once + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 20:41:55] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn, MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn, MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (no Muon; all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(Attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_GPT_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py variant whose CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 10485760
+    #train_seq_len = 48*1024    # earlier FlexAttention sequence lengths tried
+    #val_seq_len = 4*64*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+    val_tokens = 1966080
+    train_seq_len = 12*1024     # FlexAttention sequence length
+    val_seq_len = 4*16*1024     # FlexAttention sequence length for validation
+    # optimization
+    num_iterations = 10000      # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257          # (a 7-token toy vocabulary was used in earlier experiments)
+    # evaluation and logging
+    val_loss_every = 500        # Original: 125
+    save_checkpoint = False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True)  # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank)  # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup: optimizer_mode, parameterization, lr, and seed all go into the
+# run directory name, so each configuration logs into its own folder.
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True)  # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
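+# Note on the config dump below: the Hyperparameters fields above are written
+# without type annotations, so @dataclass registers no fields and the imported
+# dataclasses.asdict() would return {} -- hence the manual filtering of
+# args.__class__.__dict__ when config.json is saved. A minimal sketch of the
+# annotated alternative (illustrative only; the class name is hypothetical):
+#
+#     @dataclass
+#     class HyperparametersAnnotated:
+#         num_iterations: int = 10000
+#         cooldown_frac: float = 0.8
+#         vocab_size: int = 50257
+#
+#     asdict(HyperparametersAnnotated())
+#     # -> {'num_iterations': 10000, 'cooldown_frac': 0.8, 'vocab_size': 50257}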
+base_log_dir = Path("logs_bios/qa_0704")  # Base log directory for bioS mixed training
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_dir_path_str = str(run_dir_path)
+run_flag = not os.path.exists(run_dir_path)  # skip configurations whose run directory already exists
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items()
+                                if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True,
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code)  # Log the code
+    # ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    # Dataset constants (retained for reference; the cached sample file below is what is actually read)
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # truncate to the requested number of samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\nSample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f"  Original: {sample['original']}")
+            # print(f"  Cleaned : {sample['cleaned']}")
+            print(f"  Prompt : '{sample['prompt']}'")
+            print(f"  Answer : '{sample['answer']}'")
+            print(
+                f"  Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\nRunning model inference ...")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f"Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\nDetailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f"  Prompt : '{result['prompt']}'")
+            print(f"  Expected answer: '{result['answer']}'")
+            print(f"  Prediction: {result['predicted_token']} -> '{result['pred_text']}'")
+            print(f"  Expected  : {result['expected_token']} -> '{result['exp_text']}'")
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\nFinal result:\n"
+                + f"  First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f"  Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\nNo samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f"Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct per-class sample counts matching the paper's power-law distribution:
+    group 0 has a single class, each group g >= 1 has 2**(g-1) classes, and every
+    class in group g receives 2**(m - g) samples, for groups 0..m."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
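+# Worked example (illustrative only, not executed by the run): for m = 3 the
+# construction above yields
+#
+#     counts, groups = generate_powerlaw_selection_counts(3)
+#     counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#     groups == [0, 1, 2, 2, 3, 3, 3, 3]
+#
+# i.e. one head class with 2**m samples, and each subsequent group doubles the
+# number of classes while halving the samples per class.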
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation pass, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)    # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4.
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 20:41:56] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 20:41:56] [Rank 0] PRINT: Constructing model...
+[2025-07-07 20:41:58] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 20:41:58] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 20:41:58] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 20:41:58] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-07 20:41:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 20:41:58] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 20:41:58] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 20:41:59] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-07 20:41:59] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 20:41:59] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-07 20:41:59] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002).
+[2025-07-07 20:41:59] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-07 20:41:59] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-07 20:41:59] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 20:41:59] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 20:41:59] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 20:43:32] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 20:43:32] [Rank 0] PRINT: Starting training...
+[2025-07-07 20:43:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:43:40] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 20:43:41] [Rank 0] step:21/10000 train_time:821ms step_avg:39.11ms
+[2025-07-07 20:43:43] [Rank 0] step:41/10000 train_time:2145ms step_avg:52.32ms
+[2025-07-07 20:43:44] [Rank 0] step:61/10000 train_time:3469ms step_avg:56.87ms
+[2025-07-07 20:43:45] [Rank 0] step:81/10000 train_time:4793ms step_avg:59.18ms
+[2025-07-07 20:43:47] [Rank 0] step:101/10000 train_time:6115ms step_avg:60.55ms
+[2025-07-07 20:43:48] [Rank 0] step:121/10000 train_time:7439ms step_avg:61.48ms
+[2025-07-07 20:43:49] [Rank 0] step:141/10000 train_time:8762ms step_avg:62.14ms
+[2025-07-07 20:43:51] [Rank 0] step:161/10000 train_time:10085ms step_avg:62.64ms
+[2025-07-07 20:43:52] [Rank 0] step:181/10000 train_time:12076ms step_avg:66.72ms
+[2025-07-07 20:43:53] [Rank 0] step:201/10000 train_time:12790ms step_avg:63.63ms
+[2025-07-07 20:43:55] [Rank 0] step:221/10000 train_time:14116ms step_avg:63.88ms
+[2025-07-07 20:43:56] [Rank 0] step:241/10000 train_time:15442ms step_avg:64.07ms
+[2025-07-07 20:43:57] [Rank 0] step:261/10000 train_time:16768ms step_avg:64.24ms
+[2025-07-07 20:43:59] [Rank 0] step:281/10000 train_time:18094ms step_avg:64.39ms
+[2025-07-07 20:44:00] [Rank 0] step:301/10000 train_time:19421ms step_avg:64.52ms
+[2025-07-07 20:44:01] [Rank 0] step:321/10000 train_time:20749ms step_avg:64.64ms
+[2025-07-07 20:44:03] [Rank 0] step:341/10000 train_time:22075ms step_avg:64.73ms
+[2025-07-07 20:44:04] [Rank 0] step:361/10000 train_time:23402ms step_avg:64.82ms
+[2025-07-07 20:44:05] [Rank 0] step:381/10000 train_time:24790ms step_avg:65.07ms
+[2025-07-07 20:44:07] [Rank 0] step:401/10000 train_time:26117ms step_avg:65.13ms
+[2025-07-07 20:44:08] [Rank 0] step:421/10000 train_time:27442ms step_avg:65.18ms
+[2025-07-07 20:44:09] [Rank 0] step:441/10000 train_time:28768ms step_avg:65.23ms
+[2025-07-07 20:44:11] [Rank 0] step:461/10000 train_time:30096ms step_avg:65.28ms
+[2025-07-07 20:44:12] [Rank 0] step:481/10000 train_time:31425ms step_avg:65.33ms
+[2025-07-07 20:44:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:44:14] [Rank 0] PRINT: step:500/10000 train_loss:8.7434 val_loss:7.1372 train_time:33359ms step_avg:66.72ms
+[2025-07-07 20:44:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:44:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:44:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:49:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:49:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:49:39] [Rank 0]   Total Loss: 7.6987
+[2025-07-07 20:49:39] [Rank 0]   Total FTA: 0.0000
+[2025-07-07 20:49:39] [Rank 0]   Group 0 Loss: 7.6997
+[2025-07-07 20:49:39] [Rank 0]   Group 1 Loss: 7.6397
+[2025-07-07 20:49:39] [Rank 0]   Group 2 Loss: 7.8396
+[2025-07-07 20:49:39] [Rank 0]   Group 3 Loss: 7.6637
+[2025-07-07 20:49:39] [Rank 0]   Group 4 Loss: 7.7196
+[2025-07-07 20:49:39] [Rank 0]   Group 5 Loss: 7.6646
+[2025-07-07 20:49:39] [Rank 0]   Group 6 Loss: 7.7108
+[2025-07-07 20:49:39] [Rank 0]   Group 7 Loss: 7.6877
+[2025-07-07 20:49:39] [Rank 0]   Group 8 Loss: 7.6688
+[2025-07-07 20:49:39] [Rank 0]   Group 9 Loss: 7.6954
+[2025-07-07 20:49:39] [Rank 0]   Group 10 Loss: 7.7057
+[2025-07-07 20:49:39] [Rank 0]   Group 11 Loss: 7.6936
+[2025-07-07 20:49:39] [Rank 0]   Group 0 FTA: 0.0000
20:49:39] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 3 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 4 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 5 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 6 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 7 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 8 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 9 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 10 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] Group 11 FTA: 0.0000 +[2025-07-07 20:49:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 20:49:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 20:49:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 20:49:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 20:49:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 20:49:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 20:49:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 20:49:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 20:49:40] [Rank 0] step:501/10000 train_time:33369ms step_avg:66.60ms +[2025-07-07 20:49:40] [Rank 0] step:501/10000 train_time:33369ms step_avg:66.60ms +[2025-07-07 20:49:42] [Rank 0] step:521/10000 train_time:34098ms step_avg:65.45ms +[2025-07-07 20:49:42] [Rank 0] step:521/10000 train_time:34098ms step_avg:65.45ms +[2025-07-07 20:49:43] [Rank 0] step:541/10000 train_time:35422ms step_avg:65.47ms +[2025-07-07 20:49:43] [Rank 0] step:541/10000 train_time:35422ms step_avg:65.47ms +[2025-07-07 20:49:44] [Rank 0] step:561/10000 train_time:36910ms step_avg:65.79ms +[2025-07-07 20:49:44] [Rank 0] step:561/10000 train_time:36910ms step_avg:65.79ms +[2025-07-07 20:49:46] [Rank 0] step:581/10000 train_time:38233ms step_avg:65.81ms +[2025-07-07 20:49:46] [Rank 0] step:581/10000 train_time:38233ms step_avg:65.81ms +[2025-07-07 20:49:47] [Rank 0] step:601/10000 train_time:39558ms step_avg:65.82ms +[2025-07-07 20:49:47] [Rank 0] step:601/10000 train_time:39558ms step_avg:65.82ms +[2025-07-07 20:49:48] [Rank 0] step:621/10000 train_time:40884ms step_avg:65.84ms +[2025-07-07 20:49:48] [Rank 0] step:621/10000 train_time:40884ms 
step_avg:65.84ms +[2025-07-07 20:49:50] [Rank 0] step:641/10000 train_time:42211ms step_avg:65.85ms +[2025-07-07 20:49:50] [Rank 0] step:641/10000 train_time:42211ms step_avg:65.85ms +[2025-07-07 20:49:51] [Rank 0] step:661/10000 train_time:43537ms step_avg:65.87ms +[2025-07-07 20:49:51] [Rank 0] step:661/10000 train_time:43537ms step_avg:65.87ms +[2025-07-07 20:49:52] [Rank 0] step:681/10000 train_time:44864ms step_avg:65.88ms +[2025-07-07 20:49:52] [Rank 0] step:681/10000 train_time:44864ms step_avg:65.88ms +[2025-07-07 20:49:54] [Rank 0] step:701/10000 train_time:46194ms step_avg:65.90ms +[2025-07-07 20:49:54] [Rank 0] step:701/10000 train_time:46194ms step_avg:65.90ms +[2025-07-07 20:49:55] [Rank 0] step:721/10000 train_time:47779ms step_avg:66.27ms +[2025-07-07 20:49:55] [Rank 0] step:721/10000 train_time:47779ms step_avg:66.27ms +[2025-07-07 20:49:56] [Rank 0] step:741/10000 train_time:48911ms step_avg:66.01ms +[2025-07-07 20:49:56] [Rank 0] step:741/10000 train_time:48911ms step_avg:66.01ms +[2025-07-07 20:49:58] [Rank 0] step:761/10000 train_time:50249ms step_avg:66.03ms +[2025-07-07 20:49:58] [Rank 0] step:761/10000 train_time:50249ms step_avg:66.03ms +[2025-07-07 20:49:59] [Rank 0] step:781/10000 train_time:51592ms step_avg:66.06ms +[2025-07-07 20:49:59] [Rank 0] step:781/10000 train_time:51592ms step_avg:66.06ms +[2025-07-07 20:50:01] [Rank 0] step:801/10000 train_time:52936ms step_avg:66.09ms +[2025-07-07 20:50:01] [Rank 0] step:801/10000 train_time:52936ms step_avg:66.09ms +[2025-07-07 20:50:02] [Rank 0] step:821/10000 train_time:54284ms step_avg:66.12ms +[2025-07-07 20:50:02] [Rank 0] step:821/10000 train_time:54284ms step_avg:66.12ms +[2025-07-07 20:50:03] [Rank 0] step:841/10000 train_time:55630ms step_avg:66.15ms +[2025-07-07 20:50:03] [Rank 0] step:841/10000 train_time:55630ms step_avg:66.15ms +[2025-07-07 20:50:05] [Rank 0] step:861/10000 train_time:56977ms step_avg:66.18ms +[2025-07-07 20:50:05] [Rank 0] step:861/10000 train_time:56977ms step_avg:66.18ms +[2025-07-07 20:50:06] [Rank 0] step:881/10000 train_time:58324ms step_avg:66.20ms +[2025-07-07 20:50:06] [Rank 0] step:881/10000 train_time:58324ms step_avg:66.20ms +[2025-07-07 20:50:07] [Rank 0] step:901/10000 train_time:59673ms step_avg:66.23ms +[2025-07-07 20:50:07] [Rank 0] step:901/10000 train_time:59673ms step_avg:66.23ms +[2025-07-07 20:50:09] [Rank 0] step:921/10000 train_time:61077ms step_avg:66.32ms +[2025-07-07 20:50:09] [Rank 0] step:921/10000 train_time:61077ms step_avg:66.32ms +[2025-07-07 20:50:10] [Rank 0] step:941/10000 train_time:62425ms step_avg:66.34ms +[2025-07-07 20:50:10] [Rank 0] step:941/10000 train_time:62425ms step_avg:66.34ms +[2025-07-07 20:50:11] [Rank 0] step:961/10000 train_time:63774ms step_avg:66.36ms +[2025-07-07 20:50:11] [Rank 0] step:961/10000 train_time:63774ms step_avg:66.36ms +[2025-07-07 20:50:13] [Rank 0] step:981/10000 train_time:65125ms step_avg:66.39ms +[2025-07-07 20:50:13] [Rank 0] step:981/10000 train_time:65125ms step_avg:66.39ms +[2025-07-07 20:50:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 20:50:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 20:50:15] [Rank 0] PRINT: step:1000/10000 train_loss:6.1557 val_loss:5.3255 train_time:67087ms step_avg:67.09ms
+[2025-07-07 20:50:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:50:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:50:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:55:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:55:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:55:43] [Rank 0] Total Loss: 6.3119
+[2025-07-07 20:55:43] [Rank 0] Total FTA: 0.0096
+[2025-07-07 20:55:43] [Rank 0] Group 0 Loss: 6.2251
+[2025-07-07 20:55:43] [Rank 0] Group 1 Loss: 6.3072
+[2025-07-07 20:55:43] [Rank 0] Group 2 Loss: 6.4396
+[2025-07-07 20:55:43] [Rank 0] Group 3 Loss: 6.3036
+[2025-07-07 20:55:43] [Rank 0] Group 4 Loss: 6.3451
+[2025-07-07 20:55:43] [Rank 0] Group 5 Loss: 6.3092
+[2025-07-07 20:55:43] [Rank 0] Group 6 Loss: 6.3351
+[2025-07-07 20:55:43] [Rank 0] Group 7 Loss: 6.3278
+[2025-07-07 20:55:43] [Rank 0] Group 8 Loss: 6.2767
+[2025-07-07 20:55:43] [Rank 0] Group 9 Loss: 6.3139
+[2025-07-07 20:55:43] [Rank 0] Group 10 Loss: 6.3140
+[2025-07-07 20:55:43] [Rank 0] Group 11 Loss: 6.3196
+[2025-07-07 20:55:43] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 20:55:43] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 20:55:43] [Rank 0] Group 2 FTA: 0.0938
+[2025-07-07 20:55:43] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 20:55:43] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 20:55:43] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 20:55:43] [Rank 0] Group 6 FTA: 0.0104
+[2025-07-07 20:55:43] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 20:55:43] [Rank 0] Group 8 FTA: 0.0026
+[2025-07-07 20:55:43] [Rank 0] Group 9 FTA: 0.0039
+[2025-07-07 20:55:43] [Rank 0] Group 10 FTA: 0.0039
+[2025-07-07 20:55:43] [Rank 0] Group 11 FTA: 0.0098
+[2025-07-07 20:55:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 20:55:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 20:55:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 20:55:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 20:55:44] [Rank 0] step:1001/10000 train_time:67096ms step_avg:67.03ms
+[2025-07-07 20:55:45] [Rank 0] step:1021/10000 train_time:67841ms step_avg:66.45ms
+[2025-07-07 20:55:47] [Rank 0] step:1041/10000 train_time:69181ms step_avg:66.46ms
+[2025-07-07 20:55:48] [Rank 0] step:1061/10000 train_time:70522ms step_avg:66.47ms
+[2025-07-07 20:55:49] [Rank 0] step:1081/10000 train_time:71914ms step_avg:66.53ms
+[2025-07-07 20:55:51] [Rank 0] step:1101/10000 train_time:73255ms step_avg:66.54ms
+[2025-07-07 20:55:52] [Rank 0] step:1121/10000 train_time:74598ms step_avg:66.55ms
+[2025-07-07 20:55:54] [Rank 0] step:1141/10000 train_time:75943ms step_avg:66.56ms
+[2025-07-07 20:55:55] [Rank 0] step:1161/10000 train_time:77288ms step_avg:66.57ms
+[2025-07-07 20:55:56] [Rank 0] step:1181/10000 train_time:78635ms step_avg:66.58ms
+[2025-07-07 20:55:58] [Rank 0] step:1201/10000 train_time:79981ms step_avg:66.60ms
+[2025-07-07 20:55:59] [Rank 0] step:1221/10000 train_time:81328ms step_avg:66.61ms
+[2025-07-07 20:56:00] [Rank 0] step:1241/10000 train_time:82678ms step_avg:66.62ms
+[2025-07-07 20:56:02] [Rank 0] step:1261/10000 train_time:84692ms step_avg:67.16ms
+[2025-07-07 20:56:03] [Rank 0] step:1281/10000 train_time:85420ms step_avg:66.68ms
+[2025-07-07 20:56:04] [Rank 0] step:1301/10000 train_time:86769ms step_avg:66.69ms
+[2025-07-07 20:56:06] [Rank 0] step:1321/10000 train_time:88117ms step_avg:66.71ms
+[2025-07-07 20:56:07] [Rank 0] step:1341/10000 train_time:89466ms step_avg:66.72ms
+[2025-07-07 20:56:08] [Rank 0] step:1361/10000 train_time:90815ms step_avg:66.73ms
+[2025-07-07 20:56:10] [Rank 0] step:1381/10000 train_time:92164ms step_avg:66.74ms
+[2025-07-07 20:56:11] [Rank 0] step:1401/10000 train_time:93514ms step_avg:66.75ms
+[2025-07-07 20:56:12] [Rank 0] step:1421/10000 train_time:94864ms step_avg:66.76ms
+[2025-07-07 20:56:14] [Rank 0] step:1441/10000 train_time:96214ms step_avg:66.77ms
+[2025-07-07 20:56:15] [Rank 0] step:1461/10000 train_time:97611ms step_avg:66.81ms
+[2025-07-07 20:56:17] [Rank 0] step:1481/10000 train_time:98963ms step_avg:66.82ms
+[2025-07-07 20:56:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:56:19] [Rank 0] PRINT: step:1500/10000 train_loss:4.7219 val_loss:4.1532 train_time:100984ms step_avg:67.32ms
+[2025-07-07 20:56:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:56:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:56:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:01:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:01:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:01:44] [Rank 0] Total Loss: 5.5823
+[2025-07-07 21:01:44] [Rank 0] Total FTA: 0.0726
+[2025-07-07 21:01:44] [Rank 0] Group 0 Loss: 5.5188
+[2025-07-07 21:01:44] [Rank 0] Group 1 Loss: 5.5700
+[2025-07-07 21:01:44] [Rank 0] Group 2 Loss: 5.7029
+[2025-07-07 21:01:44] [Rank 0] Group 3 Loss: 5.4790
+[2025-07-07 21:01:44] [Rank 0] Group 4 Loss: 5.6218
+[2025-07-07 21:01:44] [Rank 0] Group 5 Loss: 5.5941
+[2025-07-07 21:01:44] [Rank 0] Group 6 Loss: 5.6152
+[2025-07-07 21:01:44] [Rank 0] Group 7 Loss: 5.6300
+[2025-07-07 21:01:44] [Rank 0] Group 8 Loss: 5.5649
+[2025-07-07 21:01:44] [Rank 0] Group 9 Loss: 5.5539
+[2025-07-07 21:01:44] [Rank 0] Group 10 Loss: 5.5875
+[2025-07-07 21:01:44] [Rank 0] Group 11 Loss: 5.5898
+[2025-07-07 21:01:44] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 21:01:44] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:01:44] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 21:01:44] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 21:01:44] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 21:01:44] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-07 21:01:44] [Rank 0] Group 6 FTA: 0.0599
+[2025-07-07 21:01:44] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 21:01:44] [Rank 0] Group 8 FTA: 0.0755
+[2025-07-07 21:01:44] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-07 21:01:44] [Rank 0] Group 10 FTA: 0.0605
+[2025-07-07 21:01:44] [Rank 0] Group 11 FTA: 0.0596
+[2025-07-07 21:01:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:01:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:01:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:01:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:01:45] [Rank 0] step:1501/10000 train_time:100993ms step_avg:67.28ms
+[2025-07-07 21:01:47] [Rank 0] step:1521/10000 train_time:101736ms step_avg:66.89ms
+[2025-07-07 21:01:48] [Rank 0] step:1541/10000 train_time:103079ms step_avg:66.89ms
+[2025-07-07 21:01:50] [Rank 0] step:1561/10000 train_time:104422ms step_avg:66.89ms
+[2025-07-07 21:01:51] [Rank 0] step:1581/10000 train_time:105765ms step_avg:66.90ms
+[2025-07-07 21:01:52] [Rank 0] step:1601/10000 train_time:107110ms step_avg:66.90ms
+[2025-07-07 21:01:54] [Rank 0] step:1621/10000 train_time:108454ms step_avg:66.91ms
+[2025-07-07 21:01:55] [Rank 0] step:1641/10000 train_time:109858ms step_avg:66.95ms
+[2025-07-07 21:01:56] [Rank 0] step:1661/10000 train_time:111203ms step_avg:66.95ms
+[2025-07-07 21:01:58] [Rank 0] step:1681/10000 train_time:112549ms step_avg:66.95ms
+[2025-07-07 21:01:59] [Rank 0] step:1701/10000 train_time:113894ms step_avg:66.96ms
+[2025-07-07 21:02:00] [Rank 0] step:1721/10000 train_time:115240ms step_avg:66.96ms
+[2025-07-07 21:02:02] [Rank 0] step:1741/10000 train_time:116588ms step_avg:66.97ms
+[2025-07-07 21:02:03] [Rank 0] step:1761/10000 train_time:117939ms step_avg:66.97ms
+[2025-07-07 21:02:04] [Rank 0] step:1781/10000 train_time:119288ms step_avg:66.98ms
+[2025-07-07 21:02:06] [Rank 0] step:1801/10000 train_time:120636ms step_avg:66.98ms
+[2025-07-07 21:02:07] [Rank 0] step:1821/10000 train_time:122020ms step_avg:67.01ms
+[2025-07-07 21:02:08] [Rank 0] step:1841/10000 train_time:123373ms step_avg:67.01ms
+[2025-07-07 21:02:10] [Rank 0] step:1861/10000 train_time:124722ms step_avg:67.02ms
+[2025-07-07 21:02:11] [Rank 0] step:1881/10000 train_time:126071ms step_avg:67.02ms
+[2025-07-07 21:02:13] [Rank 0] step:1901/10000 train_time:127420ms step_avg:67.03ms
+[2025-07-07 21:02:14] [Rank 0] step:1921/10000 train_time:128769ms step_avg:67.03ms
+[2025-07-07 21:02:15] [Rank 0] step:1941/10000 train_time:130119ms step_avg:67.04ms
+[2025-07-07 21:02:17] [Rank 0] step:1961/10000 train_time:131467ms step_avg:67.04ms
+[2025-07-07 21:02:18] [Rank 0] step:1981/10000 train_time:133486ms step_avg:67.38ms
+[2025-07-07 21:02:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:02:20] [Rank 0] PRINT: step:2000/10000 train_loss:3.6662 val_loss:3.2366 train_time:134828ms step_avg:67.41ms
+[2025-07-07 21:02:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:02:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:02:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:07:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:07:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:07:45] [Rank 0] Total Loss: 5.0312
+[2025-07-07 21:07:45] [Rank 0] Total FTA: 0.0735
+[2025-07-07 21:07:45] [Rank 0] Group 0 Loss: 5.0214
+[2025-07-07 21:07:45] [Rank 0] Group 1 Loss: 4.9969
+[2025-07-07 21:07:45] [Rank 0] Group 2 Loss: 5.0367
+[2025-07-07 21:07:45] [Rank 0] Group 3 Loss: 4.9523
+[2025-07-07 21:07:45] [Rank 0] Group 4 Loss: 5.0640
+[2025-07-07 21:07:45] [Rank 0] Group 5 Loss: 5.0260
+[2025-07-07 21:07:45] [Rank 0] Group 6 Loss: 5.0837
+[2025-07-07 21:07:45] [Rank 0] Group 7 Loss: 5.0619
+[2025-07-07 21:07:45] [Rank 0] Group 8 Loss: 5.0000
+[2025-07-07 21:07:45] [Rank 0] Group 9 Loss: 5.0547
+[2025-07-07 21:07:45] [Rank 0] Group 10 Loss: 5.0577
+[2025-07-07 21:07:45] [Rank 0] Group 11 Loss: 5.0300
+[2025-07-07 21:07:45] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 21:07:45] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:07:45] [Rank 0] Group 2 FTA: 0.0885
+[2025-07-07 21:07:45] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 21:07:45] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 21:07:45] [Rank 0] Group 5 FTA: 0.0521
+[2025-07-07 21:07:45] [Rank 0] Group 6 FTA: 0.0651
+[2025-07-07 21:07:45] [Rank 0] Group 7 FTA: 0.0781
+[2025-07-07 21:07:45] [Rank 0] Group 8 FTA: 0.0625
+[2025-07-07 21:07:45] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 21:07:45] [Rank 0] Group 10 FTA: 0.0645
+[2025-07-07 21:07:45] [Rank 0] Group 11 FTA: 0.0732
+[2025-07-07 21:07:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:07:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:07:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:07:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:07:46] [Rank 0] step:2001/10000 train_time:134837ms step_avg:67.38ms
+[2025-07-07 21:07:48] [Rank 0] step:2021/10000 train_time:135578ms step_avg:67.08ms
+[2025-07-07 21:07:49] [Rank 0] step:2041/10000 train_time:136919ms step_avg:67.08ms
+[2025-07-07 21:07:50] [Rank 0] step:2061/10000 train_time:138261ms step_avg:67.08ms
+[2025-07-07 21:07:52] [Rank 0] step:2081/10000 train_time:139604ms step_avg:67.08ms
+[2025-07-07 21:07:53] [Rank 0] step:2101/10000 train_time:140962ms step_avg:67.09ms
+[2025-07-07 21:07:54] [Rank 0] step:2121/10000 train_time:142307ms step_avg:67.09ms
+[2025-07-07 21:07:56] [Rank 0] step:2141/10000 train_time:143650ms step_avg:67.09ms
+[2025-07-07 21:07:57] [Rank 0] step:2161/10000 train_time:145049ms step_avg:67.12ms
+[2025-07-07 21:07:58] [Rank 0] step:2181/10000 train_time:146385ms step_avg:67.12ms
+[2025-07-07 21:08:00] [Rank 0] step:2201/10000 train_time:147731ms step_avg:67.12ms
+[2025-07-07 21:08:01] [Rank 0] step:2221/10000 train_time:149078ms step_avg:67.12ms
+[2025-07-07 21:08:02] [Rank 0] step:2241/10000 train_time:150435ms step_avg:67.13ms
+[2025-07-07 21:08:04] [Rank 0] step:2261/10000 train_time:151806ms step_avg:67.14ms
+[2025-07-07 21:08:05] [Rank 0] step:2281/10000 train_time:153179ms step_avg:67.15ms
+[2025-07-07 21:08:07] [Rank 0] step:2301/10000 train_time:154552ms step_avg:67.17ms
+[2025-07-07 21:08:08] [Rank 0] step:2321/10000 train_time:155925ms step_avg:67.18ms
+[2025-07-07 21:08:09] [Rank 0] step:2341/10000 train_time:157299ms step_avg:67.19ms
+[2025-07-07 21:08:11] [Rank 0] step:2361/10000 train_time:158721ms step_avg:67.23ms
+[2025-07-07 21:08:12] [Rank 0] step:2381/10000 train_time:160095ms step_avg:67.24ms
+[2025-07-07 21:08:14] [Rank 0] step:2401/10000 train_time:161469ms step_avg:67.25ms
+[2025-07-07 21:08:15] [Rank 0] step:2421/10000 train_time:162844ms step_avg:67.26ms
+[2025-07-07 21:08:16] [Rank 0] step:2441/10000 train_time:164219ms step_avg:67.28ms
+[2025-07-07 21:08:18] [Rank 0] step:2461/10000 train_time:165596ms step_avg:67.29ms
+[2025-07-07 21:08:19] [Rank 0] step:2481/10000 train_time:166971ms step_avg:67.30ms
+[2025-07-07 21:08:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:08:21] [Rank 0] PRINT: step:2500/10000 train_loss:2.9095 val_loss:2.6329 train_time:168974ms step_avg:67.59ms
+[2025-07-07 21:08:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:08:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:08:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:13:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:13:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:13:49] [Rank 0] Total Loss: 4.7300
+[2025-07-07 21:13:49] [Rank 0] Total FTA: 0.0792
+[2025-07-07 21:13:49] [Rank 0] Group 0 Loss: 4.7199
+[2025-07-07 21:13:49] [Rank 0] Group 1 Loss: 4.6321
+[2025-07-07 21:13:49] [Rank 0] Group 2 Loss: 4.7379
+[2025-07-07 21:13:49] [Rank 0] Group 3 Loss: 4.6586
+[2025-07-07 21:13:49] [Rank 0] Group 4 Loss: 4.7931
+[2025-07-07 21:13:49] [Rank 0] Group 5 Loss: 4.6622
+[2025-07-07 21:13:49] [Rank 0] Group 6 Loss: 4.7704
+[2025-07-07 21:13:49] [Rank 0] Group 7 Loss: 4.7660
+[2025-07-07 21:13:49] [Rank 0] Group 8 Loss: 4.7239
+[2025-07-07 21:13:49] [Rank 0] Group 9 Loss: 4.7223
+[2025-07-07 21:13:49] [Rank 0] Group 10 Loss: 4.7803
+[2025-07-07 21:13:49] [Rank 0] Group 11 Loss: 4.7502
+[2025-07-07 21:13:49] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-07 21:13:49] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:13:49] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-07 21:13:49] [Rank 0] Group 3 FTA: 0.0286
+[2025-07-07 21:13:49] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 21:13:49] [Rank 0] Group 5 FTA: 0.0391
+[2025-07-07 21:13:49] [Rank 0] Group 6 FTA: 0.0599
+[2025-07-07 21:13:49] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 21:13:49] [Rank 0] Group 8 FTA: 0.0729
+[2025-07-07 21:13:49] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-07 21:13:49] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-07 21:13:49] [Rank 0] Group 11 FTA: 0.0850
+[2025-07-07 21:13:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:13:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:13:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:13:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:13:50] [Rank 0] step:2501/10000 train_time:168983ms step_avg:67.57ms
+[2025-07-07 21:13:52] [Rank 0] step:2521/10000 train_time:170405ms step_avg:67.59ms
+[2025-07-07 21:13:53] [Rank 0] step:2541/10000 train_time:171140ms step_avg:67.35ms
+[2025-07-07 21:13:54] [Rank 0] step:2561/10000 train_time:172504ms step_avg:67.36ms
+[2025-07-07 21:13:56] [Rank 0] step:2581/10000 train_time:173868ms step_avg:67.36ms
+[2025-07-07 21:13:57] [Rank 0] step:2601/10000 train_time:175236ms step_avg:67.37ms
+[2025-07-07 21:13:58] [Rank 0] step:2621/10000 train_time:176602ms step_avg:67.38ms
+[2025-07-07 21:14:00] [Rank 0] step:2641/10000 train_time:177970ms step_avg:67.39ms
+[2025-07-07 21:14:01] [Rank 0] step:2661/10000 train_time:179337ms step_avg:67.39ms
+[2025-07-07 21:14:03] [Rank 0] step:2681/10000 train_time:180706ms step_avg:67.40ms
+[2025-07-07 21:14:04] [Rank 0] step:2701/10000 train_time:182325ms step_avg:67.50ms
+[2025-07-07 21:14:05] [Rank 0] step:2721/10000 train_time:183493ms step_avg:67.44ms
+[2025-07-07 21:14:07] [Rank 0] step:2741/10000 train_time:184863ms step_avg:67.44ms
+[2025-07-07 21:14:08] [Rank 0] step:2761/10000 train_time:186233ms step_avg:67.45ms
+[2025-07-07 21:14:09] [Rank 0] step:2781/10000 train_time:187603ms step_avg:67.46ms
+[2025-07-07 21:14:11] [Rank 0] step:2801/10000 train_time:188973ms step_avg:67.47ms
+[2025-07-07 21:14:12] [Rank 0] step:2821/10000 train_time:190351ms step_avg:67.48ms
+[2025-07-07 21:14:14] [Rank 0] step:2841/10000 train_time:191723ms step_avg:67.48ms
+[2025-07-07 21:14:15] [Rank 0] step:2861/10000 train_time:193095ms step_avg:67.49ms
+[2025-07-07 21:14:16] [Rank 0] step:2881/10000 train_time:194467ms step_avg:67.50ms
+[2025-07-07 21:14:18] [Rank 0] step:2901/10000 train_time:195881ms step_avg:67.52ms
+[2025-07-07 21:14:19] [Rank 0] step:2921/10000 train_time:197251ms step_avg:67.53ms
+[2025-07-07 21:14:20] [Rank 0] step:2941/10000 train_time:198623ms step_avg:67.54ms
+[2025-07-07 21:14:22] [Rank 0] step:2961/10000 train_time:199996ms step_avg:67.54ms
+[2025-07-07 21:14:23] [Rank 0] step:2981/10000 train_time:201368ms step_avg:67.55ms
+[2025-07-07 21:14:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:14:26] [Rank 0] PRINT: step:3000/10000 train_loss:2.4299 val_loss:2.2610 train_time:203369ms step_avg:67.79ms
+[2025-07-07 21:14:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:14:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:14:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:19:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:19:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:19:54] [Rank 0] Total Loss: 4.6576
+[2025-07-07 21:19:54] [Rank 0] Total FTA: 0.1001
+[2025-07-07 21:19:54] [Rank 0] Group 0 Loss: 4.7574
+[2025-07-07 21:19:54] [Rank 0] Group 1 Loss: 4.5483
+[2025-07-07 21:19:54] [Rank 0] Group 2 Loss: 4.6157
+[2025-07-07 21:19:54] [Rank 0] Group 3 Loss: 4.6384
+[2025-07-07 21:19:54] [Rank 0] Group 4 Loss: 4.6477
+[2025-07-07 21:19:54] [Rank 0] Group 5 Loss: 4.5871
+[2025-07-07 21:19:54] [Rank 0] Group 6 Loss: 4.6512
+[2025-07-07 21:19:54] [Rank 0] Group 7 Loss: 4.6903
+[2025-07-07 21:19:54] [Rank 0] Group 8 Loss: 4.6846
+[2025-07-07 21:19:54] [Rank 0] Group 9 Loss: 4.6694
+[2025-07-07 21:19:54] [Rank 0] Group 10 Loss: 4.7093
+[2025-07-07 21:19:54] [Rank 0] Group 11 Loss: 4.6282
+[2025-07-07 21:19:54] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 21:19:54] [Rank 0] Group 1 FTA: 0.1667
+[2025-07-07 21:19:54] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 21:19:54] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 21:19:54] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 21:19:54] [Rank 0] Group 5 FTA: 0.0755
+[2025-07-07 21:19:54] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-07 21:19:54] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 21:19:54] [Rank 0] Group 8 FTA: 0.1042
+[2025-07-07 21:19:54] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-07 21:19:54] [Rank 0] Group 10 FTA: 0.1016
+[2025-07-07 21:19:54] [Rank 0] Group 11 FTA: 0.0938
+[2025-07-07 21:19:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:19:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:19:55] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:19:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:19:56] [Rank 0] step:3001/10000 train_time:203378ms step_avg:67.77ms
+[2025-07-07 21:19:57] [Rank 0] step:3021/10000 train_time:204147ms step_avg:67.58ms
+[2025-07-07 21:19:58] [Rank 0] step:3041/10000 train_time:205511ms step_avg:67.58ms
+[2025-07-07 21:20:00] [Rank 0] step:3061/10000 train_time:206877ms step_avg:67.58ms
+[2025-07-07 21:20:01] [Rank 0] step:3081/10000 train_time:208277ms step_avg:67.60ms
+[2025-07-07 21:20:02] [Rank 0] step:3101/10000 train_time:209645ms step_avg:67.61ms
+[2025-07-07 21:20:04] [Rank 0] step:3121/10000 train_time:211012ms step_avg:67.61ms
+[2025-07-07 21:20:05] [Rank 0] step:3141/10000 train_time:212379ms step_avg:67.62ms
+[2025-07-07 21:20:07] [Rank 0] step:3161/10000 train_time:213747ms step_avg:67.62ms
+[2025-07-07 21:20:08] [Rank 0] step:3181/10000 train_time:215115ms step_avg:67.62ms
+[2025-07-07 21:20:09] [Rank 0] step:3201/10000 train_time:216485ms step_avg:67.63ms
+[2025-07-07 21:20:11] [Rank 0] step:3221/10000 train_time:217855ms step_avg:67.64ms
+[2025-07-07 21:20:12] [Rank 0] step:3241/10000 train_time:219227ms step_avg:67.64ms
+[2025-07-07 21:20:13] [Rank 0] step:3261/10000 train_time:220631ms step_avg:67.66ms
+[2025-07-07 21:20:15] [Rank 0] step:3281/10000 train_time:222003ms step_avg:67.66ms
+[2025-07-07 21:20:16] [Rank 0] step:3301/10000 train_time:223376ms step_avg:67.67ms
+[2025-07-07 21:20:18] [Rank 0] step:3321/10000 train_time:224748ms step_avg:67.67ms
+[2025-07-07 21:20:20] [Rank 0] step:3341/10000 train_time:227170ms step_avg:67.99ms
+[2025-07-07 21:20:21] [Rank 0] step:3361/10000 train_time:228511ms step_avg:67.99ms
+[2025-07-07 21:20:23] [Rank 0] step:3381/10000 train_time:229883ms step_avg:67.99ms
+[2025-07-07 21:20:24] [Rank 0] step:3401/10000 train_time:231257ms step_avg:68.00ms
+[2025-07-07 21:20:25] [Rank 0] step:3421/10000 train_time:232630ms step_avg:68.00ms
+[2025-07-07 21:20:27] [Rank 0] step:3441/10000 train_time:234041ms step_avg:68.02ms
+[2025-07-07 21:20:28] [Rank 0] step:3461/10000 train_time:235414ms step_avg:68.02ms
+[2025-07-07 21:20:30] [Rank 0] step:3481/10000 train_time:236790ms step_avg:68.02ms
+[2025-07-07 21:20:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:20:32] [Rank 0] PRINT: step:3500/10000 train_loss:2.1336 val_loss:2.0272 train_time:238791ms step_avg:68.23ms
+[2025-07-07 21:20:32] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:20:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:20:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:26:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:26:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:26:00] [Rank 0] Total Loss: 4.6997
+[2025-07-07 21:26:00] [Rank 0] Total FTA: 0.0971
+[2025-07-07 21:26:00] [Rank 0] Group 0 Loss: 4.8628
+[2025-07-07 21:26:00] [Rank 0] Group 1 Loss: 4.6352
+[2025-07-07 21:26:00] [Rank 0] Group 2 Loss: 4.7817
+[2025-07-07 21:26:00] [Rank 0] Group 3 Loss: 4.7359
+[2025-07-07 21:26:00] [Rank 0] Group 4 Loss: 4.6909
+[2025-07-07 21:26:00] [Rank 0] Group 5 Loss: 4.5718
+[2025-07-07 21:26:00] [Rank 0] Group 6 Loss: 4.6582
+[2025-07-07 21:26:00] [Rank 0] Group 7 Loss: 4.6828
+[2025-07-07 21:26:00] [Rank 0] Group 8 Loss: 4.6467
+[2025-07-07 21:26:00] [Rank 0] Group 9 Loss: 4.6450
+[2025-07-07 21:26:00] [Rank 0] Group 10 Loss: 4.6774
+[2025-07-07 21:26:00] [Rank 0] Group 11 Loss: 4.6751
+[2025-07-07 21:26:00] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 21:26:00] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-07 21:26:00] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-07 21:26:00] [Rank 0] Group 3 FTA: 0.0443
+[2025-07-07 21:26:00] [Rank 0] Group 4 FTA: 0.0312
+[2025-07-07 21:26:00] [Rank 0] Group 5 FTA: 0.0703
+[2025-07-07 21:26:00] [Rank 0] Group 6 FTA: 0.0833
+[2025-07-07 21:26:00] [Rank 0] Group 7 FTA: 0.0781
+[2025-07-07 21:26:00] [Rank 0] Group 8 FTA: 0.1172
+[2025-07-07 21:26:00] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 21:26:00] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-07 21:26:00] [Rank 0] Group 11 FTA: 0.0928
+[2025-07-07 21:26:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:26:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:26:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:26:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:26:01] [Rank 0] step:3501/10000 train_time:238800ms step_avg:68.21ms
+[2025-07-07 21:26:03] [Rank 0] step:3521/10000 train_time:239569ms step_avg:68.04ms
+[2025-07-07 21:26:04] [Rank 0] step:3541/10000 train_time:240935ms step_avg:68.04ms
+[2025-07-07 21:26:05] [Rank 0] step:3561/10000 train_time:242300ms step_avg:68.04ms
+[2025-07-07 21:26:07] [Rank 0] step:3581/10000 train_time:243666ms step_avg:68.04ms
+[2025-07-07 21:26:08] [Rank 0] step:3601/10000 train_time:245285ms step_avg:68.12ms
+[2025-07-07 21:26:10] [Rank 0] step:3621/10000 train_time:246447ms step_avg:68.06ms
+[2025-07-07 21:26:11] [Rank 0] step:3641/10000 train_time:247814ms step_avg:68.06ms
+[2025-07-07 21:26:12] [Rank 0] step:3661/10000 train_time:249181ms step_avg:68.06ms
+[2025-07-07 21:26:14] [Rank 0] step:3681/10000 train_time:250550ms step_avg:68.07ms
+[2025-07-07 21:26:15] [Rank 0] step:3701/10000 train_time:251919ms step_avg:68.07ms
+[2025-07-07 21:26:16] [Rank 0] step:3721/10000 train_time:253289ms step_avg:68.07ms
+[2025-07-07 21:26:18] [Rank 0] step:3741/10000 train_time:254660ms step_avg:68.07ms
+[2025-07-07 21:26:19] [Rank 0] step:3761/10000 train_time:256029ms step_avg:68.07ms
+[2025-07-07 21:26:21] [Rank 0] step:3781/10000 train_time:257449ms step_avg:68.09ms
+[2025-07-07 21:26:22] [Rank 0] step:3801/10000 train_time:258799ms step_avg:68.09ms
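The step_avg column in these lines is evidently just cumulative training time divided by the step index, so a single slow step nudges it upward (compare step:3601 at 68.12ms against its 68.04ms neighbours). A one-line check against the step:3801 entry above; the function name is illustrative:

    def step_avg_ms(train_time_ms: float, step: int) -> float:
        # cumulative wall-clock training time over completed steps
        return train_time_ms / step

    print(round(step_avg_ms(258799, 3801), 2))  # 68.09, matching the log line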
+[2025-07-07 21:26:23] [Rank 0] step:3821/10000 train_time:260170ms step_avg:68.09ms
+[2025-07-07 21:26:25] [Rank 0] step:3841/10000 train_time:261544ms step_avg:68.09ms
+[2025-07-07 21:26:26] [Rank 0] step:3861/10000 train_time:262918ms step_avg:68.10ms
+[2025-07-07 21:26:27] [Rank 0] step:3881/10000 train_time:264291ms step_avg:68.10ms
+[2025-07-07 21:26:29] [Rank 0] step:3901/10000 train_time:265664ms step_avg:68.10ms
+[2025-07-07 21:26:30] [Rank 0] step:3921/10000 train_time:267038ms step_avg:68.10ms
+[2025-07-07 21:26:32] [Rank 0] step:3941/10000 train_time:268412ms step_avg:68.11ms
+[2025-07-07 21:26:33] [Rank 0] step:3961/10000 train_time:269787ms step_avg:68.11ms
+[2025-07-07 21:26:34] [Rank 0] step:3981/10000 train_time:271186ms step_avg:68.12ms
+[2025-07-07 21:26:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:26:37] [Rank 0] PRINT: step:4000/10000 train_loss:1.9453 val_loss:1.8758 train_time:273186ms step_avg:68.30ms
+[2025-07-07 21:26:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:26:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
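The warning repeated before each validation pass is easy to quantify: the validation loader evidently runs only whole batches, so the remainder of val_tokens is dropped. Under that reading:

    val_tokens, val_batch_size = 1966080, 262144
    num_full_batches = val_tokens // val_batch_size   # 7
    evaluated = num_full_batches * val_batch_size     # 1835008 tokens
    missed = val_tokens - evaluated                   # 131072 tokens, ~6.7%
    print(num_full_batches, evaluated, missed)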
+[2025-07-07 21:26:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:32:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:32:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:32:02] [Rank 0] Total Loss: 4.6783
+[2025-07-07 21:32:02] [Rank 0] Total FTA: 0.1040
+[2025-07-07 21:32:02] [Rank 0] Group 0 Loss: 4.9054
+[2025-07-07 21:32:02] [Rank 0] Group 1 Loss: 4.5048
+[2025-07-07 21:32:02] [Rank 0] Group 2 Loss: 4.5796
+[2025-07-07 21:32:02] [Rank 0] Group 3 Loss: 4.7020
+[2025-07-07 21:32:02] [Rank 0] Group 4 Loss: 4.6123
+[2025-07-07 21:32:02] [Rank 0] Group 5 Loss: 4.5166
+[2025-07-07 21:32:02] [Rank 0] Group 6 Loss: 4.6253
+[2025-07-07 21:32:02] [Rank 0] Group 7 Loss: 4.6873
+[2025-07-07 21:32:02] [Rank 0] Group 8 Loss: 4.6813
+[2025-07-07 21:32:02] [Rank 0] Group 9 Loss: 4.6722
+[2025-07-07 21:32:03] [Rank 0] Group 10 Loss: 4.7073
+[2025-07-07 21:32:03] [Rank 0] Group 11 Loss: 4.6884
+[2025-07-07 21:32:03] [Rank 0] Group 0 FTA: 0.1899
+[2025-07-07 21:32:03] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 21:32:03] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 21:32:03] [Rank 0] Group 3 FTA: 0.0365
+[2025-07-07 21:32:03] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-07 21:32:03] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 21:32:03] [Rank 0] Group 6 FTA: 0.1120
+[2025-07-07 21:32:03] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 21:32:03] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 21:32:03] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-07 21:32:03] [Rank 0] Group 10 FTA: 0.1094
+[2025-07-07 21:32:03] [Rank 0] Group 11 FTA: 0.0938
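FTA is never expanded in this log; given the paired per_class_acc_curves.png filename, it is presumably a first-token accuracy over the sampled answers, broken down into the 12 groups reported above. A sketch of how such a tally could be computed; all names are illustrative and not taken from the training script:

    import torch

    def per_group_fta(logits, targets, groups, num_groups=12):
        # logits: (N, vocab) at each sample's first answer position
        # targets: (N,) gold first tokens; groups: (N,) group ids in [0, 12)
        hits = (logits.argmax(dim=-1) == targets).float()
        total = hits.mean().item()
        per_group = [hits[groups == g].mean().item() for g in range(num_groups)]
        return total, per_group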
+[2025-07-07 21:32:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:32:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:32:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:32:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:32:04] [Rank 0] step:4001/10000 train_time:273195ms step_avg:68.28ms
+[2025-07-07 21:32:05] [Rank 0] step:4021/10000 train_time:273957ms step_avg:68.13ms
+[2025-07-07 21:32:07] [Rank 0] step:4041/10000 train_time:275321ms step_avg:68.13ms
+[2025-07-07 21:32:08] [Rank 0] step:4061/10000 train_time:276686ms step_avg:68.13ms
+[2025-07-07 21:32:09] [Rank 0] step:4081/10000 train_time:278054ms step_avg:68.13ms
+[2025-07-07 21:32:11] [Rank 0] step:4101/10000 train_time:279420ms step_avg:68.13ms
+[2025-07-07 21:32:12] [Rank 0] step:4121/10000 train_time:280785ms step_avg:68.14ms
+[2025-07-07 21:32:14] [Rank 0] step:4141/10000 train_time:282823ms step_avg:68.30ms
+[2025-07-07 21:32:15] [Rank 0] step:4161/10000 train_time:283560ms step_avg:68.15ms
+[2025-07-07 21:32:16] [Rank 0] step:4181/10000 train_time:284925ms step_avg:68.15ms
+[2025-07-07 21:32:18] [Rank 0] step:4201/10000 train_time:286294ms step_avg:68.15ms
+[2025-07-07 21:32:19] [Rank 0] step:4221/10000 train_time:287662ms step_avg:68.15ms
+[2025-07-07 21:32:20] [Rank 0] step:4241/10000 train_time:289030ms step_avg:68.15ms
+[2025-07-07 21:32:22] [Rank 0] step:4261/10000 train_time:290401ms step_avg:68.15ms
+[2025-07-07 21:32:23] [Rank 0] step:4281/10000 train_time:291772ms step_avg:68.16ms
+[2025-07-07 21:32:25] [Rank 0] step:4301/10000 train_time:293143ms step_avg:68.16ms
+[2025-07-07 21:32:26] [Rank 0] step:4321/10000 train_time:294562ms step_avg:68.17ms
+[2025-07-07 21:32:27] [Rank 0] step:4341/10000 train_time:295915ms step_avg:68.17ms
+[2025-07-07 21:32:29] [Rank 0] step:4361/10000 train_time:297288ms step_avg:68.17ms
+[2025-07-07 21:32:30] [Rank 0] step:4381/10000 train_time:298660ms step_avg:68.17ms
+[2025-07-07 21:32:31] [Rank 0] step:4401/10000 train_time:300033ms step_avg:68.17ms
+[2025-07-07 21:32:33] [Rank 0] step:4421/10000 train_time:301404ms step_avg:68.18ms
+[2025-07-07 21:32:34] [Rank 0] step:4441/10000 train_time:302776ms step_avg:68.18ms
+[2025-07-07 21:32:36] [Rank 0] step:4461/10000 train_time:304150ms step_avg:68.18ms
+[2025-07-07 21:32:37] [Rank 0] step:4481/10000 train_time:305526ms step_avg:68.18ms
+[2025-07-07 21:32:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:32:39] [Rank 0] PRINT: step:4500/10000 train_loss:1.8057 val_loss:1.7489 train_time:307526ms step_avg:68.34ms
+[2025-07-07 21:32:39] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:32:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:32:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:38:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:38:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:38:07] [Rank 0] Total Loss: 4.6946
+[2025-07-07 21:38:07] [Rank 0] Total FTA: 0.1012
+[2025-07-07 21:38:07] [Rank 0] Group 0 Loss: 4.8640
+[2025-07-07 21:38:07] [Rank 0] Group 1 Loss: 4.4840
+[2025-07-07 21:38:07] [Rank 0] Group 2 Loss: 4.6518
+[2025-07-07 21:38:07] [Rank 0] Group 3 Loss: 4.7105
+[2025-07-07 21:38:07] [Rank 0] Group 4 Loss: 4.6826
+[2025-07-07 21:38:07] [Rank 0] Group 5 Loss: 4.5046
+[2025-07-07 21:38:07] [Rank 0] Group 6 Loss: 4.6597
+[2025-07-07 21:38:07] [Rank 0] Group 7 Loss: 4.7051
+[2025-07-07 21:38:07] [Rank 0] Group 8 Loss: 4.7215
+[2025-07-07 21:38:07] [Rank 0] Group 9 Loss: 4.6955
+[2025-07-07 21:38:07] [Rank 0] Group 10 Loss: 4.7060
+[2025-07-07 21:38:07] [Rank 0] Group 11 Loss: 4.7252
+[2025-07-07 21:38:07] [Rank 0] Group 0 FTA: 0.1782
+[2025-07-07 21:38:07] [Rank 0] Group 1 FTA: 0.1641
+[2025-07-07 21:38:07] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 21:38:07] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-07 21:38:07] [Rank 0] Group 4 FTA: 0.0547
+[2025-07-07 21:38:07] [Rank 0] Group 5 FTA: 0.0755
+[2025-07-07 21:38:07] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-07 21:38:07] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-07 21:38:07] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 21:38:07] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 21:38:07] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 21:38:07] [Rank 0] Group 11 FTA: 0.0908
+[2025-07-07 21:38:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:38:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:38:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:38:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:38:09] [Rank 0] step:4501/10000 train_time:307849ms step_avg:68.40ms
+[2025-07-07 21:38:11] [Rank 0] step:4521/10000 train_time:309005ms step_avg:68.35ms
+[2025-07-07 21:38:12] [Rank 0] step:4541/10000 train_time:310369ms step_avg:68.35ms
+[2025-07-07 21:38:13] [Rank 0] step:4561/10000 train_time:311733ms step_avg:68.35ms
+[2025-07-07 21:38:15] [Rank 0] step:4581/10000 train_time:313100ms step_avg:68.35ms
+[2025-07-07 21:38:16] [Rank 0] step:4601/10000 train_time:314466ms step_avg:68.35ms
+[2025-07-07 21:38:18] [Rank 0] step:4621/10000 train_time:315834ms step_avg:68.35ms
+[2025-07-07 21:38:19] [Rank 0] step:4641/10000 train_time:317201ms step_avg:68.35ms
+[2025-07-07 21:38:20] [Rank 0] step:4661/10000 train_time:318569ms step_avg:68.35ms
+[2025-07-07 21:38:22] [Rank 0] step:4681/10000 train_time:319938ms step_avg:68.35ms
+[2025-07-07 21:38:23] [Rank 0] step:4701/10000 train_time:321356ms step_avg:68.36ms
+[2025-07-07 21:38:24] [Rank 0] step:4721/10000 train_time:322725ms step_avg:68.36ms
+[2025-07-07 21:38:26] [Rank 0] step:4741/10000 train_time:324096ms step_avg:68.36ms
+[2025-07-07 21:38:27] [Rank 0] step:4761/10000 train_time:325465ms step_avg:68.36ms
+[2025-07-07 21:38:29] [Rank 0] step:4781/10000 train_time:326835ms step_avg:68.36ms
+[2025-07-07 21:38:30] [Rank 0] step:4801/10000 train_time:328206ms step_avg:68.36ms
+[2025-07-07 21:38:31] [Rank 0] step:4821/10000 train_time:329578ms step_avg:68.36ms
+[2025-07-07 21:38:33] [Rank 0] step:4841/10000 train_time:330951ms step_avg:68.36ms
+[2025-07-07 21:38:34] [Rank 0] step:4861/10000 train_time:332371ms step_avg:68.37ms
+[2025-07-07 21:38:35] [Rank 0] step:4881/10000 train_time:333742ms step_avg:68.38ms
+[2025-07-07 21:38:37] [Rank 0] step:4901/10000 train_time:335115ms step_avg:68.38ms
+[2025-07-07 21:38:38] [Rank 0] step:4921/10000 train_time:336487ms step_avg:68.38ms
+[2025-07-07 21:38:40] [Rank 0] step:4941/10000 train_time:337860ms step_avg:68.38ms
+[2025-07-07 21:38:41] [Rank 0] step:4961/10000 train_time:339233ms step_avg:68.38ms
+[2025-07-07 21:38:42] [Rank 0] step:4981/10000 train_time:340608ms step_avg:68.38ms
+[2025-07-07 21:38:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:38:45] [Rank 0] PRINT: step:5000/10000 train_loss:1.7060 val_loss:1.6711 train_time:342605ms step_avg:68.52ms
+[2025-07-07 21:38:45] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:38:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:38:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:44:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:44:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:44:11] [Rank 0] Total Loss: 4.6666
+[2025-07-07 21:44:11] [Rank 0] Total FTA: 0.0975
+[2025-07-07 21:44:11] [Rank 0] Group 0 Loss: 4.7251
+[2025-07-07 21:44:11] [Rank 0] Group 1 Loss: 4.4362
+[2025-07-07 21:44:11] [Rank 0] Group 2 Loss: 4.6731
+[2025-07-07 21:44:11] [Rank 0] Group 3 Loss: 4.7422
+[2025-07-07 21:44:11] [Rank 0] Group 4 Loss: 4.6230
+[2025-07-07 21:44:11] [Rank 0] Group 5 Loss: 4.5853
+[2025-07-07 21:44:11] [Rank 0] Group 6 Loss: 4.6099
+[2025-07-07 21:44:11] [Rank 0] Group 7 Loss: 4.6925
+[2025-07-07 21:44:11] [Rank 0] Group 8 Loss: 4.7082
+[2025-07-07 21:44:11] [Rank 0] Group 9 Loss: 4.6973
+[2025-07-07 21:44:11] [Rank 0] Group 10 Loss: 4.6853
+[2025-07-07 21:44:11] [Rank 0] Group 11 Loss: 4.7040
+[2025-07-07 21:44:11] [Rank 0] Group 0 FTA: 0.1756
+[2025-07-07 21:44:11] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 21:44:11] [Rank 0] Group 2 FTA: 0.0703
+[2025-07-07 21:44:11] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 21:44:11] [Rank 0] Group 4 FTA: 0.0495
+[2025-07-07 21:44:11] [Rank 0] Group 5 FTA: 0.1016
+[2025-07-07 21:44:11] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-07 21:44:11] [Rank 0] Group 7 FTA: 0.0703
+[2025-07-07 21:44:11] [Rank 0] Group 8 FTA: 0.0625
+[2025-07-07 21:44:11] [Rank 0] Group 9 FTA: 0.0508
+[2025-07-07 21:44:11] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 21:44:11] [Rank 0] Group 11 FTA: 0.0859
+[2025-07-07 21:44:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:44:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:44:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:44:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:44:12] [Rank 0] step:5001/10000 train_time:342613ms step_avg:68.51ms
+[2025-07-07 21:44:14] [Rank 0] step:5021/10000 train_time:343376ms step_avg:68.39ms
+[2025-07-07 21:44:15] [Rank 0] step:5041/10000 train_time:344742ms step_avg:68.39ms
+[2025-07-07 21:44:17] [Rank 0] step:5061/10000 train_time:346164ms step_avg:68.40ms
+[2025-07-07 21:44:18] [Rank 0] step:5081/10000 train_time:347529ms step_avg:68.40ms
+[2025-07-07 21:44:19] [Rank 0] step:5101/10000 train_time:348895ms step_avg:68.40ms
+[2025-07-07 21:44:21] [Rank 0] step:5121/10000 train_time:350261ms step_avg:68.40ms
+[2025-07-07 21:44:22] [Rank 0] step:5141/10000 train_time:351627ms step_avg:68.40ms
+[2025-07-07 21:44:23] [Rank 0] step:5161/10000 train_time:352996ms step_avg:68.40ms
+[2025-07-07 21:44:25] [Rank 0] step:5181/10000 train_time:354363ms step_avg:68.40ms
+[2025-07-07 21:44:26] [Rank 0] step:5201/10000 train_time:355733ms step_avg:68.40ms
+[2025-07-07 21:44:28] [Rank 0] step:5221/10000 train_time:357355ms step_avg:68.45ms
+[2025-07-07 21:44:29] [Rank 0] step:5241/10000 train_time:358519ms step_avg:68.41ms
+[2025-07-07 21:44:30] [Rank 0] step:5261/10000 train_time:359889ms step_avg:68.41ms
+[2025-07-07 21:44:32] [Rank 0] step:5281/10000 train_time:361259ms step_avg:68.41ms
+[2025-07-07 21:44:33] [Rank 0] step:5301/10000 train_time:362630ms step_avg:68.41ms
+[2025-07-07 21:44:34] [Rank 0] step:5321/10000 train_time:364001ms step_avg:68.41ms
+[2025-07-07 21:44:36] [Rank 0] step:5341/10000 train_time:365372ms step_avg:68.41ms
+[2025-07-07 21:44:37] [Rank 0] step:5361/10000 train_time:366746ms step_avg:68.41ms
+[2025-07-07 21:44:39] [Rank 0] step:5381/10000 train_time:368120ms step_avg:68.41ms
+[2025-07-07 21:44:40] [Rank 0] step:5401/10000 train_time:369495ms step_avg:68.41ms
+[2025-07-07 21:44:41] [Rank 0] step:5421/10000 train_time:370905ms step_avg:68.42ms
+[2025-07-07 21:44:43] [Rank 0] step:5441/10000 train_time:372279ms step_avg:68.42ms
+[2025-07-07 21:44:44] [Rank 0] step:5461/10000 train_time:373655ms step_avg:68.42ms
+[2025-07-07 21:44:45] [Rank 0] step:5481/10000 train_time:375031ms step_avg:68.42ms
+[2025-07-07 21:44:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:44:48] [Rank 0] PRINT: step:5500/10000 train_loss:1.6411 val_loss:1.6176 train_time:377029ms step_avg:68.55ms
+[2025-07-07 21:44:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:44:48] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:44:48] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:50:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:50:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:50:13] [Rank 0] Total Loss: 4.7115
+[2025-07-07 21:50:13] [Rank 0] Total FTA: 0.1070
+[2025-07-07 21:50:13] [Rank 0] Group 0 Loss: 4.7411
+[2025-07-07 21:50:13] [Rank 0] Group 1 Loss: 4.5568
+[2025-07-07 21:50:13] [Rank 0] Group 2 Loss: 4.8760
+[2025-07-07 21:50:13] [Rank 0] Group 3 Loss: 4.7709
+[2025-07-07 21:50:13] [Rank 0] Group 4 Loss: 4.6679
+[2025-07-07 21:50:13] [Rank 0] Group 5 Loss: 4.6143
+[2025-07-07 21:50:13] [Rank 0] Group 6 Loss: 4.5930
+[2025-07-07 21:50:13] [Rank 0] Group 7 Loss: 4.7551
+[2025-07-07 21:50:13] [Rank 0] Group 8 Loss: 4.7145
+[2025-07-07 21:50:13] [Rank 0] Group 9 Loss: 4.7059
+[2025-07-07 21:50:13] [Rank 0] Group 10 Loss: 4.7278
+[2025-07-07 21:50:13] [Rank 0] Group 11 Loss: 4.7361
+[2025-07-07 21:50:13] [Rank 0] Group 0 FTA: 0.1678
+[2025-07-07 21:50:13] [Rank 0] Group 1 FTA: 0.1615
+[2025-07-07 21:50:13] [Rank 0] Group 2 FTA: 0.1016
+[2025-07-07 21:50:13] [Rank 0] Group 3 FTA: 0.0990
+[2025-07-07 21:50:13] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-07 21:50:13] [Rank 0] Group 5 FTA: 0.0781
+[2025-07-07 21:50:13] [Rank 0] Group 6 FTA: 0.0755
+[2025-07-07 21:50:13] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 21:50:13] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-07 21:50:13] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 21:50:13] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-07 21:50:13] [Rank 0] Group 11 FTA: 0.0967
+[2025-07-07 21:50:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:50:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:50:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:50:14] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:50:14] [Rank 0] step:5501/10000 train_time:377039ms step_avg:68.54ms
+[2025-07-07 21:50:16] [Rank 0] step:5521/10000 train_time:377798ms step_avg:68.43ms
+[2025-07-07 21:50:17] [Rank 0] step:5541/10000 train_time:379165ms step_avg:68.43ms
+[2025-07-07 21:50:19] [Rank 0] step:5561/10000 train_time:380532ms step_avg:68.43ms
+[2025-07-07 21:50:20] [Rank 0] step:5581/10000 train_time:381899ms step_avg:68.43ms
+[2025-07-07 21:50:21] [Rank 0] step:5601/10000 train_time:383323ms step_avg:68.44ms
+[2025-07-07 21:50:23] [Rank 0] step:5621/10000 train_time:384691ms step_avg:68.44ms
+[2025-07-07 21:50:24] [Rank 0] step:5641/10000 train_time:386058ms step_avg:68.44ms
+[2025-07-07 21:50:25] [Rank 0] step:5661/10000 train_time:387426ms step_avg:68.44ms
+[2025-07-07 21:50:27] [Rank 0] step:5681/10000 train_time:388793ms step_avg:68.44ms
+[2025-07-07 21:50:28] [Rank 0] step:5701/10000 train_time:390161ms step_avg:68.44ms
+[2025-07-07 21:50:30] [Rank 0] step:5721/10000 train_time:391532ms step_avg:68.44ms
+[2025-07-07 21:50:31] [Rank 0] step:5741/10000 train_time:392901ms step_avg:68.44ms
+[2025-07-07 21:50:32] [Rank 0] step:5761/10000 train_time:394522ms step_avg:68.48ms
+[2025-07-07 21:50:34] [Rank 0] step:5781/10000 train_time:395678ms step_avg:68.44ms
+[2025-07-07 21:50:35] [Rank 0] step:5801/10000 train_time:397045ms step_avg:68.44ms
+[2025-07-07 21:50:36] [Rank 0] step:5821/10000 train_time:398416ms step_avg:68.44ms
+[2025-07-07 21:50:38] [Rank 0] step:5841/10000 train_time:399788ms step_avg:68.45ms
+[2025-07-07 21:50:39] [Rank 0] step:5861/10000 train_time:401161ms step_avg:68.45ms
+[2025-07-07 21:50:41] [Rank 0] step:5881/10000 train_time:402535ms step_avg:68.45ms
+[2025-07-07 21:50:42] [Rank 0] step:5901/10000 train_time:403910ms step_avg:68.45ms
+[2025-07-07 21:50:43] [Rank 0] step:5921/10000 train_time:405284ms step_avg:68.45ms
+[2025-07-07 21:50:45] [Rank 0] step:5941/10000 train_time:406706ms step_avg:68.46ms
+[2025-07-07 21:50:46] [Rank 0] step:5961/10000 train_time:408062ms step_avg:68.46ms
+[2025-07-07 21:50:47] [Rank 0] step:5981/10000 train_time:409435ms step_avg:68.46ms
+[2025-07-07 21:50:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:50:50] [Rank 0] PRINT: step:6000/10000 train_loss:1.5944 val_loss:1.5789 train_time:411433ms step_avg:68.57ms
+[2025-07-07 21:50:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:50:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:50:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:56:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:56:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:56:17] [Rank 0] Total Loss: 4.6995
+[2025-07-07 21:56:17] [Rank 0] Total FTA: 0.0868
+[2025-07-07 21:56:17] [Rank 0] Group 0 Loss: 4.8010
+[2025-07-07 21:56:17] [Rank 0] Group 1 Loss: 4.5161
+[2025-07-07 21:56:17] [Rank 0] Group 2 Loss: 4.8584
+[2025-07-07 21:56:17] [Rank 0] Group 3 Loss: 4.7752
+[2025-07-07 21:56:17] [Rank 0] Group 4 Loss: 4.6427
+[2025-07-07 21:56:17] [Rank 0] Group 5 Loss: 4.5867
+[2025-07-07 21:56:17] [Rank 0] Group 6 Loss: 4.5806
+[2025-07-07 21:56:17] [Rank 0] Group 7 Loss: 4.6956
+[2025-07-07 21:56:17] [Rank 0] Group 8 Loss: 4.6865
+[2025-07-07 21:56:17] [Rank 0] Group 9 Loss: 4.6775
+[2025-07-07 21:56:17] [Rank 0] Group 10 Loss: 4.6921
+[2025-07-07 21:56:17] [Rank 0] Group 11 Loss: 4.7275
+[2025-07-07 21:56:17] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 21:56:17] [Rank 0] Group 1 FTA: 0.1823
+[2025-07-07 21:56:17] [Rank 0] Group 2 FTA: 0.1745
+[2025-07-07 21:56:17] [Rank 0] Group 3 FTA: 0.0547
+[2025-07-07 21:56:17] [Rank 0] Group 4 FTA: 0.0755
+[2025-07-07 21:56:17] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 21:56:17] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-07 21:56:17] [Rank 0] Group 7 FTA: 0.0833
+[2025-07-07 21:56:17] [Rank 0] Group 8 FTA: 0.0938
+[2025-07-07 21:56:17] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 21:56:17] [Rank 0] Group 10 FTA: 0.0840
+[2025-07-07 21:56:17] [Rank 0] Group 11 FTA: 0.0928
+[2025-07-07 21:56:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 21:56:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 21:56:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 21:56:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 21:56:19] [Rank 0] step:6001/10000 train_time:411441ms step_avg:68.56ms
+[2025-07-07 21:56:20] [Rank 0] step:6021/10000 train_time:412194ms step_avg:68.46ms
+[2025-07-07 21:56:21] [Rank 0] step:6041/10000 train_time:413623ms step_avg:68.47ms
+[2025-07-07 21:56:23] [Rank 0] step:6061/10000 train_time:414988ms step_avg:68.47ms
+[2025-07-07 21:56:24] [Rank 0] step:6081/10000 train_time:416351ms step_avg:68.47ms
+[2025-07-07 21:56:26] [Rank 0] step:6101/10000 train_time:417716ms step_avg:68.47ms
+[2025-07-07 21:56:27] [Rank 0] step:6121/10000 train_time:419334ms step_avg:68.51ms
+[2025-07-07 21:56:28] [Rank 0] step:6141/10000 train_time:420487ms step_avg:68.47ms
+[2025-07-07 21:56:30] [Rank 0] step:6161/10000 train_time:421851ms step_avg:68.47ms
+[2025-07-07 21:56:31] [Rank 0] step:6181/10000 train_time:423218ms step_avg:68.47ms
+[2025-07-07 21:56:32] [Rank 0] step:6201/10000 train_time:424586ms step_avg:68.47ms
+[2025-07-07 21:56:34] [Rank 0] step:6221/10000 train_time:425954ms step_avg:68.47ms
+[2025-07-07 21:56:35] [Rank 0] step:6241/10000 train_time:427322ms step_avg:68.47ms
+[2025-07-07 21:56:37] [Rank 0] step:6261/10000 train_time:428693ms step_avg:68.47ms
+[2025-07-07 21:56:38] [Rank 0] step:6281/10000 train_time:430061ms step_avg:68.47ms
+[2025-07-07 21:56:39] [Rank 0] step:6301/10000 train_time:431433ms step_avg:68.47ms
+[2025-07-07 21:56:41] [Rank 0] step:6321/10000 train_time:432839ms step_avg:68.48ms
+[2025-07-07 21:56:42] [Rank 0] step:6341/10000 train_time:434210ms step_avg:68.48ms
+[2025-07-07 21:56:43] [Rank 0] step:6361/10000 train_time:435581ms step_avg:68.48ms
+[2025-07-07 21:56:45] [Rank 0] step:6381/10000 train_time:436953ms step_avg:68.48ms
+[2025-07-07 21:56:46] [Rank 0] step:6401/10000 train_time:438325ms step_avg:68.48ms
+[2025-07-07 21:56:48] [Rank 0] step:6421/10000 train_time:439700ms step_avg:68.48ms
+[2025-07-07 21:56:49] [Rank 0] step:6441/10000 train_time:441073ms step_avg:68.48ms
+[2025-07-07 21:56:50] [Rank 0] step:6461/10000 train_time:442447ms step_avg:68.48ms
+[2025-07-07 21:56:52] [Rank 0] step:6481/10000 train_time:443869ms step_avg:68.49ms
+[2025-07-07 21:56:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:56:54] [Rank 0] PRINT: step:6500/10000 train_loss:1.5575 val_loss:1.5475 train_time:445851ms step_avg:68.59ms
+[2025-07-07 21:56:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:56:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:56:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:02:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:02:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:02:21] [Rank 0] Total Loss: 4.6449
+[2025-07-07 22:02:21] [Rank 0] Total FTA: 0.0964
+[2025-07-07 22:02:21] [Rank 0] Group 0 Loss: 4.7646
+[2025-07-07 22:02:21] [Rank 0] Group 1 Loss: 4.5363
+[2025-07-07 22:02:21] [Rank 0] Group 2 Loss: 4.6976
+[2025-07-07 22:02:21] [Rank 0] Group 3 Loss: 4.7775
+[2025-07-07 22:02:21] [Rank 0] Group 4 Loss: 4.6105
+[2025-07-07 22:02:21] [Rank 0] Group 5 Loss: 4.4794
+[2025-07-07 22:02:21] [Rank 0] Group 6 Loss: 4.5180
+[2025-07-07 22:02:21] [Rank 0] Group 7 Loss: 4.6749
+[2025-07-07 22:02:21] [Rank 0] Group 8 Loss: 4.6106
+[2025-07-07 22:02:21] [Rank 0] Group 9 Loss: 4.5800
+[2025-07-07 22:02:21] [Rank 0] Group 10 Loss: 4.6736
+[2025-07-07 22:02:21] [Rank 0] Group 11 Loss: 4.6521
+[2025-07-07 22:02:21] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 22:02:21] [Rank 0] Group 1 FTA: 0.2995
+[2025-07-07 22:02:21] [Rank 0] Group 2 FTA: 0.1771
+[2025-07-07 22:02:21] [Rank 0] Group 3 FTA: 0.1302
+[2025-07-07 22:02:21] [Rank 0] Group 4 FTA: 0.0833
+[2025-07-07 22:02:21] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-07 22:02:21] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-07 22:02:21] [Rank 0] Group 7 FTA: 0.0833
+[2025-07-07 22:02:21] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-07 22:02:21] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 22:02:21] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-07 22:02:21] [Rank 0] Group 11 FTA: 0.0732
+[2025-07-07 22:02:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 22:02:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 22:02:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 22:02:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 22:02:24] [Rank 0] step:6501/10000 train_time:445859ms step_avg:68.58ms
+[2025-07-07 22:02:25] [Rank 0] step:6521/10000 train_time:446629ms step_avg:68.49ms
+[2025-07-07 22:02:27] [Rank 0] step:6541/10000 train_time:447994ms step_avg:68.49ms
+[2025-07-07 22:02:28] [Rank 0] step:6561/10000 train_time:449359ms step_avg:68.49ms
+[2025-07-07 22:02:29] [Rank 0] step:6581/10000 train_time:450723ms step_avg:68.49ms
+[2025-07-07 22:02:31] [Rank 0] step:6601/10000 train_time:452088ms step_avg:68.49ms
+[2025-07-07 22:02:32] [Rank 0] step:6621/10000 train_time:453455ms step_avg:68.49ms
+[2025-07-07 22:02:34] [Rank 0] step:6641/10000 train_time:454823ms step_avg:68.49ms
+[2025-07-07 22:02:35] [Rank 0] step:6661/10000 train_time:456240ms step_avg:68.49ms
+[2025-07-07 22:02:36] [Rank 0] step:6681/10000 train_time:457590ms step_avg:68.49ms
+[2025-07-07 22:02:38] [Rank 0] step:6701/10000 train_time:458957ms step_avg:68.49ms
+[2025-07-07 22:02:39] [Rank 0] step:6721/10000 train_time:460326ms step_avg:68.49ms
+[2025-07-07 22:02:40] [Rank 0] step:6741/10000 train_time:461733ms step_avg:68.50ms
+[2025-07-07 22:02:42] [Rank 0] step:6761/10000 train_time:463102ms step_avg:68.50ms
+[2025-07-07 22:02:43] [Rank 0] step:6781/10000 train_time:464472ms step_avg:68.50ms
+[2025-07-07 22:02:45] [Rank 0] step:6801/10000 train_time:465841ms step_avg:68.50ms
+[2025-07-07 22:02:46] [Rank 0] step:6821/10000 train_time:467212ms step_avg:68.50ms
+[2025-07-07 22:02:47] [Rank 0] step:6841/10000 train_time:468632ms step_avg:68.50ms
+[2025-07-07 22:02:49] [Rank 0] step:6861/10000 train_time:470009ms step_avg:68.50ms
+[2025-07-07 22:02:50] [Rank 0] step:6881/10000 train_time:471382ms step_avg:68.50ms
+[2025-07-07 22:02:51] [Rank 0] step:6901/10000 train_time:472755ms step_avg:68.51ms
+[2025-07-07 22:02:53] [Rank 0] step:6921/10000 train_time:474130ms step_avg:68.51ms
+[2025-07-07 22:02:54] [Rank 0] step:6941/10000 train_time:475505ms step_avg:68.51ms
+[2025-07-07 22:02:56] [Rank 0] step:6961/10000 train_time:476879ms step_avg:68.51ms
+[2025-07-07 22:02:57] [Rank 0] step:6981/10000 train_time:478253ms step_avg:68.51ms
+[2025-07-07 22:02:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:02:59] [Rank 0] PRINT: step:7000/10000 train_loss:1.5262 val_loss:1.5168 train_time:480253ms step_avg:68.61ms
+[2025-07-07 22:02:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:02:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
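Each detailed-evaluation block opens with the same stratified-sampling message: ~5000 samples are requested, and the log then reports the realized set size (5633), which is why the size is printed after sampling rather than assumed. A minimal sketch of one way to do such sampling, assuming each example carries an integer group label (this is an illustration, not the script's actual implementation):

    import math
    import random
    from collections import defaultdict

    def stratified_sample(group_labels, target=5000, seed=0):
        """Pick ~target indices while preserving per-group proportions."""
        by_group = defaultdict(list)
        for idx, g in enumerate(group_labels):
            by_group[g].append(idx)
        rng = random.Random(seed)
        n = len(group_labels)
        picked = []
        for idxs in by_group.values():
            # Rounding up keeps small groups represented, so the realized
            # total need not equal the target (the run above lands on 5633
            # for a ~5000 request).
            k = min(len(idxs), math.ceil(len(idxs) * target / n))
            picked.extend(rng.sample(idxs, k))
        return picked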
+[2025-07-07 22:02:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:08:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:08:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:08:25] [Rank 0] Total Loss: 4.7299
+[2025-07-07 22:08:25] [Rank 0] Total FTA: 0.0861
+[2025-07-07 22:08:25] [Rank 0] Group 0 Loss: 4.8503
+[2025-07-07 22:08:25] [Rank 0] Group 1 Loss: 4.7812
+[2025-07-07 22:08:25] [Rank 0] Group 2 Loss: 4.6797
+[2025-07-07 22:08:25] [Rank 0] Group 3 Loss: 4.8963
+[2025-07-07 22:08:25] [Rank 0] Group 4 Loss: 4.6955
+[2025-07-07 22:08:25] [Rank 0] Group 5 Loss: 4.5779
+[2025-07-07 22:08:25] [Rank 0] Group 6 Loss: 4.5958
+[2025-07-07 22:08:25] [Rank 0] Group 7 Loss: 4.7280
+[2025-07-07 22:08:25] [Rank 0] Group 8 Loss: 4.7261
+[2025-07-07 22:08:25] [Rank 0] Group 9 Loss: 4.6816
+[2025-07-07 22:08:25] [Rank 0] Group 10 Loss: 4.7440
+[2025-07-07 22:08:25] [Rank 0] Group 11 Loss: 4.7042
+[2025-07-07 22:08:25] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 22:08:25] [Rank 0] Group 1 FTA: 0.1458
+[2025-07-07 22:08:25] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-07 22:08:25] [Rank 0] Group 3 FTA: 0.0964
+[2025-07-07 22:08:25] [Rank 0] Group 4 FTA: 0.0938
+[2025-07-07 22:08:25] [Rank 0] Group 5 FTA: 0.0938
+[2025-07-07 22:08:25] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-07 22:08:25] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 22:08:25] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-07 22:08:25] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 22:08:25] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 22:08:25] [Rank 0] Group 11 FTA: 0.1084
+[2025-07-07 22:08:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 22:08:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 22:08:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 22:08:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 22:08:27] [Rank 0] step:7001/10000 train_time:480261ms step_avg:68.60ms
+[2025-07-07 22:08:28] [Rank 0] step:7021/10000 train_time:481067ms step_avg:68.52ms
+[2025-07-07 22:08:29] [Rank 0] step:7041/10000 train_time:482412ms step_avg:68.51ms
+[2025-07-07 22:08:31] [Rank 0] step:7061/10000 train_time:483777ms step_avg:68.51ms
+[2025-07-07 22:08:32] [Rank 0] step:7081/10000 train_time:485141ms step_avg:68.51ms
+[2025-07-07 22:08:33] [Rank 0] step:7101/10000 train_time:486507ms step_avg:68.51ms
+[2025-07-07 22:08:35] [Rank 0] step:7121/10000 train_time:487873ms step_avg:68.51ms
+[2025-07-07 22:08:36] [Rank 0] step:7141/10000 train_time:489241ms step_avg:68.51ms
+[2025-07-07 22:08:38] [Rank 0] step:7161/10000 train_time:490607ms step_avg:68.51ms
+[2025-07-07 22:08:39] [Rank 0] step:7181/10000 train_time:491974ms step_avg:68.51ms
+[2025-07-07 22:08:40] [Rank 0] step:7201/10000 train_time:493345ms step_avg:68.51ms
+[2025-07-07 22:08:42] [Rank 0] step:7221/10000 train_time:494766ms step_avg:68.52ms
+[2025-07-07 22:08:43] [Rank 0] step:7241/10000 train_time:496134ms step_avg:68.52ms
+[2025-07-07 22:08:44] [Rank 0] step:7261/10000 train_time:497502ms step_avg:68.52ms
+[2025-07-07 22:08:46] [Rank 0] step:7281/10000 train_time:498873ms step_avg:68.52ms
+[2025-07-07 22:08:47] [Rank 0] step:7301/10000 train_time:500245ms step_avg:68.52ms
+[2025-07-07 22:08:49] [Rank 0] step:7321/10000 train_time:501618ms step_avg:68.52ms
+[2025-07-07 22:08:50] [Rank 0] step:7341/10000 train_time:502990ms step_avg:68.52ms
+[2025-07-07 22:08:51] [Rank 0] step:7361/10000 train_time:504362ms step_avg:68.52ms
+[2025-07-07 22:08:53] [Rank 0] step:7381/10000 train_time:505984ms step_avg:68.55ms
+[2025-07-07 22:08:54] [Rank 0] step:7401/10000 train_time:507151ms step_avg:68.52ms
+[2025-07-07 22:08:55] [Rank 0] step:7421/10000 train_time:508523ms step_avg:68.52ms
+[2025-07-07 22:08:57] [Rank 0] step:7441/10000 train_time:509894ms step_avg:68.52ms
+[2025-07-07 22:08:58] [Rank 0] step:7461/10000 train_time:511267ms step_avg:68.53ms
+[2025-07-07 22:09:00] [Rank 0] step:7481/10000 train_time:512640ms step_avg:68.53ms
+[2025-07-07 22:09:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:09:02] [Rank 0] PRINT: step:7500/10000 train_loss:1.4968 val_loss:1.4991 train_time:514639ms step_avg:68.62ms
+[2025-07-07 22:09:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:09:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
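The step_avg column is cumulative wall-clock time divided by the step index: at step 7500 above, 514639 ms / 7500 ≈ 68.62 ms, matching the log. The step:N000 rows run slightly high because the quick validation pass is counted in train_time, while the multi-minute detailed evaluation evidently is not (step 7001 lands only 8 ms after step 7000). A worked check against values taken from the log:

    # Reproduce the step_avg column from the log above.
    checks = [(6500, 445_851), (7000, 480_253), (7500, 514_639)]
    for step, train_time_ms in checks:
        print(f"step:{step} step_avg:{train_time_ms / step:.2f}ms")
    # -> 68.59ms, 68.61ms, 68.62ms, matching the logged values.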
+[2025-07-07 22:09:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:14:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:14:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:14:30] [Rank 0] Total Loss: 4.6920
+[2025-07-07 22:14:30] [Rank 0] Total FTA: 0.0811
+[2025-07-07 22:14:30] [Rank 0] Group 0 Loss: 4.8520
+[2025-07-07 22:14:30] [Rank 0] Group 1 Loss: 4.4912
+[2025-07-07 22:14:30] [Rank 0] Group 2 Loss: 4.7529
+[2025-07-07 22:14:30] [Rank 0] Group 3 Loss: 4.8683
+[2025-07-07 22:14:30] [Rank 0] Group 4 Loss: 4.6130
+[2025-07-07 22:14:30] [Rank 0] Group 5 Loss: 4.5432
+[2025-07-07 22:14:30] [Rank 0] Group 6 Loss: 4.6001
+[2025-07-07 22:14:30] [Rank 0] Group 7 Loss: 4.6541
+[2025-07-07 22:14:30] [Rank 0] Group 8 Loss: 4.6749
+[2025-07-07 22:14:30] [Rank 0] Group 9 Loss: 4.6749
+[2025-07-07 22:14:30] [Rank 0] Group 10 Loss: 4.7191
+[2025-07-07 22:14:30] [Rank 0] Group 11 Loss: 4.6897
+[2025-07-07 22:14:30] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 22:14:30] [Rank 0] Group 1 FTA: 0.1172
+[2025-07-07 22:14:30] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:14:30] [Rank 0] Group 3 FTA: 0.0990
+[2025-07-07 22:14:30] [Rank 0] Group 4 FTA: 0.0729
+[2025-07-07 22:14:30] [Rank 0] Group 5 FTA: 0.1042
+[2025-07-07 22:14:30] [Rank 0] Group 6 FTA: 0.1146
+[2025-07-07 22:14:30] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 22:14:30] [Rank 0] Group 8 FTA: 0.1250
+[2025-07-07 22:14:30] [Rank 0] Group 9 FTA: 0.1367
+[2025-07-07 22:14:30] [Rank 0] Group 10 FTA: 0.0801
+[2025-07-07 22:14:30] [Rank 0] Group 11 FTA: 0.0986
+[2025-07-07 22:14:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 22:14:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 22:14:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 22:14:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 22:14:31] [Rank 0] step:7501/10000 train_time:514647ms step_avg:68.61ms
+[2025-07-07 22:14:33] [Rank 0] step:7521/10000 train_time:515411ms step_avg:68.53ms
+[2025-07-07 22:14:34] [Rank 0] step:7541/10000 train_time:516780ms step_avg:68.53ms
+[2025-07-07 22:14:36] [Rank 0] step:7561/10000 train_time:518192ms step_avg:68.53ms
+[2025-07-07 22:14:37] [Rank 0] step:7581/10000 train_time:519558ms step_avg:68.53ms
+[2025-07-07 22:14:38] [Rank 0] step:7601/10000 train_time:520923ms step_avg:68.53ms
+[2025-07-07 22:14:40] [Rank 0] step:7621/10000 train_time:522289ms step_avg:68.53ms
+[2025-07-07 22:14:41] [Rank 0] step:7641/10000 train_time:523654ms step_avg:68.53ms
+[2025-07-07 22:14:42] [Rank 0] step:7661/10000 train_time:525019ms step_avg:68.53ms
+[2025-07-07 22:14:44] [Rank 0] step:7681/10000 train_time:526386ms step_avg:68.53ms
+[2025-07-07 22:14:45] [Rank 0] step:7701/10000 train_time:527753ms step_avg:68.53ms
+[2025-07-07 22:14:46] [Rank 0] step:7721/10000 train_time:529121ms step_avg:68.53ms
+[2025-07-07 22:14:48] [Rank 0] step:7741/10000 train_time:530489ms step_avg:68.53ms
+[2025-07-07 22:14:49] [Rank 0] step:7761/10000 train_time:531913ms step_avg:68.54ms
+[2025-07-07 22:14:51] [Rank 0] step:7781/10000 train_time:533283ms step_avg:68.54ms
+[2025-07-07 22:14:52] [Rank 0] step:7801/10000 train_time:534652ms step_avg:68.54ms
+[2025-07-07 22:14:53] [Rank 0] step:7821/10000 train_time:536023ms step_avg:68.54ms
+[2025-07-07 22:14:55] [Rank 0] step:7841/10000 train_time:537395ms step_avg:68.54ms
+[2025-07-07 22:14:56] [Rank 0] step:7861/10000 train_time:538766ms step_avg:68.54ms
+[2025-07-07 22:14:57] [Rank 0] step:7881/10000 train_time:540139ms step_avg:68.54ms
+[2025-07-07 22:14:59] [Rank 0] step:7901/10000 train_time:541511ms step_avg:68.54ms
+[2025-07-07 22:15:00] [Rank 0] step:7921/10000 train_time:542885ms step_avg:68.54ms
+[2025-07-07 22:15:02] [Rank 0] step:7941/10000 train_time:544300ms step_avg:68.54ms
+[2025-07-07 22:15:03] [Rank 0] step:7961/10000 train_time:545674ms step_avg:68.54ms
+[2025-07-07 22:15:04] [Rank 0] step:7981/10000 train_time:547048ms step_avg:68.54ms
+[2025-07-07 22:15:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:15:07] [Rank 0] PRINT: step:8000/10000 train_loss:1.4709 val_loss:1.4737 train_time:549047ms step_avg:68.63ms
+[2025-07-07 22:15:07] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:15:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
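FTA is reported as a total and per group, with Group 0 pinned at 0.0000 throughout this stretch. The log never expands the acronym; assuming it means first-token accuracy (the fraction of samples whose first answer token is predicted correctly), the aggregation would look like this hypothetical sketch:

    # Hypothetical FTA aggregation; "FTA" is assumed to mean first-token
    # accuracy -- the log itself never expands the acronym.
    def fta_by_group(first_token_correct, group_ids, num_groups=12):
        hits = [0] * num_groups
        counts = [0] * num_groups
        for ok, g in zip(first_token_correct, group_ids):
            hits[g] += int(ok)
            counts[g] += 1
        per_group = [h / c if c else 0.0 for h, c in zip(hits, counts)]
        total = sum(hits) / max(1, sum(counts))
        return per_group, total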
+[2025-07-07 22:15:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:20:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:20:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:20:35] [Rank 0] Total Loss: 4.7213
+[2025-07-07 22:20:35] [Rank 0] Total FTA: 0.0882
+[2025-07-07 22:20:35] [Rank 0] Group 0 Loss: 4.8731
+[2025-07-07 22:20:35] [Rank 0] Group 1 Loss: 4.5636
+[2025-07-07 22:20:35] [Rank 0] Group 2 Loss: 4.6661
+[2025-07-07 22:20:35] [Rank 0] Group 3 Loss: 4.9644
+[2025-07-07 22:20:35] [Rank 0] Group 4 Loss: 4.7181
+[2025-07-07 22:20:35] [Rank 0] Group 5 Loss: 4.5612
+[2025-07-07 22:20:36] [Rank 0] Group 6 Loss: 4.5857
+[2025-07-07 22:20:36] [Rank 0] Group 7 Loss: 4.7435
+[2025-07-07 22:20:36] [Rank 0] Group 8 Loss: 4.7271
+[2025-07-07 22:20:36] [Rank 0] Group 9 Loss: 4.6848
+[2025-07-07 22:20:36] [Rank 0] Group 10 Loss: 4.7199
+[2025-07-07 22:20:36] [Rank 0] Group 11 Loss: 4.7075
+[2025-07-07 22:20:36] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 22:20:36] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 22:20:36] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:20:36] [Rank 0] Group 3 FTA: 0.1120
+[2025-07-07 22:20:36] [Rank 0] Group 4 FTA: 0.0573
+[2025-07-07 22:20:36] [Rank 0] Group 5 FTA: 0.0885
+[2025-07-07 22:20:36] [Rank 0] Group 6 FTA: 0.1016
+[2025-07-07 22:20:36] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-07 22:20:36] [Rank 0] Group 8 FTA: 0.1510
+[2025-07-07 22:20:36] [Rank 0] Group 9 FTA: 0.1367
+[2025-07-07 22:20:36] [Rank 0] Group 10 FTA: 0.1270
+[2025-07-07 22:20:36] [Rank 0] Group 11 FTA: 0.0938
+[2025-07-07 22:20:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 22:20:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 22:20:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 22:20:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 22:20:37] [Rank 0] step:8001/10000 train_time:549056ms step_avg:68.62ms
+[2025-07-07 22:20:38] [Rank 0] step:8021/10000 train_time:549833ms step_avg:68.55ms
+[2025-07-07 22:20:40] [Rank 0] step:8041/10000 train_time:551195ms step_avg:68.55ms
+[2025-07-07 22:20:41] [Rank 0] step:8061/10000 train_time:552559ms step_avg:68.55ms
+[2025-07-07 22:20:43] [Rank 0] step:8081/10000 train_time:553924ms step_avg:68.55ms
+[2025-07-07 22:20:44] [Rank 0] step:8101/10000 train_time:555291ms step_avg:68.55ms
+[2025-07-07 22:20:45] [Rank 0] step:8121/10000 train_time:556694ms step_avg:68.55ms
+[2025-07-07 22:20:47] [Rank 0] step:8141/10000 train_time:558062ms step_avg:68.55ms
+[2025-07-07 22:20:48] [Rank 0] step:8161/10000 train_time:559430ms step_avg:68.55ms
+[2025-07-07 22:20:49] [Rank 0] step:8181/10000 train_time:560798ms step_avg:68.55ms
+[2025-07-07 22:20:51] [Rank 0] step:8201/10000 train_time:562167ms step_avg:68.55ms
+[2025-07-07 22:20:52] [Rank 0] step:8221/10000 train_time:563534ms step_avg:68.55ms
+[2025-07-07 22:20:54] [Rank 0] step:8241/10000 train_time:564904ms step_avg:68.55ms
+[2025-07-07 22:20:55] [Rank 0] step:8261/10000 train_time:566273ms step_avg:68.55ms
+[2025-07-07 22:20:56] [Rank 0] step:8281/10000 train_time:567894ms step_avg:68.58ms
+[2025-07-07 22:20:58] [Rank 0] step:8301/10000 train_time:569045ms step_avg:68.55ms
+[2025-07-07 22:20:59] [Rank 0] step:8321/10000 train_time:570416ms step_avg:68.55ms
+[2025-07-07 22:21:00] [Rank 0] step:8341/10000 train_time:571789ms step_avg:68.55ms
+[2025-07-07 22:21:02] [Rank 0] step:8361/10000 train_time:573164ms step_avg:68.55ms
+[2025-07-07 22:21:03] [Rank 0] step:8381/10000 train_time:574538ms step_avg:68.55ms
+[2025-07-07 22:21:05] [Rank 0] step:8401/10000 train_time:575911ms step_avg:68.55ms
+[2025-07-07 22:21:06] [Rank 0] step:8421/10000 train_time:577284ms step_avg:68.55ms
+[2025-07-07 22:21:07] [Rank 0] step:8441/10000 train_time:578659ms step_avg:68.55ms
+[2025-07-07 22:21:09] [Rank 0] step:8461/10000 train_time:580033ms step_avg:68.55ms
+[2025-07-07 22:21:10] [Rank 0] step:8481/10000 train_time:581434ms step_avg:68.56ms
+[2025-07-07 22:21:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:21:12] [Rank 0] PRINT: step:8500/10000 train_loss:1.4466 val_loss:1.4592 train_time:583434ms step_avg:68.64ms
+[2025-07-07 22:21:12] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:21:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:21:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:26:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:26:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:26:42] [Rank 0] Total Loss: 4.8312
+[2025-07-07 22:26:42] [Rank 0] Total FTA: 0.1044
+[2025-07-07 22:26:42] [Rank 0] Group 0 Loss: 4.8455
+[2025-07-07 22:26:42] [Rank 0] Group 1 Loss: 4.8050
+[2025-07-07 22:26:42] [Rank 0] Group 2 Loss: 4.8892
+[2025-07-07 22:26:42] [Rank 0] Group 3 Loss: 5.0563
+[2025-07-07 22:26:42] [Rank 0] Group 4 Loss: 4.7597
+[2025-07-07 22:26:42] [Rank 0] Group 5 Loss: 4.7372
+[2025-07-07 22:26:42] [Rank 0] Group 6 Loss: 4.7074
+[2025-07-07 22:26:42] [Rank 0] Group 7 Loss: 4.7826
+[2025-07-07 22:26:42] [Rank 0] Group 8 Loss: 4.8078
+[2025-07-07 22:26:42] [Rank 0] Group 9 Loss: 4.7995
+[2025-07-07 22:26:42] [Rank 0] Group 10 Loss: 4.8269
+[2025-07-07 22:26:42] [Rank 0] Group 11 Loss: 4.8696
+[2025-07-07 22:26:42] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 22:26:42] [Rank 0] Group 1 FTA: 0.3333
+[2025-07-07 22:26:42] [Rank 0] Group 2 FTA: 0.1693
+[2025-07-07 22:26:42] [Rank 0] Group 3 FTA: 0.1250
+[2025-07-07 22:26:42] [Rank 0] Group 4 FTA: 0.0599
+[2025-07-07 22:26:42] [Rank 0] Group 5 FTA: 0.0859
+[2025-07-07 22:26:42] [Rank 0] Group 6 FTA: 0.1250
+[2025-07-07 22:26:42] [Rank 0] Group 7 FTA: 0.0625
+[2025-07-07 22:26:42] [Rank 0] Group 8 FTA: 0.0938
+[2025-07-07 22:26:42] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 22:26:42] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-07 22:26:42] [Rank 0] Group 11 FTA: 0.1094
+[2025-07-07 22:26:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 22:26:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 22:26:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 22:26:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 22:26:44] [Rank 0] step:8501/10000 train_time:583442ms step_avg:68.63ms
+[2025-07-07 22:26:45] [Rank 0] step:8521/10000 train_time:584207ms step_avg:68.56ms
+[2025-07-07 22:26:46] [Rank 0] step:8541/10000 train_time:585570ms step_avg:68.56ms
+[2025-07-07 22:26:48] [Rank 0] step:8561/10000 train_time:586935ms step_avg:68.56ms
+[2025-07-07 22:26:49] [Rank 0] step:8581/10000 train_time:588298ms step_avg:68.56ms
+[2025-07-07 22:26:50] [Rank 0] step:8601/10000 train_time:589663ms step_avg:68.56ms
+[2025-07-07 22:26:52] [Rank 0] step:8621/10000 train_time:591029ms step_avg:68.56ms
+[2025-07-07 22:26:53] [Rank 0] step:8641/10000 train_time:592646ms step_avg:68.59ms
+[2025-07-07 22:26:55] [Rank 0] step:8661/10000 train_time:593795ms step_avg:68.56ms
+[2025-07-07 22:26:56] [Rank 0] step:8681/10000 train_time:595164ms step_avg:68.56ms
+[2025-07-07 22:26:57] [Rank 0] step:8701/10000 train_time:596531ms step_avg:68.56ms
+[2025-07-07 22:26:59] [Rank 0] step:8721/10000 train_time:597901ms step_avg:68.56ms
+[2025-07-07 22:27:00] [Rank 0] step:8741/10000 train_time:599269ms step_avg:68.56ms
+[2025-07-07 22:27:01] [Rank 0] step:8761/10000 train_time:600638ms step_avg:68.56ms
+[2025-07-07 22:27:03] [Rank 0] step:8781/10000 train_time:602007ms step_avg:68.56ms
+[2025-07-07 22:27:04] [Rank 0] step:8801/10000 train_time:603376ms step_avg:68.56ms
+[2025-07-07 22:27:06] [Rank 0] step:8821/10000 train_time:604745ms step_avg:68.56ms
+[2025-07-07 22:27:07] [Rank 0] step:8841/10000 train_time:606147ms step_avg:68.56ms
+[2025-07-07 22:27:08] [Rank 0] step:8861/10000 train_time:607519ms step_avg:68.56ms
+[2025-07-07 22:27:10] [Rank 0] step:8881/10000 train_time:608889ms step_avg:68.56ms
+[2025-07-07 22:27:11] [Rank 0] step:8901/10000 train_time:610262ms step_avg:68.56ms
+[2025-07-07 22:27:12] [Rank 0] step:8921/10000 train_time:611635ms step_avg:68.56ms
+[2025-07-07 22:27:14] [Rank 0] step:8941/10000 train_time:613007ms step_avg:68.56ms
+[2025-07-07 22:27:15] [Rank 0] step:8961/10000 train_time:614380ms step_avg:68.56ms
+[2025-07-07 22:27:17] [Rank 0] step:8981/10000 train_time:615754ms step_avg:68.56ms
+[2025-07-07 22:27:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:27:19] [Rank 0] PRINT: step:9000/10000 train_loss:1.4251 val_loss:1.4350 train_time:617753ms step_avg:68.64ms
+[2025-07-07 22:27:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:27:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:27:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:32:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:32:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:32:49] [Rank 0] Total Loss: 4.7666
+[2025-07-07 22:32:49] [Rank 0] Total FTA: 0.1017
+[2025-07-07 22:32:49] [Rank 0] Group 0 Loss: 4.8640
+[2025-07-07 22:32:49] [Rank 0] Group 1 Loss: 4.5042
+[2025-07-07 22:32:49] [Rank 0] Group 2 Loss: 4.6992
+[2025-07-07 22:32:49] [Rank 0] Group 3 Loss: 5.1506
+[2025-07-07 22:32:49] [Rank 0] Group 4 Loss: 4.7401
+[2025-07-07 22:32:49] [Rank 0] Group 5 Loss: 4.6377
+[2025-07-07 22:32:49] [Rank 0] Group 6 Loss: 4.6322
+[2025-07-07 22:32:49] [Rank 0] Group 7 Loss: 4.7534
+[2025-07-07 22:32:49] [Rank 0] Group 8 Loss: 4.8073
+[2025-07-07 22:32:49] [Rank 0] Group 9 Loss: 4.6850
+[2025-07-07 22:32:49] [Rank 0] Group 10 Loss: 4.7973
+[2025-07-07 22:32:49] [Rank 0] Group 11 Loss: 4.7767
+[2025-07-07 22:32:49] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 22:32:49] [Rank 0] Group 1 FTA: 0.3281
+[2025-07-07 22:32:49] [Rank 0] Group 2 FTA: 0.0781
+[2025-07-07 22:32:49] [Rank 0] Group 3 FTA: 0.1042
+[2025-07-07 22:32:49] [Rank 0] Group 4 FTA: 0.1198
+[2025-07-07 22:32:49] [Rank 0] Group 5 FTA: 0.0990
+[2025-07-07 22:32:49] [Rank 0] Group 6 FTA: 0.1250
+[2025-07-07 22:32:49] [Rank 0] Group 7 FTA: 0.0755
+[2025-07-07 22:32:49] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 22:32:49] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 22:32:49] [Rank 0] Group 10 FTA: 0.1172
+[2025-07-07 22:32:49] [Rank 0] Group 11 FTA: 0.0947
+[2025-07-07 22:32:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png
+[2025-07-07 22:32:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png
+[2025-07-07 22:32:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png
+[2025-07-07 22:32:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png
+[2025-07-07 22:32:51] [Rank 0] step:9001/10000 train_time:618484ms step_avg:68.71ms
+[2025-07-07 22:32:53] [Rank 0] step:9021/10000 train_time:619246ms step_avg:68.64ms
+[2025-07-07 22:32:54] [Rank 0] step:9041/10000 train_time:620611ms step_avg:68.64ms
+[2025-07-07 22:32:55] [Rank 0] step:9061/10000 train_time:621975ms step_avg:68.64ms
+[2025-07-07 22:32:57] [Rank 0] step:9081/10000 train_time:623346ms step_avg:68.64ms
+[2025-07-07 22:32:58] [Rank 0] step:9101/10000 train_time:624712ms step_avg:68.64ms
+[2025-07-07 22:32:59] [Rank 0] step:9121/10000 train_time:626079ms step_avg:68.64ms
+[2025-07-07 22:33:01] [Rank 0] step:9141/10000 train_time:627447ms step_avg:68.64ms
+[2025-07-07 22:33:02] [Rank 0] step:9161/10000 train_time:628815ms step_avg:68.64ms
+[2025-07-07 22:33:04] [Rank 0] step:9181/10000 train_time:630434ms step_avg:68.67ms
+[2025-07-07 22:33:05] [Rank 0] step:9201/10000 train_time:631604ms step_avg:68.65ms
+[2025-07-07 22:33:06] [Rank 0] step:9221/10000 train_time:632971ms step_avg:68.64ms
+[2025-07-07 22:33:08] [Rank 0] step:9241/10000 train_time:634338ms step_avg:68.64ms
+[2025-07-07 22:33:09] [Rank 0] step:9261/10000 train_time:635708ms step_avg:68.64ms
+[2025-07-07 22:33:10] [Rank 0] step:9281/10000 train_time:637077ms step_avg:68.64ms
+[2025-07-07 22:33:12] [Rank 0] step:9301/10000 train_time:638446ms step_avg:68.64ms
+[2025-07-07 22:33:13] [Rank 0] step:9321/10000 train_time:639816ms step_avg:68.64ms
+[2025-07-07 22:33:15] [Rank 0] step:9341/10000 train_time:641187ms step_avg:68.64ms
+[2025-07-07 22:33:16] [Rank 0] step:9361/10000 train_time:642606ms step_avg:68.65ms
+[2025-07-07 22:33:17] [Rank 0] step:9381/10000 train_time:643958ms step_avg:68.64ms
+[2025-07-07 22:33:19] [Rank 0] step:9401/10000 train_time:645329ms step_avg:68.64ms
+[2025-07-07 22:33:20] [Rank 0] step:9421/10000 train_time:646700ms step_avg:68.64ms
+[2025-07-07 22:33:21] [Rank 0] step:9441/10000 train_time:648071ms step_avg:68.64ms
+[2025-07-07 22:33:23] [Rank 0] step:9461/10000 train_time:649444ms step_avg:68.64ms
+[2025-07-07 22:33:24] [Rank 0] step:9481/10000 train_time:650818ms step_avg:68.64ms
+[2025-07-07 22:33:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:33:26] [Rank 0] PRINT: step:9500/10000 train_loss:1.4053 val_loss:1.4154 train_time:652815ms step_avg:68.72ms
+[2025-07-07 22:33:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:33:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:33:27] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 22:33:27] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 22:38:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 22:38:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 22:38:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 22:38:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 22:38:56] [Rank 0] Total Loss: 4.8387 +[2025-07-07 22:38:56] [Rank 0] Total Loss: 4.8387 +[2025-07-07 22:38:56] [Rank 0] Total FTA: 0.1104 +[2025-07-07 22:38:56] [Rank 0] Total FTA: 0.1104 +[2025-07-07 22:38:56] [Rank 0] Group 0 Loss: 4.8757 +[2025-07-07 22:38:56] [Rank 0] Group 0 Loss: 4.8757 +[2025-07-07 22:38:56] [Rank 0] Group 1 Loss: 4.5062 +[2025-07-07 22:38:56] [Rank 0] Group 1 Loss: 4.5062 +[2025-07-07 22:38:56] [Rank 0] Group 2 Loss: 4.8985 +[2025-07-07 22:38:56] [Rank 0] Group 2 Loss: 4.8985 +[2025-07-07 22:38:56] [Rank 0] Group 3 Loss: 5.1111 +[2025-07-07 22:38:56] [Rank 0] Group 3 Loss: 5.1111 +[2025-07-07 22:38:56] [Rank 0] Group 4 Loss: 4.8073 +[2025-07-07 22:38:56] [Rank 0] Group 4 Loss: 4.8073 +[2025-07-07 22:38:56] [Rank 0] Group 5 Loss: 4.7581 +[2025-07-07 22:38:56] [Rank 0] Group 5 Loss: 4.7581 +[2025-07-07 22:38:56] [Rank 0] Group 6 Loss: 4.7367 +[2025-07-07 22:38:56] [Rank 0] Group 6 Loss: 4.7367 +[2025-07-07 22:38:56] [Rank 0] Group 7 Loss: 4.8609 +[2025-07-07 22:38:56] [Rank 0] Group 7 Loss: 4.8609 +[2025-07-07 22:38:56] [Rank 0] Group 8 Loss: 4.8331 +[2025-07-07 22:38:56] [Rank 0] Group 8 Loss: 4.8331 +[2025-07-07 22:38:56] [Rank 0] Group 9 Loss: 4.8976 +[2025-07-07 22:38:56] [Rank 0] Group 9 Loss: 4.8976 +[2025-07-07 22:38:56] [Rank 0] Group 10 Loss: 4.9145 +[2025-07-07 22:38:56] [Rank 0] Group 10 Loss: 4.9145 +[2025-07-07 22:38:56] [Rank 0] Group 11 Loss: 4.8326 +[2025-07-07 22:38:56] [Rank 0] Group 11 Loss: 4.8326 +[2025-07-07 22:38:56] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 22:38:56] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 22:38:56] [Rank 0] Group 1 FTA: 0.3490 +[2025-07-07 22:38:56] [Rank 0] Group 1 FTA: 0.3490 +[2025-07-07 22:38:56] [Rank 0] Group 2 FTA: 0.0964 +[2025-07-07 22:38:56] [Rank 0] Group 2 FTA: 0.0964 +[2025-07-07 22:38:56] [Rank 0] Group 3 FTA: 0.1172 +[2025-07-07 22:38:56] [Rank 0] Group 3 FTA: 0.1172 +[2025-07-07 22:38:56] [Rank 0] Group 4 FTA: 0.0729 +[2025-07-07 22:38:56] [Rank 0] Group 4 FTA: 0.0729 +[2025-07-07 22:38:56] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-07 22:38:56] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-07 22:38:56] [Rank 0] Group 6 FTA: 0.1016 +[2025-07-07 22:38:56] [Rank 0] Group 6 FTA: 0.1016 +[2025-07-07 22:38:56] [Rank 0] Group 7 FTA: 0.0990 +[2025-07-07 22:38:56] [Rank 0] Group 7 FTA: 0.0990 +[2025-07-07 22:38:56] [Rank 0] Group 8 FTA: 0.1354 +[2025-07-07 22:38:56] [Rank 0] Group 8 FTA: 0.1354 +[2025-07-07 22:38:56] [Rank 0] Group 9 FTA: 0.1289 +[2025-07-07 22:38:56] [Rank 0] Group 9 FTA: 0.1289 +[2025-07-07 22:38:56] [Rank 0] Group 10 FTA: 0.1328 +[2025-07-07 22:38:56] [Rank 0] Group 10 FTA: 0.1328 +[2025-07-07 22:38:56] [Rank 0] Group 11 FTA: 0.1191 +[2025-07-07 22:38:56] [Rank 0] Group 11 FTA: 0.1191 +[2025-07-07 22:38:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 22:38:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 22:38:57] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 22:38:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 22:38:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 22:38:58] [Rank 0] step:9501/10000 train_time:652823ms step_avg:68.71ms +[2025-07-07 22:38:59] [Rank 0] step:9521/10000 train_time:653595ms step_avg:68.65ms +[2025-07-07 22:39:01] [Rank 0] step:9541/10000 train_time:655010ms step_avg:68.65ms +[2025-07-07 22:39:02] [Rank 0] step:9561/10000 train_time:656374ms step_avg:68.65ms +[2025-07-07 22:39:03] [Rank 0] step:9581/10000 train_time:657738ms step_avg:68.65ms +[2025-07-07 22:39:05] [Rank 0] step:9601/10000 train_time:659102ms step_avg:68.65ms +[2025-07-07 22:39:06] [Rank 0] step:9621/10000 train_time:660468ms step_avg:68.65ms +[2025-07-07 22:39:08] [Rank 0] step:9641/10000 train_time:661836ms step_avg:68.65ms +[2025-07-07 22:39:09] [Rank 0] step:9661/10000 train_time:663204ms step_avg:68.65ms +[2025-07-07 22:39:10] [Rank 0] step:9681/10000 train_time:664571ms step_avg:68.65ms +[2025-07-07 22:39:12] [Rank 0] step:9701/10000 train_time:665939ms step_avg:68.65ms +[2025-07-07 22:39:13] [Rank 0] step:9721/10000 train_time:667558ms step_avg:68.67ms +[2025-07-07 22:39:14] [Rank 0] step:9741/10000 train_time:668732ms step_avg:68.65ms +[2025-07-07 22:39:16] [Rank 0] step:9761/10000 train_time:670100ms step_avg:68.65ms +[2025-07-07 22:39:17] [Rank 0] step:9781/10000 train_time:671470ms step_avg:68.65ms +[2025-07-07 22:39:19] [Rank 0] step:9801/10000 train_time:672839ms step_avg:68.65ms
+[2025-07-07 22:39:20] [Rank 0] step:9821/10000 train_time:674209ms step_avg:68.65ms +[2025-07-07 22:39:21] [Rank 0] step:9841/10000 train_time:675579ms step_avg:68.65ms +[2025-07-07 22:39:23] [Rank 0] step:9861/10000 train_time:676952ms step_avg:68.65ms +[2025-07-07 22:39:24] [Rank 0] step:9881/10000 train_time:678324ms step_avg:68.65ms +[2025-07-07 22:39:25] [Rank 0] step:9901/10000 train_time:679696ms step_avg:68.65ms +[2025-07-07 22:39:27] [Rank 0] step:9921/10000 train_time:681115ms step_avg:68.65ms +[2025-07-07 22:39:28] [Rank 0] step:9941/10000 train_time:682487ms step_avg:68.65ms +[2025-07-07 22:39:30] [Rank 0] step:9961/10000 train_time:683859ms step_avg:68.65ms +[2025-07-07 22:39:31] [Rank 0] step:9981/10000 train_time:685231ms step_avg:68.65ms +[2025-07-07 22:39:32] [Rank 0] step:10000/10000 train_time:686536ms step_avg:68.65ms +[2025-07-07 22:39:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 22:39:33] [Rank 0] PRINT: step:10000/10000 train_loss:1.3886 val_loss:1.4001 train_time:687236ms step_avg:68.72ms +[2025-07-07 22:39:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 22:39:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:39:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 22:45:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 22:45:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 22:45:03] [Rank 0] Total Loss: 4.8525 +[2025-07-07 22:45:03] [Rank 0] Total FTA: 0.1166 +[2025-07-07 22:45:03] [Rank 0] Group 0 Loss: 4.9653 +[2025-07-07 22:45:03] [Rank 0] Group 1 Loss: 4.5725 +[2025-07-07 22:45:03] [Rank 0] Group 2 Loss: 4.8152 +[2025-07-07 22:45:03] [Rank 0] Group 3 Loss: 5.0989 +[2025-07-07 22:45:03] [Rank 0] Group 4 Loss: 4.8244 +[2025-07-07 22:45:03] [Rank 0] Group 5 Loss: 4.6970 +[2025-07-07 22:45:03] [Rank 0] Group 6 Loss: 4.7191 +[2025-07-07 22:45:03] [Rank 0] Group 7 Loss: 4.9055 +[2025-07-07 22:45:03] [Rank 0] Group 8 Loss: 4.8448 +[2025-07-07 22:45:03] [Rank 0] Group 9 Loss: 4.8453 +[2025-07-07 22:45:03] [Rank 0] Group 10 Loss: 4.8979 +[2025-07-07 22:45:03] [Rank 0] Group 11 Loss: 4.8755 +[2025-07-07 22:45:03] [Rank 0] Group 0 FTA: 0.1482 +[2025-07-07 22:45:03] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-07 22:45:03] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 22:45:03] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 22:45:03] [Rank 0] Group 4 FTA: 0.0417 +[2025-07-07 22:45:03] [Rank 0] Group 5 FTA: 0.0990 +[2025-07-07 22:45:03] [Rank 0] Group 6 FTA: 0.1146 +[2025-07-07 22:45:03] [Rank 0] Group 7 FTA: 0.1042 +[2025-07-07 22:45:03] [Rank 0] Group 8 FTA: 0.1094 +[2025-07-07 22:45:03] [Rank 0] Group 9 FTA: 0.1367 +[2025-07-07 22:45:03] [Rank 0] Group 10 FTA: 0.1367 +[2025-07-07 22:45:03] [Rank 0] Group 11 FTA: 0.1299 +[2025-07-07 22:45:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_loss_curves.png +[2025-07-07 22:45:04] [Rank 0] [✓] Per-Class FTA curve
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/per_class_acc_curves.png +[2025-07-07 22:45:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_loss_curve.png +[2025-07-07 22:45:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_45/total_acc_curve.png +[2025-07-07 22:45:04] [Rank 0] step:10001/10000 train_time:687245ms step_avg:68.72ms +[2025-07-07 22:45:04] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 22:45:04 2025 --- +[2025-07-07 22:45:04] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10316 MiB diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e761e03e1908b748d7c704a7a93fa4266b42ce73 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 48, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.0002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "e456ce24-8d46-4420-920d-3f4f1b4b09b6", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..8ea98194431cba8e2b714eaa6d03be87896e0181 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acd275ffe668d2d08e8f88b7305e462e450b64f6f2ca594b41419ae3800efb0b +size 326179 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..20363d771bb89d60e51466d4f8a622ad601aa068 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:095df7d8f413b7d4a927f536a3c2c7e102a0221f97199b1c89a44312f351ca31 +size 289564 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..d6ba9f2a9ad4a462e6a5d3bf18e9cfaa76bfc193 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aefaa8778ca4f1bc40d7258355d20dceb5d5343b27f47ab772c6755cae8743bf +size 92810 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..a9fa932e76eaeb4b2021d9da89f7e66038e0393d --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd67d7e7b3b5069777dd23aa50b2a988e8c98ea5d2e5097d95dbe4dc47d3e621 +size 108615 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/training_log_e456ce24-8d46-4420-920d-3f4f1b4b09b6.txt b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/training_log_e456ce24-8d46-4420-920d-3f4f1b4b09b6.txt new file mode 100644 index 0000000000000000000000000000000000000000..dd418663e6ff82d5f4e8f930111aa2a47432e958 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/training_log_e456ce24-8d46-4420-920d-3f4f1b4b09b6.txt @@ -0,0 +1,5144 @@ +[2025-07-07 20:39:54] [Rank 0] PRINT: --- Script Start: Mon Jul 7 20:39:54 2025 --- +[2025-07-07 20:39:54] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0002) +[2025-07-07 20:39:54] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 20:39:54] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-07 20:39:54] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48 +[2025-07-07 20:39:54] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op,
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
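+# Illustrative sketch, not part of the original logged script: a minimal writer
+# for the shard format consumed by _load_data_shard above (a 256-entry int32
+# header carrying the magic number 20240520, version 1, and the token count,
+# followed by the token ids as uint16). The helper name write_test_shard is
+# hypothetical; it is shown only to make the on-disk layout concrete.
+def write_test_shard(path, token_ids):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520          # magic number asserted by _load_data_shard
+    header[1] = 1                 # version asserted by _load_data_shard
+    header[2] = len(token_ids)    # num_tokens, read back as header[2]
+    with open(path, "wb") as f:
+        f.write(header.tobytes())                                   # 256 * 4 header bytes, matching f.seek(256 * 4)
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())   # 2 bytes per token, matching the nbytes == 2 * num_tokens check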
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + # Append the message to the logfile once (logfile stays None when this + # configuration was already run, or on non-master ranks) + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code
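+    # Minimal usage sketch for print0, assuming the semantics defined above:
+    #   print0("PRINT: hello", console=True)   # stdout: "hello" (prefix stripped); logfile: "[ts] [Rank 0] PRINT: hello"
+    #   print0("step:100 ...")                 # logfile only, no console output
+    # Non-master ranks return without printing or writing.
+    # ...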
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 20:39:54] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # itertools.cycle re-reads the shard list, enabling multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
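+# Readability aid (a summary sketch, not used elsewhere in this script): the
+# --optimizer_mode flag above splits the weight matrices between Muon and Adam
+# as follows. The authoritative assignment is the if/elif chain in the
+# optimizer setup further down; in every mode Adam additionally handles the
+# embedding, LM-head, and scalar parameters.
+_OPTIMIZER_MODE_SUMMARY = {
+    0: ("Muon: all attn (QKVO) + all MLP", "Adam: defaults only"),
+    1: ("Muon: attn QK", "Adam: attn VO + MLP"),
+    2: ("Muon: attn VO", "Adam: attn QK + MLP"),
+    3: ("Muon: all attn (QKVO)", "Adam: MLP"),
+    4: ("Muon: MLP", "Adam: all attn (QKVO)"),
+    5: ("Muon: none", "Adam: all attn + all MLP"),
+    6: ("Muon: MLP W_2", "Adam: all attn + MLP W_1"),
+    7: ("Muon: attn VO + MLP", "Adam: attn QK"),
+    8: ("Muon: attn VO + MLP W_2", "Adam: attn QK + MLP W_1"),
+}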
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once, and only when a
+        # log file has actually been opened for this run.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+    # Accumulators for the detailed-evaluation curves, keyed by group id and step.
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        #if world_size > 1:
+        #    ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #    ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #    ft_total_tensor = torch.tensor(ft_total, device=device)
+        #    dist.broadcast(ft_acc_tensor, 0)
+        #    dist.broadcast(ft_correct_tensor, 0)
+        #    dist.broadcast(ft_total_tensor, 0)
+        #    first_token_acc = ft_acc_tensor.item()
+        #    ft_correct = int(ft_correct_tensor.item())
+        #    ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            # Evaluate on an uncompiled copy of the current weights.
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        # Reset the running train-loss accumulators and restart the step timer.
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    # Manual gradient averaging across ranks (the model is not wrapped in DDP).
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    # Muon momentum warmup: linear ramp from 0.85 to 0.95 over the first 300 steps.
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        # Computed for reference; not included in the printed line below.
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-07 20:39:54] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-07 20:39:54] [Rank 0] PRINT: Constructing model...
+[2025-07-07 20:39:57] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 20:39:57] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 20:39:57] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 20:39:58] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 20:39:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 20:39:58] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 20:39:58] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 20:39:58] [Rank 0] PRINT: Model returns:
+[2025-07-07 20:39:58] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 20:39:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-07 20:39:58] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0002).
+[2025-07-07 20:39:58] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-07 20:39:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-07 20:39:58] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 20:39:58] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 20:39:58] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 20:41:37] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 20:41:37] [Rank 0] PRINT: Starting training...
+[2025-07-07 20:41:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:41:45] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 20:41:47] [Rank 0] step:21/10000 train_time:817ms step_avg:38.90ms
+[2025-07-07 20:41:48] [Rank 0] step:41/10000 train_time:2141ms step_avg:52.22ms
+[2025-07-07 20:41:50] [Rank 0] step:61/10000 train_time:3466ms step_avg:56.81ms
+[2025-07-07 20:41:51] [Rank 0] step:81/10000 train_time:4790ms step_avg:59.14ms
+[2025-07-07 20:41:52] [Rank 0] step:101/10000 train_time:6115ms step_avg:60.54ms
+[2025-07-07 20:41:54] [Rank 0] step:121/10000 train_time:7439ms step_avg:61.48ms
+[2025-07-07 20:41:55] [Rank 0] step:141/10000 train_time:8763ms step_avg:62.15ms
+[2025-07-07 20:41:56] [Rank 0] step:161/10000 train_time:10087ms step_avg:62.65ms
+[2025-07-07 20:41:58] [Rank 0] step:181/10000 train_time:11411ms step_avg:63.04ms
+[2025-07-07 20:41:59] [Rank 0] step:201/10000 train_time:12807ms step_avg:63.72ms
+[2025-07-07 20:42:00] [Rank 0] step:221/10000 train_time:14132ms step_avg:63.95ms
+[2025-07-07 20:42:02] [Rank 0] step:241/10000 train_time:15459ms step_avg:64.14ms
+[2025-07-07 20:42:03] [Rank 0] step:261/10000 train_time:16786ms step_avg:64.31ms
+[2025-07-07 20:42:04] [Rank 0] step:281/10000 train_time:18113ms step_avg:64.46ms
+[2025-07-07 20:42:06] [Rank 0] step:301/10000 train_time:19439ms step_avg:64.58ms
+[2025-07-07 20:42:07] [Rank 0] step:321/10000 train_time:20767ms step_avg:64.70ms
+[2025-07-07 20:42:08] [Rank 0] step:341/10000 train_time:22119ms step_avg:64.87ms
+[2025-07-07 20:42:10] [Rank 0] step:361/10000 train_time:23446ms step_avg:64.95ms
+[2025-07-07 20:42:11] [Rank 0] step:381/10000 train_time:24773ms step_avg:65.02ms
+[2025-07-07 20:42:12] [Rank 0] step:401/10000 train_time:26100ms step_avg:65.09ms
+[2025-07-07 20:42:14] [Rank 0] step:421/10000 train_time:27427ms step_avg:65.15ms
+[2025-07-07 20:42:15] [Rank 0] step:441/10000 train_time:28755ms step_avg:65.20ms
+[2025-07-07 20:42:16] [Rank 0] step:461/10000 train_time:30083ms step_avg:65.25ms
+[2025-07-07 20:42:17] [Rank 0] step:481/10000 train_time:31413ms step_avg:65.31ms
+[2025-07-07 20:42:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:42:20] [Rank 0] PRINT: step:500/10000 train_loss:8.7433 val_loss:7.1358 train_time:33347ms step_avg:66.69ms
+[2025-07-07 20:42:20] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:42:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:42:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:47:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:47:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:47:36] [Rank 0] Total Loss: 7.7009
+[2025-07-07 20:47:36] [Rank 0] Total FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 0 Loss: 7.7076
+[2025-07-07 20:47:36] [Rank 0] Group 1 Loss: 7.6408
+[2025-07-07 20:47:36] [Rank 0] Group 2 Loss: 7.8361
+[2025-07-07 20:47:36] [Rank 0] Group 3 Loss: 7.6746
+[2025-07-07 20:47:36] [Rank 0] Group 4 Loss: 7.7111
+[2025-07-07 20:47:36] [Rank 0] Group 5 Loss: 7.6650
+[2025-07-07 20:47:36] [Rank 0] Group 6 Loss: 7.7163
+[2025-07-07 20:47:36] [Rank 0] Group 7 Loss: 7.6955
+[2025-07-07 20:47:36] [Rank 0] Group 8 Loss: 7.6788
+[2025-07-07 20:47:36] [Rank 0] Group 9 Loss: 7.7154
+[2025-07-07 20:47:36] [Rank 0] Group 10 Loss: 7.6884
+[2025-07-07 20:47:36] [Rank 0] Group 11 Loss: 7.6943
+[2025-07-07 20:47:36] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 20:47:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 20:47:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 20:47:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 20:47:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 20:47:37] [Rank 0] step:501/10000 train_time:33356ms step_avg:66.58ms
+[2025-07-07 20:47:38] [Rank 0] step:521/10000 train_time:34098ms step_avg:65.45ms
+[2025-07-07 20:47:40] [Rank 0] step:541/10000 train_time:35421ms step_avg:65.47ms
+[2025-07-07 20:47:41] [Rank 0] step:561/10000 train_time:36815ms step_avg:65.62ms
+[2025-07-07 20:47:42] [Rank 0] step:581/10000 train_time:38139ms step_avg:65.64ms
+[2025-07-07 20:47:44] [Rank 0] step:601/10000 train_time:39463ms step_avg:65.66ms
+[2025-07-07 20:47:45] [Rank 0] step:621/10000 train_time:40789ms step_avg:65.68ms
+[2025-07-07 20:47:46] [Rank 0] step:641/10000 train_time:42117ms step_avg:65.70ms
+[2025-07-07 20:47:48] [Rank 0] step:661/10000 train_time:43444ms step_avg:65.73ms
+[2025-07-07 20:47:49] [Rank 0] step:681/10000 train_time:44773ms step_avg:65.75ms
+[2025-07-07 20:47:50] [Rank 0] step:701/10000 train_time:46104ms step_avg:65.77ms
+[2025-07-07 20:47:52] [Rank 0] step:721/10000 train_time:48102ms step_avg:66.72ms
+[2025-07-07 20:47:53] [Rank 0] step:741/10000 train_time:48821ms step_avg:65.88ms
+[2025-07-07 20:47:54] [Rank 0] step:761/10000 train_time:50158ms step_avg:65.91ms
+[2025-07-07 20:47:56] [Rank 0] step:781/10000 train_time:51500ms step_avg:65.94ms
+[2025-07-07 20:47:57] [Rank 0] step:801/10000 train_time:52843ms step_avg:65.97ms
+[2025-07-07 20:47:58] [Rank 0] step:821/10000 train_time:54187ms step_avg:66.00ms
+[2025-07-07 20:48:00] [Rank 0] step:841/10000 train_time:55531ms step_avg:66.03ms
+[2025-07-07 20:48:01] [Rank 0] step:861/10000 train_time:56876ms step_avg:66.06ms
+[2025-07-07 20:48:03] [Rank 0] step:881/10000 train_time:58222ms step_avg:66.09ms
+[2025-07-07 20:48:04] [Rank 0] step:901/10000 train_time:59567ms step_avg:66.11ms
+[2025-07-07 20:48:05] [Rank 0] step:921/10000 train_time:60956ms step_avg:66.18ms
+[2025-07-07 20:48:07] [Rank 0] step:941/10000 train_time:62303ms step_avg:66.21ms
+[2025-07-07 20:48:08] [Rank 0] step:961/10000 train_time:63650ms step_avg:66.23ms
+[2025-07-07 20:48:09] [Rank 0] step:981/10000 train_time:64998ms step_avg:66.26ms
+[2025-07-07 20:48:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:48:12] [Rank 0] PRINT: step:1000/10000 train_loss:6.1525 val_loss:5.3212 train_time:66969ms step_avg:66.97ms
+[2025-07-07 20:48:12] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:48:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:48:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:53:30] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:53:30] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:53:30] [Rank 0] Total Loss: 6.2674
+[2025-07-07 20:53:30] [Rank 0] Total FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 0 Loss: 6.1995
+[2025-07-07 20:53:30] [Rank 0] Group 1 Loss: 6.2487
+[2025-07-07 20:53:30] [Rank 0] Group 2 Loss: 6.4210
+[2025-07-07 20:53:30] [Rank 0] Group 3 Loss: 6.2303
+[2025-07-07 20:53:30] [Rank 0] Group 4 Loss: 6.3316
+[2025-07-07 20:53:30] [Rank 0] Group 5 Loss: 6.2349
+[2025-07-07 20:53:30] [Rank 0] Group 6 Loss: 6.2883
+[2025-07-07 20:53:30] [Rank 0] Group 7 Loss: 6.2877
+[2025-07-07 20:53:30] [Rank 0] Group 8 Loss: 6.2184
+[2025-07-07 20:53:30] [Rank 0] Group 9 Loss: 6.2733
+[2025-07-07 20:53:30] [Rank 0] Group 10 Loss: 6.2699
+[2025-07-07 20:53:30] [Rank 0] Group 11 Loss: 6.2698
+[2025-07-07 20:53:30] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 3 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 4 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 5 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 6 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 7 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 8 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 9 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 10 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] Group 11 FTA: 0.0000
+[2025-07-07 20:53:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 20:53:31] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 20:53:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 20:53:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 20:53:31] [Rank 0] step:1001/10000 train_time:66978ms step_avg:66.91ms
+[2025-07-07 20:53:33] [Rank 0] step:1021/10000 train_time:67729ms step_avg:66.34ms
+[2025-07-07 20:53:34] [Rank 0] step:1041/10000 train_time:69067ms step_avg:66.35ms
+[2025-07-07 20:53:35] [Rank 0] step:1061/10000 train_time:70408ms step_avg:66.36ms
+[2025-07-07 20:53:37] [Rank 0] step:1081/10000 train_time:72005ms step_avg:66.61ms
+[2025-07-07 20:53:38] [Rank 0] step:1101/10000 train_time:73136ms step_avg:66.43ms
+[2025-07-07 20:53:39] [Rank 0] step:1121/10000 train_time:74479ms step_avg:66.44ms
+[2025-07-07 20:53:41] [Rank 0] step:1141/10000 train_time:75822ms step_avg:66.45ms
+[2025-07-07 20:53:42] [Rank 0] step:1161/10000 train_time:77166ms step_avg:66.46ms
+[2025-07-07 20:53:43] [Rank 0] step:1181/10000 train_time:78532ms step_avg:66.50ms
+[2025-07-07 20:53:45] [Rank 0] step:1201/10000 train_time:79877ms step_avg:66.51ms
+[2025-07-07 20:53:46] [Rank 0] step:1221/10000 train_time:81223ms step_avg:66.52ms
+[2025-07-07 20:53:48] [Rank 0] step:1241/10000 train_time:82570ms step_avg:66.54ms
+[2025-07-07 20:53:49] [Rank 0] step:1261/10000 train_time:83918ms step_avg:66.55ms
+[2025-07-07 20:53:50] [Rank 0] step:1281/10000 train_time:85328ms step_avg:66.61ms
+[2025-07-07 20:53:52] [Rank 0] step:1301/10000 train_time:86677ms step_avg:66.62ms
+[2025-07-07 20:53:53] [Rank 0] step:1321/10000 train_time:88024ms step_avg:66.63ms
+[2025-07-07 20:53:54] [Rank 0] step:1341/10000 train_time:89372ms step_avg:66.65ms
+[2025-07-07 20:53:56] [Rank 0] step:1361/10000 train_time:90720ms step_avg:66.66ms
+[2025-07-07 20:53:57] [Rank 0] step:1381/10000 train_time:92068ms step_avg:66.67ms
+[2025-07-07 20:53:58] [Rank 0] step:1401/10000 train_time:93416ms step_avg:66.68ms
+[2025-07-07 20:54:00] [Rank 0] step:1421/10000 train_time:94765ms step_avg:66.69ms
+[2025-07-07 20:54:01] [Rank 0] step:1441/10000 train_time:96162ms step_avg:66.73ms
+[2025-07-07 20:54:02] [Rank 0] step:1461/10000 train_time:97521ms step_avg:66.75ms
+[2025-07-07 20:54:04] [Rank 0] step:1481/10000 train_time:98870ms step_avg:66.76ms
+[2025-07-07 20:54:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:54:06] [Rank 0] PRINT: step:1500/10000 train_loss:4.7243 val_loss:4.1696 train_time:100833ms step_avg:67.22ms
+[2025-07-07 20:54:06] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:54:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:54:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:59:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:59:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:59:24] [Rank 0] Total Loss: 5.5307
+[2025-07-07 20:59:24] [Rank 0] Total FTA: 0.0721
+[2025-07-07 20:59:24] [Rank 0] Group 0 Loss: 5.4250
+[2025-07-07 20:59:24] [Rank 0] Group 1 Loss: 5.5110
+[2025-07-07 20:59:24] [Rank 0] Group 2 Loss: 5.6695
+[2025-07-07 20:59:24] [Rank 0] Group 3 Loss: 5.4957
+[2025-07-07 20:59:24] [Rank 0] Group 4 Loss: 5.5986
+[2025-07-07 20:59:24] [Rank 0] Group 5 Loss: 5.5194
+[2025-07-07 20:59:24] [Rank 0] Group 6 Loss: 5.5784
+[2025-07-07 20:59:24] [Rank 0] Group 7 Loss: 5.5661
+[2025-07-07 20:59:24] [Rank 0] Group 8 Loss: 5.5148
+[2025-07-07 20:59:24] [Rank 0] Group 9 Loss: 5.5502
+[2025-07-07 20:59:24] [Rank 0] Group 10 Loss: 5.5292
+[2025-07-07 20:59:24] [Rank 0] Group 11 Loss: 5.5279
+[2025-07-07 20:59:24] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 20:59:24] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 20:59:24] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 20:59:24] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 20:59:24] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 20:59:24] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-07 20:59:24] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-07 20:59:24] [Rank 0] Group 7 FTA: 0.0859
+[2025-07-07 20:59:24] [Rank 0] Group 8 FTA: 0.0599
+[2025-07-07 20:59:24] [Rank 0] Group 9 FTA: 0.0586
+[2025-07-07 20:59:24] [Rank 0] Group 10 FTA: 0.0664
+[2025-07-07 20:59:24] [Rank 0] Group 11 FTA: 0.0664
+[2025-07-07 20:59:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 20:59:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 20:59:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 20:59:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 20:59:26] [Rank 0] step:1501/10000 train_time:100842ms step_avg:67.18ms
+[2025-07-07 20:59:27] [Rank 0] step:1521/10000 train_time:101579ms step_avg:66.78ms
+[2025-07-07 20:59:28] [Rank 0] step:1541/10000 train_time:102921ms step_avg:66.79ms
+[2025-07-07 20:59:30] [Rank 0] step:1561/10000 train_time:104264ms step_avg:66.79ms
+[2025-07-07 20:59:31] [Rank 0] step:1581/10000 train_time:105606ms step_avg:66.80ms
+[2025-07-07 20:59:32] [Rank 0] step:1601/10000 train_time:106951ms step_avg:66.80ms
+[2025-07-07 20:59:34] [Rank 0] step:1621/10000 train_time:108295ms step_avg:66.81ms
+[2025-07-07 20:59:35] [Rank 0] step:1641/10000 train_time:109700ms step_avg:66.85ms
+[2025-07-07 20:59:36] [Rank 0] step:1661/10000 train_time:111048ms step_avg:66.86ms
+[2025-07-07 20:59:38] [Rank 0] step:1681/10000 train_time:112395ms step_avg:66.86ms
+[2025-07-07 20:59:39] [Rank 0] step:1701/10000 train_time:113743ms step_avg:66.87ms
+[2025-07-07 20:59:40] [Rank 0] step:1721/10000 train_time:115092ms step_avg:66.88ms
+[2025-07-07 20:59:42] [Rank 0] step:1741/10000 train_time:116440ms step_avg:66.88ms
+[2025-07-07 20:59:43] [Rank 0] step:1761/10000 train_time:117788ms step_avg:66.89ms
+[2025-07-07 20:59:44] [Rank 0] step:1781/10000 train_time:119138ms step_avg:66.89ms
+[2025-07-07 20:59:46] [Rank 0] step:1801/10000 train_time:121165ms step_avg:67.28ms
+[2025-07-07 20:59:47] [Rank 0] step:1821/10000 train_time:121894ms step_avg:66.94ms
+[2025-07-07 20:59:49] [Rank 0] step:1841/10000 train_time:123244ms step_avg:66.94ms
+[2025-07-07 20:59:50] [Rank 0] step:1861/10000 train_time:124595ms step_avg:66.95ms
+[2025-07-07 20:59:51] [Rank 0] step:1881/10000 train_time:125944ms step_avg:66.96ms
+[2025-07-07 20:59:53] [Rank 0] step:1901/10000 train_time:127294ms step_avg:66.96ms
+[2025-07-07 20:59:54] [Rank 0] step:1921/10000 train_time:128643ms step_avg:66.97ms
+[2025-07-07 20:59:55] [Rank 0] step:1941/10000 train_time:129993ms step_avg:66.97ms
+[2025-07-07 20:59:57] [Rank 0] step:1961/10000 train_time:131343ms step_avg:66.98ms
+[2025-07-07 20:59:58] [Rank 0] step:1981/10000 train_time:132694ms step_avg:66.98ms
+[2025-07-07 20:59:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:00:00] [Rank 0] PRINT: step:2000/10000 train_loss:3.6993 val_loss:3.2780 train_time:134703ms step_avg:67.35ms
+[2025-07-07 21:00:00] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:00:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:00:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:05:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:05:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:05:15] [Rank 0] Total Loss: 4.9486
+[2025-07-07 21:05:15] [Rank 0] Total FTA: 0.0735
+[2025-07-07 21:05:15] [Rank 0] Group 0 Loss: 4.9211
+[2025-07-07 21:05:15] [Rank 0] Group 1 Loss: 4.9660
+[2025-07-07 21:05:15] [Rank 0] Group 2 Loss: 4.9970
+[2025-07-07 21:05:15] [Rank 0] Group 3 Loss: 4.8471
+[2025-07-07 21:05:15] [Rank 0] Group 4 Loss: 4.9997
+[2025-07-07 21:05:15] [Rank 0] Group 5 Loss: 4.9127
+[2025-07-07 21:05:15] [Rank 0] Group 6 Loss: 4.9945
+[2025-07-07 21:05:15] [Rank 0] Group 7 Loss: 4.9866
+[2025-07-07 21:05:15] [Rank 0] Group 8 Loss: 4.9337
+[2025-07-07 21:05:15] [Rank 0] Group 9 Loss: 4.9165
+[2025-07-07 21:05:15] [Rank 0] Group 10 Loss: 4.9420
+[2025-07-07 21:05:15] [Rank 0] Group 11 Loss: 4.9622
+[2025-07-07 21:05:15] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 21:05:15] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:05:15] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 21:05:15] [Rank 0] Group 3 FTA: 0.0625
+[2025-07-07 21:05:15] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 21:05:15] [Rank 0] Group 5 FTA: 0.0443
+[2025-07-07 21:05:15] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 21:05:15] [Rank 0] Group 7 FTA: 0.0703
+[2025-07-07 21:05:15] [Rank 0] Group 8 FTA: 0.0833
+[2025-07-07 21:05:15] [Rank 0] Group 9 FTA: 0.0547
+[2025-07-07 21:05:15] [Rank 0] Group 10 FTA: 0.0586
+[2025-07-07 21:05:15] [Rank 0] Group 11 FTA: 0.0654
+[2025-07-07 21:05:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 21:05:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:05:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:05:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:05:17] [Rank 0] step:2001/10000 train_time:134713ms step_avg:67.32ms
+[2025-07-07 21:05:18] [Rank 0] step:2021/10000 train_time:135459ms step_avg:67.03ms
+[2025-07-07 21:05:19] [Rank 0] step:2041/10000 train_time:136801ms step_avg:67.03ms
+[2025-07-07 21:05:21] [Rank 0] step:2061/10000 train_time:138145ms step_avg:67.03ms
+[2025-07-07 21:05:22] [Rank 0] step:2081/10000 train_time:139488ms step_avg:67.03ms
+[2025-07-07 21:05:23] [Rank 0] step:2101/10000 train_time:140832ms step_avg:67.03ms
+[2025-07-07 21:05:25] [Rank 0] step:2121/10000 train_time:142174ms step_avg:67.03ms
+[2025-07-07 21:05:26] [Rank 0] step:2141/10000 train_time:143519ms step_avg:67.03ms
+[2025-07-07 21:05:28] [Rank 0] step:2161/10000 train_time:144866ms step_avg:67.04ms
+[2025-07-07 21:05:29] [Rank 0] step:2181/10000 train_time:146252ms step_avg:67.06ms
+[2025-07-07 21:05:30] [Rank 0] step:2201/10000 train_time:147598ms step_avg:67.06ms
+[2025-07-07 21:05:32] [Rank 0] step:2221/10000 train_time:148947ms step_avg:67.06ms
+[2025-07-07 21:05:33] [Rank 0] step:2241/10000 train_time:150305ms step_avg:67.07ms
+[2025-07-07 21:05:34] [Rank 0] step:2261/10000 train_time:151676ms step_avg:67.08ms
+[2025-07-07 21:05:36] [Rank 0] step:2281/10000 train_time:153047ms step_avg:67.10ms
+[2025-07-07 21:05:37] [Rank 0] step:2301/10000 train_time:154419ms step_avg:67.11ms
+[2025-07-07 21:05:38] [Rank 0] step:2321/10000 train_time:155792ms step_avg:67.12ms
+[2025-07-07 21:05:40] [Rank 0] step:2341/10000 train_time:157844ms step_avg:67.43ms
+[2025-07-07 21:05:41] [Rank 0] step:2361/10000 train_time:158586ms step_avg:67.17ms
+[2025-07-07 21:05:43] [Rank 0] step:2381/10000 train_time:159959ms step_avg:67.18ms
+[2025-07-07 21:05:44] [Rank 0] step:2401/10000 train_time:161334ms step_avg:67.19ms
+[2025-07-07 21:05:45] [Rank 0] step:2421/10000 train_time:162708ms step_avg:67.21ms
+[2025-07-07 21:05:47] [Rank 0] step:2441/10000 train_time:164081ms step_avg:67.22ms
+[2025-07-07 21:05:48] [Rank 0] step:2461/10000 train_time:165456ms step_avg:67.23ms
+[2025-07-07 21:05:50] [Rank 0] step:2481/10000 train_time:166830ms step_avg:67.24ms
+[2025-07-07 21:05:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:05:52] [Rank 0] PRINT: step:2500/10000 train_loss:2.9277 val_loss:2.6180 train_time:168828ms step_avg:67.53ms
+[2025-07-07 21:05:52] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:05:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:05:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:11:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:11:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:11:07] [Rank 0] Total Loss: 4.6244
+[2025-07-07 21:11:07] [Rank 0] Total FTA: 0.0790
+[2025-07-07 21:11:07] [Rank 0] Group 0 Loss: 4.6767
+[2025-07-07 21:11:07] [Rank 0] Group 1 Loss: 4.5083
+[2025-07-07 21:11:07] [Rank 0] Group 2 Loss: 4.5975
+[2025-07-07 21:11:07] [Rank 0] Group 3 Loss: 4.5867
+[2025-07-07 21:11:07] [Rank 0] Group 4 Loss: 4.6490
+[2025-07-07 21:11:07] [Rank 0] Group 5 Loss: 4.5881
+[2025-07-07 21:11:07] [Rank 0] Group 6 Loss: 4.6178
+[2025-07-07 21:11:07] [Rank 0] Group 7 Loss: 4.6366
+[2025-07-07 21:11:07] [Rank 0] Group 8 Loss: 4.6277
+[2025-07-07 21:11:07] [Rank 0] Group 9 Loss: 4.6258
+[2025-07-07 21:11:07] [Rank 0] Group 10 Loss: 4.6371
+[2025-07-07 21:11:07] [Rank 0] Group 11 Loss: 4.6473
+[2025-07-07 21:11:07] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 21:11:07] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:11:07] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 21:11:07] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 21:11:07] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 21:11:07] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-07 21:11:07] [Rank 0] Group 6 FTA: 0.0755
+[2025-07-07 21:11:07] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-07 21:11:07] [Rank 0] Group 8 FTA: 0.0729
+[2025-07-07 21:11:07] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 21:11:07] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 21:11:07] [Rank 0] Group 11 FTA: 0.0850
+[2025-07-07 21:11:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 21:11:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:11:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:11:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:11:09] [Rank 0] step:2501/10000 train_time:168838ms step_avg:67.51ms
+[2025-07-07 21:11:10] [Rank 0] step:2521/10000 train_time:170259ms step_avg:67.54ms
+[2025-07-07 21:11:12] [Rank 0] step:2541/10000 train_time:170992ms step_avg:67.29ms
+[2025-07-07 21:11:13] [Rank 0] step:2561/10000 train_time:172357ms step_avg:67.30ms
+[2025-07-07 21:11:14] [Rank 0] step:2581/10000 train_time:173722ms step_avg:67.31ms
+[2025-07-07 21:11:16] [Rank 0] step:2601/10000 train_time:175089ms step_avg:67.32ms
+[2025-07-07 21:11:17] [Rank 0] step:2621/10000 train_time:176455ms step_avg:67.32ms
+[2025-07-07 21:11:19] [Rank 0] step:2641/10000 train_time:177822ms step_avg:67.33ms
+[2025-07-07 21:11:20] [Rank 0] step:2661/10000 train_time:179189ms step_avg:67.34ms
+[2025-07-07 21:11:21] [Rank 0] step:2681/10000 train_time:180558ms step_avg:67.35ms
+[2025-07-07 21:11:23] [Rank 0] step:2701/10000 train_time:181927ms step_avg:67.36ms
+[2025-07-07 21:11:24] [Rank 0] step:2721/10000 train_time:183347ms step_avg:67.38ms
+[2025-07-07 21:11:25] [Rank 0] step:2741/10000 train_time:184716ms step_avg:67.39ms
+[2025-07-07 21:11:27] [Rank 0] step:2761/10000 train_time:186088ms step_avg:67.40ms
+[2025-07-07 21:11:28] [Rank 0] step:2781/10000 train_time:187459ms step_avg:67.41ms
+[2025-07-07 21:11:30] [Rank 0] step:2801/10000 train_time:188831ms step_avg:67.42ms
0] step:2801/10000 train_time:188831ms step_avg:67.42ms +[2025-07-07 21:11:31] [Rank 0] step:2821/10000 train_time:190204ms step_avg:67.42ms +[2025-07-07 21:11:31] [Rank 0] step:2821/10000 train_time:190204ms step_avg:67.42ms +[2025-07-07 21:11:32] [Rank 0] step:2841/10000 train_time:191575ms step_avg:67.43ms +[2025-07-07 21:11:32] [Rank 0] step:2841/10000 train_time:191575ms step_avg:67.43ms +[2025-07-07 21:11:34] [Rank 0] step:2861/10000 train_time:192947ms step_avg:67.44ms +[2025-07-07 21:11:34] [Rank 0] step:2861/10000 train_time:192947ms step_avg:67.44ms +[2025-07-07 21:11:35] [Rank 0] step:2881/10000 train_time:194319ms step_avg:67.45ms +[2025-07-07 21:11:35] [Rank 0] step:2881/10000 train_time:194319ms step_avg:67.45ms +[2025-07-07 21:11:36] [Rank 0] step:2901/10000 train_time:195732ms step_avg:67.47ms +[2025-07-07 21:11:36] [Rank 0] step:2901/10000 train_time:195732ms step_avg:67.47ms +[2025-07-07 21:11:38] [Rank 0] step:2921/10000 train_time:197103ms step_avg:67.48ms +[2025-07-07 21:11:38] [Rank 0] step:2921/10000 train_time:197103ms step_avg:67.48ms +[2025-07-07 21:11:39] [Rank 0] step:2941/10000 train_time:198476ms step_avg:67.49ms +[2025-07-07 21:11:39] [Rank 0] step:2941/10000 train_time:198476ms step_avg:67.49ms +[2025-07-07 21:11:41] [Rank 0] step:2961/10000 train_time:199927ms step_avg:67.52ms +[2025-07-07 21:11:41] [Rank 0] step:2961/10000 train_time:199927ms step_avg:67.52ms +[2025-07-07 21:11:42] [Rank 0] step:2981/10000 train_time:201298ms step_avg:67.53ms +[2025-07-07 21:11:42] [Rank 0] step:2981/10000 train_time:201298ms step_avg:67.53ms +[2025-07-07 21:11:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 21:11:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 21:11:44] [Rank 0] PRINT: step:3000/10000 train_loss:2.3880 val_loss:2.1967 train_time:203294ms step_avg:67.76ms +[2025-07-07 21:11:44] [Rank 0] PRINT: step:3000/10000 train_loss:2.3880 val_loss:2.1967 train_time:203294ms step_avg:67.76ms +[2025-07-07 21:11:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 21:11:44] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 21:11:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 21:11:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
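A note on the recurring divisibility warning above: a minimal arithmetic sketch, assuming the validation loop simply floor-divides val_tokens by val_batch_size (variable names mirror the log message, not the training script's actual code):

    val_tokens = 1966080
    val_batch_size = 262144
    full_batches = val_tokens // val_batch_size   # 7 full validation batches
    covered = full_batches * val_batch_size       # 1835008 tokens actually evaluated
    missed = val_tokens - covered                 # 131072 tokens silently skipped
    print(full_batches, covered, missed)          # -> 7 1835008 131072

The ratio is exactly 7.5, so under this assumption each validation pass drops a trailing half-batch; rounding val_tokens up to a multiple of 262144 would silence the warning.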
+[2025-07-07 21:11:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:17:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:17:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:17:00] [Rank 0] Total Loss: 4.4518
+[2025-07-07 21:17:00] [Rank 0] Total FTA: 0.0840
+[2025-07-07 21:17:00] [Rank 0] Group 0 Loss: 4.6507
+[2025-07-07 21:17:00] [Rank 0] Group 1 Loss: 4.2979
+[2025-07-07 21:17:00] [Rank 0] Group 2 Loss: 4.4603
+[2025-07-07 21:17:00] [Rank 0] Group 3 Loss: 4.4611
+[2025-07-07 21:17:00] [Rank 0] Group 4 Loss: 4.3686
+[2025-07-07 21:17:00] [Rank 0] Group 5 Loss: 4.3913
+[2025-07-07 21:17:00] [Rank 0] Group 6 Loss: 4.4067
+[2025-07-07 21:17:00] [Rank 0] Group 7 Loss: 4.4499
+[2025-07-07 21:17:00] [Rank 0] Group 8 Loss: 4.4130
+[2025-07-07 21:17:00] [Rank 0] Group 9 Loss: 4.4201
+[2025-07-07 21:17:00] [Rank 0] Group 10 Loss: 4.4280
+[2025-07-07 21:17:00] [Rank 0] Group 11 Loss: 4.4593
+[2025-07-07 21:17:00] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 21:17:00] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:17:00] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-07 21:17:00] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-07 21:17:00] [Rank 0] Group 4 FTA: 0.0260
+[2025-07-07 21:17:00] [Rank 0] Group 5 FTA: 0.0547
+[2025-07-07 21:17:00] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 21:17:00] [Rank 0] Group 7 FTA: 0.1302
+[2025-07-07 21:17:00] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-07 21:17:00] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 21:17:00] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 21:17:00] [Rank 0] Group 11 FTA: 0.0938
+[2025-07-07 21:17:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 21:17:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:17:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:17:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:17:02] [Rank 0] step:3001/10000 train_time:203304ms step_avg:67.75ms
+[2025-07-07 21:17:03] [Rank 0] step:3021/10000 train_time:204058ms step_avg:67.55ms
+[2025-07-07 21:17:05] [Rank 0] step:3041/10000 train_time:205422ms step_avg:67.55ms
+[2025-07-07 21:17:06] [Rank 0] step:3061/10000 train_time:207043ms step_avg:67.64ms
+[2025-07-07 21:17:07] [Rank 0] step:3081/10000 train_time:208198ms step_avg:67.57ms
+[2025-07-07 21:17:09] [Rank 0] step:3101/10000 train_time:209566ms step_avg:67.58ms
+[2025-07-07 21:17:10] [Rank 0] step:3121/10000 train_time:210932ms step_avg:67.58ms
+[2025-07-07 21:17:12] [Rank 0] step:3141/10000 train_time:212298ms step_avg:67.59ms
+[2025-07-07 21:17:13] [Rank 0] step:3161/10000 train_time:213666ms step_avg:67.59ms
+[2025-07-07 21:17:14] [Rank 0] step:3181/10000 train_time:215036ms step_avg:67.60ms
+[2025-07-07 21:17:16] [Rank 0] step:3201/10000 train_time:216407ms step_avg:67.61ms
+[2025-07-07 21:17:17] [Rank 0] step:3221/10000 train_time:217778ms step_avg:67.61ms
+[2025-07-07 21:17:18] [Rank 0] step:3241/10000 train_time:219819ms step_avg:67.82ms
+[2025-07-07 21:17:20] [Rank 0] step:3261/10000 train_time:220554ms step_avg:67.63ms
+[2025-07-07 21:17:21] [Rank 0] step:3281/10000 train_time:221926ms step_avg:67.64ms
+[2025-07-07 21:17:23] [Rank 0] step:3301/10000 train_time:223298ms step_avg:67.65ms
+[2025-07-07 21:17:24] [Rank 0] step:3321/10000 train_time:224668ms step_avg:67.65ms
+[2025-07-07 21:17:25] [Rank 0] step:3341/10000 train_time:226039ms step_avg:67.66ms
+[2025-07-07 21:17:27] [Rank 0] step:3361/10000 train_time:227411ms step_avg:67.66ms
+[2025-07-07 21:17:28] [Rank 0] step:3381/10000 train_time:228784ms step_avg:67.67ms
+[2025-07-07 21:17:29] [Rank 0] step:3401/10000 train_time:230157ms step_avg:67.67ms
+[2025-07-07 21:17:31] [Rank 0] step:3421/10000 train_time:232196ms step_avg:67.87ms
+[2025-07-07 21:17:32] [Rank 0] step:3441/10000 train_time:232936ms step_avg:67.69ms
+[2025-07-07 21:17:34] [Rank 0] step:3461/10000 train_time:234309ms step_avg:67.70ms
+[2025-07-07 21:17:35] [Rank 0] step:3481/10000 train_time:235683ms step_avg:67.71ms
+[2025-07-07 21:17:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:17:37] [Rank 0] PRINT: step:3500/10000 train_loss:2.0624 val_loss:1.9529 train_time:237682ms step_avg:67.91ms
+[2025-07-07 21:17:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:17:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
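A note on the step_avg column in the rows above: it appears to be cumulative wall-clock train_time divided by the step index, which reproduces every logged value to two-decimal rounding. A one-line check with the step-3500 figures (the formula itself is an inference from the log, not quoted from the script):

    train_time_ms, step = 237682, 3500
    print(f"step_avg:{train_time_ms / step:.2f}ms")   # step_avg:67.91ms, as logged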
+[2025-07-07 21:17:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:22:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:22:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:22:53] [Rank 0] Total Loss: 4.4083
+[2025-07-07 21:22:53] [Rank 0] Total FTA: 0.0863
+[2025-07-07 21:22:53] [Rank 0] Group 0 Loss: 4.6149
+[2025-07-07 21:22:53] [Rank 0] Group 1 Loss: 4.2306
+[2025-07-07 21:22:53] [Rank 0] Group 2 Loss: 4.4524
+[2025-07-07 21:22:53] [Rank 0] Group 3 Loss: 4.4516
+[2025-07-07 21:22:53] [Rank 0] Group 4 Loss: 4.2701
+[2025-07-07 21:22:53] [Rank 0] Group 5 Loss: 4.3216
+[2025-07-07 21:22:53] [Rank 0] Group 6 Loss: 4.3749
+[2025-07-07 21:22:53] [Rank 0] Group 7 Loss: 4.4384
+[2025-07-07 21:22:53] [Rank 0] Group 8 Loss: 4.4030
+[2025-07-07 21:22:53] [Rank 0] Group 9 Loss: 4.3443
+[2025-07-07 21:22:53] [Rank 0] Group 10 Loss: 4.4027
+[2025-07-07 21:22:53] [Rank 0] Group 11 Loss: 4.3932
+[2025-07-07 21:22:53] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 21:22:53] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:22:53] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-07 21:22:53] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 21:22:53] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 21:22:53] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-07 21:22:53] [Rank 0] Group 6 FTA: 0.0833
+[2025-07-07 21:22:53] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-07 21:22:53] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-07 21:22:53] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-07 21:22:53] [Rank 0] Group 10 FTA: 0.0820
+[2025-07-07 21:22:53] [Rank 0] Group 11 FTA: 0.0859
+[2025-07-07 21:22:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 21:22:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:22:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:22:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:22:55] [Rank 0] step:3501/10000 train_time:237691ms step_avg:67.89ms
+[2025-07-07 21:22:56] [Rank 0] step:3521/10000 train_time:238446ms step_avg:67.72ms
+[2025-07-07 21:22:57] [Rank 0] step:3541/10000 train_time:239809ms step_avg:67.72ms
+[2025-07-07 21:22:59] [Rank 0] step:3561/10000 train_time:241176ms step_avg:67.73ms
+[2025-07-07 21:23:00] [Rank 0] step:3581/10000 train_time:242541ms step_avg:67.73ms
+[2025-07-07 21:23:01] [Rank 0] step:3601/10000 train_time:243907ms step_avg:67.73ms
+[2025-07-07 21:23:03] [Rank 0] step:3621/10000 train_time:245307ms step_avg:67.75ms
+[2025-07-07 21:23:04] [Rank 0] step:3641/10000 train_time:246675ms step_avg:67.75ms
+[2025-07-07 21:23:06] [Rank 0] step:3661/10000 train_time:248045ms step_avg:67.75ms
+[2025-07-07 21:23:07] [Rank 0] step:3681/10000 train_time:249415ms step_avg:67.76ms
+[2025-07-07 21:23:08] [Rank 0] step:3701/10000 train_time:250783ms step_avg:67.76ms
+[2025-07-07 21:23:10] [Rank 0] step:3721/10000 train_time:252154ms step_avg:67.77ms
+[2025-07-07 21:23:11] [Rank 0] step:3741/10000 train_time:253524ms step_avg:67.77ms
+[2025-07-07 21:23:12] [Rank 0] step:3761/10000 train_time:254896ms step_avg:67.77ms
+[2025-07-07 21:23:14] [Rank 0] step:3781/10000 train_time:256269ms step_avg:67.78ms
+[2025-07-07 21:23:15] [Rank 0] step:3801/10000 train_time:257641ms step_avg:67.78ms
+[2025-07-07 21:23:17] [Rank 0] step:3821/10000 train_time:259013ms step_avg:67.79ms
+[2025-07-07 21:23:18] [Rank 0] step:3841/10000 train_time:260386ms step_avg:67.79ms
+[2025-07-07 21:23:19] [Rank 0] step:3861/10000 train_time:261759ms step_avg:67.80ms
+[2025-07-07 21:23:21] [Rank 0] step:3881/10000 train_time:263132ms step_avg:67.80ms
+[2025-07-07 21:23:22] [Rank 0] step:3901/10000 train_time:264504ms step_avg:67.80ms
+[2025-07-07 21:23:23] [Rank 0] step:3921/10000 train_time:265876ms step_avg:67.81ms
+[2025-07-07 21:23:25] [Rank 0] step:3941/10000 train_time:267248ms step_avg:67.81ms
+[2025-07-07 21:23:26] [Rank 0] step:3961/10000 train_time:268871ms step_avg:67.88ms
+[2025-07-07 21:23:28] [Rank 0] step:3981/10000 train_time:270045ms step_avg:67.83ms
+[2025-07-07 21:23:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:23:30] [Rank 0] PRINT: step:4000/10000 train_loss:1.8744 val_loss:1.8112 train_time:272042ms step_avg:68.01ms
+[2025-07-07 21:23:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:23:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:23:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:28:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:28:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:28:47] [Rank 0] Total Loss: 4.4196
+[2025-07-07 21:28:47] [Rank 0] Total FTA: 0.0927
+[2025-07-07 21:28:47] [Rank 0] Group 0 Loss: 4.6498
+[2025-07-07 21:28:47] [Rank 0] Group 1 Loss: 4.2417
+[2025-07-07 21:28:47] [Rank 0] Group 2 Loss: 4.3333
+[2025-07-07 21:28:47] [Rank 0] Group 3 Loss: 4.5603
+[2025-07-07 21:28:47] [Rank 0] Group 4 Loss: 4.3628
+[2025-07-07 21:28:47] [Rank 0] Group 5 Loss: 4.3170
+[2025-07-07 21:28:47] [Rank 0] Group 6 Loss: 4.3364
+[2025-07-07 21:28:47] [Rank 0] Group 7 Loss: 4.4567
+[2025-07-07 21:28:47] [Rank 0] Group 8 Loss: 4.3714
+[2025-07-07 21:28:47] [Rank 0] Group 9 Loss: 4.3963
+[2025-07-07 21:28:47] [Rank 0] Group 10 Loss: 4.4167
+[2025-07-07 21:28:47] [Rank 0] Group 11 Loss: 4.3953
+[2025-07-07 21:28:47] [Rank 0] Group 0 FTA: 0.1417
+[2025-07-07 21:28:47] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:28:47] [Rank 0] Group 2 FTA: 0.1641
+[2025-07-07 21:28:47] [Rank 0] Group 3 FTA: 0.0833
+[2025-07-07 21:28:47] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 21:28:47] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 21:28:47] [Rank 0] Group 6 FTA: 0.1120
+[2025-07-07 21:28:47] [Rank 0] Group 7 FTA: 0.1198
+[2025-07-07 21:28:47] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 21:28:47] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 21:28:47] [Rank 0] Group 10 FTA: 0.0762
+[2025-07-07 21:28:47] [Rank 0] Group 11 FTA: 0.0879
+[2025-07-07 21:28:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 21:28:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:28:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:28:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:28:49] [Rank 0] step:4001/10000 train_time:272051ms step_avg:68.00ms
+[2025-07-07 21:28:50] [Rank 0] step:4021/10000 train_time:272827ms step_avg:67.85ms
+[2025-07-07 21:28:51] [Rank 0] step:4041/10000 train_time:274193ms step_avg:67.85ms
+[2025-07-07 21:28:53] [Rank 0] step:4061/10000 train_time:275558ms step_avg:67.85ms
+[2025-07-07 21:28:54] [Rank 0] step:4081/10000 train_time:276925ms step_avg:67.86ms
+[2025-07-07 21:28:55] [Rank 0] step:4101/10000 train_time:278290ms step_avg:67.86ms
+[2025-07-07 21:28:57] [Rank 0] step:4121/10000 train_time:279655ms step_avg:67.86ms
+[2025-07-07 21:28:58] [Rank 0] step:4141/10000 train_time:281685ms step_avg:68.02ms
+[2025-07-07 21:29:00] [Rank 0] step:4161/10000 train_time:282424ms step_avg:67.87ms
+[2025-07-07 21:29:01] [Rank 0] step:4181/10000 train_time:283792ms step_avg:67.88ms
+[2025-07-07 21:29:02] [Rank 0] step:4201/10000 train_time:285162ms step_avg:67.88ms
+[2025-07-07 21:29:04] [Rank 0] step:4221/10000 train_time:286534ms step_avg:67.88ms
+[2025-07-07 21:29:05] [Rank 0] step:4241/10000 train_time:287906ms step_avg:67.89ms
+[2025-07-07 21:29:06] [Rank 0] step:4261/10000 train_time:289277ms step_avg:67.89ms
+[2025-07-07 21:29:08] [Rank 0] step:4281/10000 train_time:290648ms step_avg:67.89ms
+[2025-07-07 21:29:09] [Rank 0] step:4301/10000 train_time:292021ms step_avg:67.90ms
+[2025-07-07 21:29:11] [Rank 0] step:4321/10000 train_time:294070ms step_avg:68.06ms
+[2025-07-07 21:29:12] [Rank 0] step:4341/10000 train_time:294809ms step_avg:67.91ms
+[2025-07-07 21:29:13] [Rank 0] step:4361/10000 train_time:296179ms step_avg:67.92ms
+[2025-07-07 21:29:15] [Rank 0] step:4381/10000 train_time:297551ms step_avg:67.92ms
+[2025-07-07 21:29:16] [Rank 0] step:4401/10000 train_time:298922ms step_avg:67.92ms
+[2025-07-07 21:29:17] [Rank 0] step:4421/10000 train_time:300293ms step_avg:67.92ms
+[2025-07-07 21:29:19] [Rank 0] step:4441/10000 train_time:301665ms step_avg:67.93ms
+[2025-07-07 21:29:20] [Rank 0] step:4461/10000 train_time:303036ms step_avg:67.93ms
+[2025-07-07 21:29:22] [Rank 0] step:4481/10000 train_time:304408ms step_avg:67.93ms
+[2025-07-07 21:29:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:29:24] [Rank 0] PRINT: step:4500/10000 train_loss:1.7604 val_loss:1.7185 train_time:306404ms step_avg:68.09ms
+[2025-07-07 21:29:24] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:29:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
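A note on the "stratified sampling to extract ~5000 samples" lines: the sampler itself is not shown in the log, so the following is a hypothetical Python sketch of one standard per-group draw; stratified_sample, the "group" key, and the equal per-group quota are illustrative assumptions, and the precise rule that lands on 5633 samples is not recoverable from the log alone:

    import random
    from collections import defaultdict

    def stratified_sample(examples, target=5000, seed=0):
        # Bucket examples by group label, then draw the same quota from
        # each bucket so every group is represented in the eval set.
        rng = random.Random(seed)
        buckets = defaultdict(list)
        for ex in examples:
            buckets[ex["group"]].append(ex)
        quota = -(-target // len(buckets))  # ceiling division
        picked = []
        for items in buckets.values():
            picked.extend(rng.sample(items, min(quota, len(items))))
        return picked  # can overshoot the target, as 5633 > 5000 does here

This matches the logged behavior in shape (an approximate target, a fixed result size of 5633 at every evaluation, and all 12 groups reported) without claiming to be the script's implementation.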
+[2025-07-07 21:29:24] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:34:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:34:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:34:42] [Rank 0] Total Loss: 4.4631
+[2025-07-07 21:34:42] [Rank 0] Total FTA: 0.1090
+[2025-07-07 21:34:42] [Rank 0] Group 0 Loss: 4.7440
+[2025-07-07 21:34:42] [Rank 0] Group 1 Loss: 4.3156
+[2025-07-07 21:34:42] [Rank 0] Group 2 Loss: 4.3627
+[2025-07-07 21:34:42] [Rank 0] Group 3 Loss: 4.6504
+[2025-07-07 21:34:42] [Rank 0] Group 4 Loss: 4.3857
+[2025-07-07 21:34:42] [Rank 0] Group 5 Loss: 4.2672
+[2025-07-07 21:34:42] [Rank 0] Group 6 Loss: 4.4352
+[2025-07-07 21:34:42] [Rank 0] Group 7 Loss: 4.4691
+[2025-07-07 21:34:42] [Rank 0] Group 8 Loss: 4.3933
+[2025-07-07 21:34:42] [Rank 0] Group 9 Loss: 4.3894
+[2025-07-07 21:34:42] [Rank 0] Group 10 Loss: 4.4370
+[2025-07-07 21:34:42] [Rank 0] Group 11 Loss: 4.4434
+[2025-07-07 21:34:42] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 21:34:42] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-07 21:34:42] [Rank 0] Group 2 FTA: 0.1484
+[2025-07-07 21:34:42] [Rank 0] Group 3 FTA: 0.0677
+[2025-07-07 21:34:42] [Rank 0] Group 4 FTA: 0.0651
+[2025-07-07 21:34:42] [Rank 0] Group 5 FTA: 0.0703
+[2025-07-07 21:34:42] [Rank 0] Group 6 FTA: 0.1224
+[2025-07-07 21:34:42] [Rank 0] Group 7 FTA: 0.0755
+[2025-07-07 21:34:42] [Rank 0] Group 8 FTA: 0.0938
+[2025-07-07 21:34:42] [Rank 0] Group 9 FTA: 0.1055
+[2025-07-07 21:34:42] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-07 21:34:42] [Rank 0] Group 11 FTA: 0.1045
+[2025-07-07 21:34:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 21:34:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:34:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:34:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:34:45] [Rank 0] step:4501/10000 train_time:306524ms step_avg:68.10ms
+[2025-07-07 21:34:46] [Rank 0] step:4521/10000 train_time:307893ms step_avg:68.10ms
+[2025-07-07 21:34:47] [Rank 0] step:4541/10000 train_time:309257ms step_avg:68.10ms
+[2025-07-07 21:34:49] [Rank 0] step:4561/10000 train_time:310621ms step_avg:68.10ms
+[2025-07-07 21:34:50] [Rank 0] step:4581/10000 train_time:311994ms step_avg:68.11ms
+[2025-07-07 21:34:51] [Rank 0] step:4601/10000 train_time:313359ms step_avg:68.11ms
+[2025-07-07 21:34:53] [Rank 0] step:4621/10000 train_time:314725ms step_avg:68.11ms
+[2025-07-07 21:34:54] [Rank 0] step:4641/10000 train_time:316093ms step_avg:68.11ms
+[2025-07-07 21:34:55] [Rank 0] step:4661/10000 train_time:317460ms step_avg:68.11ms
+[2025-07-07 21:34:57] [Rank 0] step:4681/10000 train_time:318833ms step_avg:68.11ms
+[2025-07-07 21:34:58] [Rank 0] step:4701/10000 train_time:320249ms step_avg:68.12ms
+[2025-07-07 21:35:00] [Rank 0] step:4721/10000 train_time:321620ms step_avg:68.13ms
+[2025-07-07 21:35:01] [Rank 0] step:4741/10000 train_time:322991ms step_avg:68.13ms
+[2025-07-07 21:35:02] [Rank 0] step:4761/10000 train_time:324363ms step_avg:68.13ms
+[2025-07-07 21:35:04] [Rank 0] step:4781/10000 train_time:325735ms step_avg:68.13ms
+[2025-07-07 21:35:05] [Rank 0] step:4801/10000 train_time:327108ms step_avg:68.13ms
+[2025-07-07 21:35:06] [Rank 0] step:4821/10000 train_time:328479ms step_avg:68.14ms
+[2025-07-07 21:35:08] [Rank 0] step:4841/10000 train_time:329852ms step_avg:68.14ms
+[2025-07-07 21:35:09] [Rank 0] step:4861/10000 train_time:331225ms step_avg:68.14ms
+[2025-07-07 21:35:11] [Rank 0] step:4881/10000 train_time:332633ms step_avg:68.15ms
+[2025-07-07 21:35:12] [Rank 0] step:4901/10000 train_time:334006ms step_avg:68.15ms
+[2025-07-07 21:35:13] [Rank 0] step:4921/10000 train_time:335380ms step_avg:68.15ms
+[2025-07-07 21:35:15] [Rank 0] step:4941/10000 train_time:336753ms step_avg:68.15ms
+[2025-07-07 21:35:16] [Rank 0] step:4961/10000 train_time:338126ms step_avg:68.16ms
+[2025-07-07 21:35:17] [Rank 0] step:4981/10000 train_time:339499ms step_avg:68.16ms
+[2025-07-07 21:35:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:35:20] [Rank 0] PRINT: step:5000/10000 train_loss:1.6847 val_loss:1.6575 train_time:341496ms step_avg:68.30ms
+[2025-07-07 21:35:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:35:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:35:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:40:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:40:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:40:39] [Rank 0] Total Loss: 4.5201
+[2025-07-07 21:40:39] [Rank 0] Total FTA: 0.0893
+[2025-07-07 21:40:39] [Rank 0] Group 0 Loss: 4.8821
+[2025-07-07 21:40:39] [Rank 0] Group 1 Loss: 4.3614
+[2025-07-07 21:40:39] [Rank 0] Group 2 Loss: 4.4259
+[2025-07-07 21:40:39] [Rank 0] Group 3 Loss: 4.5529
+[2025-07-07 21:40:39] [Rank 0] Group 4 Loss: 4.4129
+[2025-07-07 21:40:39] [Rank 0] Group 5 Loss: 4.3595
+[2025-07-07 21:40:39] [Rank 0] Group 6 Loss: 4.4066
+[2025-07-07 21:40:39] [Rank 0] Group 7 Loss: 4.5332
+[2025-07-07 21:40:39] [Rank 0] Group 8 Loss: 4.4487
+[2025-07-07 21:40:39] [Rank 0] Group 9 Loss: 4.4802
+[2025-07-07 21:40:39] [Rank 0] Group 10 Loss: 4.5126
+[2025-07-07 21:40:39] [Rank 0] Group 11 Loss: 4.5094
+[2025-07-07 21:40:39] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 21:40:39] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 21:40:39] [Rank 0] Group 2 FTA: 0.0521
+[2025-07-07 21:40:39] [Rank 0] Group 3 FTA: 0.0677
+[2025-07-07 21:40:39] [Rank 0] Group 4 FTA: 0.0339
+[2025-07-07 21:40:39] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 21:40:39] [Rank 0] Group 6 FTA: 0.0807
+[2025-07-07 21:40:39] [Rank 0] Group 7 FTA: 0.1172
+[2025-07-07 21:40:39] [Rank 0] Group 8 FTA: 0.1172
+[2025-07-07 21:40:39] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-07 21:40:39] [Rank 0] Group 10 FTA: 0.1016
+[2025-07-07 21:40:39] [Rank 0] Group 11 FTA: 0.0850
+[2025-07-07 21:40:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 21:40:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:40:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:40:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:40:40] [Rank 0] step:5001/10000 train_time:341505ms step_avg:68.29ms
+[2025-07-07 21:40:42] [Rank 0] step:5021/10000 train_time:342263ms step_avg:68.17ms
+[2025-07-07 21:40:43] [Rank 0] step:5041/10000 train_time:343682ms step_avg:68.18ms
+[2025-07-07 21:40:44] [Rank 0] step:5061/10000 train_time:345057ms step_avg:68.18ms
+[2025-07-07 21:40:46] [Rank 0] step:5081/10000 train_time:346422ms step_avg:68.18ms
+[2025-07-07 21:40:47] [Rank 0] step:5101/10000 train_time:347788ms step_avg:68.18ms
+[2025-07-07 21:40:48] [Rank 0] step:5121/10000 train_time:349155ms step_avg:68.18ms
+[2025-07-07 21:40:50] [Rank 0] step:5141/10000 train_time:350522ms step_avg:68.18ms
+[2025-07-07 21:40:51] [Rank 0] step:5161/10000 train_time:351890ms step_avg:68.18ms
+[2025-07-07 21:40:53] [Rank 0] step:5181/10000 train_time:353258ms step_avg:68.18ms
+[2025-07-07 21:40:54] [Rank 0] step:5201/10000 train_time:354627ms step_avg:68.18ms
+[2025-07-07 21:40:55] [Rank 0] step:5221/10000 train_time:355998ms step_avg:68.19ms
+[2025-07-07 21:40:57] [Rank 0] step:5241/10000 train_time:357403ms step_avg:68.19ms
+[2025-07-07 21:40:58] [Rank 0] step:5261/10000 train_time:358773ms step_avg:68.19ms
+[2025-07-07 21:40:59] [Rank 0] step:5281/10000 train_time:360143ms step_avg:68.20ms
+[2025-07-07 21:41:01] [Rank 0] step:5301/10000 train_time:361515ms step_avg:68.20ms
+[2025-07-07 21:41:02] [Rank 0] step:5321/10000 train_time:362889ms step_avg:68.20ms
+[2025-07-07 21:41:04] [Rank 0] step:5341/10000 train_time:364260ms step_avg:68.20ms
+[2025-07-07 21:41:05] [Rank 0] step:5361/10000 train_time:365633ms step_avg:68.20ms
+[2025-07-07 21:41:06] [Rank 0] step:5381/10000 train_time:367007ms step_avg:68.20ms
+[2025-07-07 21:41:08] [Rank 0] step:5401/10000 train_time:368380ms step_avg:68.21ms
+[2025-07-07 21:41:09] [Rank 0] step:5421/10000 train_time:369795ms step_avg:68.22ms
+[2025-07-07 21:41:10] [Rank 0] step:5441/10000 train_time:371168ms step_avg:68.22ms
+[2025-07-07 21:41:12] [Rank 0] step:5461/10000 train_time:372565ms step_avg:68.22ms
+[2025-07-07 21:41:13] [Rank 0] step:5481/10000 train_time:373939ms step_avg:68.22ms
+[2025-07-07 21:41:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:41:15] [Rank 0] PRINT: step:5500/10000 train_loss:1.6273 val_loss:1.5986 train_time:375937ms step_avg:68.35ms
+[2025-07-07 21:41:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:41:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:41:16] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 21:41:16] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 21:46:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 21:46:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 21:46:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 21:46:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 21:46:31] [Rank 0] Total Loss: 4.5464 +[2025-07-07 21:46:31] [Rank 0] Total Loss: 4.5464 +[2025-07-07 21:46:31] [Rank 0] Total FTA: 0.0866 +[2025-07-07 21:46:31] [Rank 0] Total FTA: 0.0866 +[2025-07-07 21:46:31] [Rank 0] Group 0 Loss: 4.9439 +[2025-07-07 21:46:31] [Rank 0] Group 0 Loss: 4.9439 +[2025-07-07 21:46:31] [Rank 0] Group 1 Loss: 4.4512 +[2025-07-07 21:46:31] [Rank 0] Group 1 Loss: 4.4512 +[2025-07-07 21:46:31] [Rank 0] Group 2 Loss: 4.4332 +[2025-07-07 21:46:31] [Rank 0] Group 2 Loss: 4.4332 +[2025-07-07 21:46:31] [Rank 0] Group 3 Loss: 4.6317 +[2025-07-07 21:46:31] [Rank 0] Group 3 Loss: 4.6317 +[2025-07-07 21:46:31] [Rank 0] Group 4 Loss: 4.3506 +[2025-07-07 21:46:31] [Rank 0] Group 4 Loss: 4.3506 +[2025-07-07 21:46:31] [Rank 0] Group 5 Loss: 4.3710 +[2025-07-07 21:46:31] [Rank 0] Group 5 Loss: 4.3710 +[2025-07-07 21:46:31] [Rank 0] Group 6 Loss: 4.3829 +[2025-07-07 21:46:31] [Rank 0] Group 6 Loss: 4.3829 +[2025-07-07 21:46:31] [Rank 0] Group 7 Loss: 4.5723 +[2025-07-07 21:46:31] [Rank 0] Group 7 Loss: 4.5723 +[2025-07-07 21:46:31] [Rank 0] Group 8 Loss: 4.5369 +[2025-07-07 21:46:31] [Rank 0] Group 8 Loss: 4.5369 +[2025-07-07 21:46:31] [Rank 0] Group 9 Loss: 4.4087 +[2025-07-07 21:46:31] [Rank 0] Group 9 Loss: 4.4087 +[2025-07-07 21:46:31] [Rank 0] Group 10 Loss: 4.5229 +[2025-07-07 21:46:31] [Rank 0] Group 10 Loss: 4.5229 +[2025-07-07 21:46:31] [Rank 0] Group 11 Loss: 4.5345 +[2025-07-07 21:46:31] [Rank 0] Group 11 Loss: 4.5345 +[2025-07-07 21:46:31] [Rank 0] Group 0 FTA: 0.1664 +[2025-07-07 21:46:31] [Rank 0] Group 0 FTA: 0.1664 +[2025-07-07 21:46:31] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 21:46:31] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 21:46:31] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-07 21:46:31] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-07 21:46:31] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 21:46:31] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 21:46:31] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-07 21:46:31] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-07 21:46:31] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 21:46:31] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 21:46:31] [Rank 0] Group 6 FTA: 0.0677 +[2025-07-07 21:46:31] [Rank 0] Group 6 FTA: 0.0677 +[2025-07-07 21:46:31] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-07 21:46:31] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-07 21:46:31] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 21:46:31] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 21:46:31] [Rank 0] Group 9 FTA: 0.0781 +[2025-07-07 21:46:31] [Rank 0] Group 9 FTA: 0.0781 +[2025-07-07 21:46:31] [Rank 0] Group 10 FTA: 0.1055 +[2025-07-07 21:46:31] [Rank 0] Group 10 FTA: 0.1055 +[2025-07-07 21:46:31] [Rank 0] Group 11 FTA: 0.0889 +[2025-07-07 21:46:31] [Rank 0] Group 11 FTA: 0.0889 +[2025-07-07 21:46:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png +[2025-07-07 21:46:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png +[2025-07-07 21:46:32] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png +[2025-07-07 21:46:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png +[2025-07-07 21:46:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png +[2025-07-07 21:46:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png +[2025-07-07 21:46:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png +[2025-07-07 21:46:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png +[2025-07-07 21:46:33] [Rank 0] step:5501/10000 train_time:375947ms step_avg:68.34ms +[2025-07-07 21:46:33] [Rank 0] step:5501/10000 train_time:375947ms step_avg:68.34ms +[2025-07-07 21:46:34] [Rank 0] step:5521/10000 train_time:376724ms step_avg:68.23ms +[2025-07-07 21:46:34] [Rank 0] step:5521/10000 train_time:376724ms step_avg:68.23ms +[2025-07-07 21:46:35] [Rank 0] step:5541/10000 train_time:378089ms step_avg:68.23ms +[2025-07-07 21:46:35] [Rank 0] step:5541/10000 train_time:378089ms step_avg:68.23ms +[2025-07-07 21:46:37] [Rank 0] step:5561/10000 train_time:379453ms step_avg:68.23ms +[2025-07-07 21:46:37] [Rank 0] step:5561/10000 train_time:379453ms step_avg:68.23ms +[2025-07-07 21:46:38] [Rank 0] step:5581/10000 train_time:380870ms step_avg:68.24ms +[2025-07-07 21:46:38] [Rank 0] step:5581/10000 train_time:380870ms step_avg:68.24ms +[2025-07-07 21:46:40] [Rank 0] step:5601/10000 train_time:382216ms step_avg:68.24ms +[2025-07-07 21:46:40] [Rank 0] step:5601/10000 train_time:382216ms step_avg:68.24ms +[2025-07-07 21:46:41] [Rank 0] step:5621/10000 train_time:383584ms step_avg:68.24ms +[2025-07-07 21:46:41] [Rank 0] step:5621/10000 train_time:383584ms step_avg:68.24ms +[2025-07-07 21:46:42] [Rank 0] step:5641/10000 train_time:384953ms step_avg:68.24ms +[2025-07-07 21:46:42] [Rank 0] step:5641/10000 train_time:384953ms step_avg:68.24ms +[2025-07-07 21:46:44] [Rank 0] step:5661/10000 train_time:386322ms step_avg:68.24ms +[2025-07-07 21:46:44] [Rank 0] step:5661/10000 train_time:386322ms step_avg:68.24ms +[2025-07-07 21:46:45] [Rank 0] step:5681/10000 train_time:387693ms step_avg:68.24ms +[2025-07-07 21:46:45] [Rank 0] step:5681/10000 train_time:387693ms step_avg:68.24ms +[2025-07-07 21:46:46] [Rank 0] step:5701/10000 train_time:389062ms step_avg:68.24ms +[2025-07-07 21:46:46] [Rank 0] step:5701/10000 train_time:389062ms step_avg:68.24ms +[2025-07-07 21:46:48] [Rank 0] step:5721/10000 train_time:390432ms step_avg:68.25ms +[2025-07-07 21:46:48] [Rank 0] step:5721/10000 train_time:390432ms step_avg:68.25ms +[2025-07-07 21:46:49] [Rank 0] step:5741/10000 train_time:391803ms step_avg:68.25ms +[2025-07-07 21:46:49] [Rank 0] step:5741/10000 train_time:391803ms step_avg:68.25ms +[2025-07-07 21:46:51] [Rank 0] step:5761/10000 train_time:393830ms step_avg:68.36ms +[2025-07-07 21:46:51] [Rank 0] step:5761/10000 train_time:393830ms step_avg:68.36ms +[2025-07-07 21:46:52] [Rank 0] step:5781/10000 train_time:394570ms step_avg:68.25ms +[2025-07-07 21:46:52] [Rank 0] step:5781/10000 train_time:394570ms step_avg:68.25ms +[2025-07-07 21:46:53] [Rank 0] step:5801/10000 train_time:395941ms step_avg:68.25ms +[2025-07-07 21:46:53] [Rank 
+[2025-07-07 21:46:55] [Rank 0] step:5821/10000 train_time:397313ms step_avg:68.26ms
+[2025-07-07 21:46:56] [Rank 0] step:5841/10000 train_time:398683ms step_avg:68.26ms
+[2025-07-07 21:46:57] [Rank 0] step:5861/10000 train_time:400054ms step_avg:68.26ms
+[2025-07-07 21:46:59] [Rank 0] step:5881/10000 train_time:401426ms step_avg:68.26ms
+[2025-07-07 21:47:00] [Rank 0] step:5901/10000 train_time:402797ms step_avg:68.26ms
+[2025-07-07 21:47:02] [Rank 0] step:5921/10000 train_time:404168ms step_avg:68.26ms
+[2025-07-07 21:47:03] [Rank 0] step:5941/10000 train_time:405542ms step_avg:68.26ms
+[2025-07-07 21:47:04] [Rank 0] step:5961/10000 train_time:406954ms step_avg:68.27ms
+[2025-07-07 21:47:06] [Rank 0] step:5981/10000 train_time:408324ms step_avg:68.27ms
+[2025-07-07 21:47:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
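The warning above is a plain divisibility check: 1966080 / 262144 = 7.5, so only seven full validation batches fit and the remaining 131072 tokens are at risk of being skipped. A sketch of such a check under that assumption (hypothetical variable names, not the script's actual code):

    val_tokens = 1966080
    val_batch_size = 262144

    full_batches = val_tokens // val_batch_size            # 7 full batches
    leftover = val_tokens - full_batches * val_batch_size  # 131072 tokens unused
    if leftover != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")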
+[2025-07-07 21:47:08] [Rank 0] PRINT: step:6000/10000 train_loss:1.5683 val_loss:1.5525 train_time:410318ms step_avg:68.39ms
+[2025-07-07 21:47:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:47:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:47:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:52:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:52:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:52:28] [Rank 0] Total Loss: 4.5542
+[2025-07-07 21:52:28] [Rank 0] Total FTA: 0.0969
+[2025-07-07 21:52:28] [Rank 0] Group 0 Loss: 4.9155
+[2025-07-07 21:52:28] [Rank 0] Group 1 Loss: 4.4336
+[2025-07-07 21:52:28] [Rank 0] Group 2 Loss: 4.3892
+[2025-07-07 21:52:28] [Rank 0] Group 3 Loss: 4.4688
+[2025-07-07 21:52:28] [Rank 0] Group 4 Loss: 4.3956
+[2025-07-07 21:52:28] [Rank 0] Group 5 Loss: 4.3649
+[2025-07-07 21:52:28] [Rank 0] Group 6 Loss: 4.4531
+[2025-07-07 21:52:28] [Rank 0] Group 7 Loss: 4.5956
+[2025-07-07 21:52:28] [Rank 0] Group 8 Loss: 4.5014
+[2025-07-07 21:52:28] [Rank 0] Group 9 Loss: 4.4842
+[2025-07-07 21:52:28] [Rank 0] Group 10 Loss: 4.6437
+[2025-07-07 21:52:28] [Rank 0] Group 11 Loss: 4.5674
+[2025-07-07 21:52:28] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 21:52:28] [Rank 0] Group 1 FTA: 0.1536
+[2025-07-07 21:52:28] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 21:52:28] [Rank 0] Group 3 FTA: 0.0286
+[2025-07-07 21:52:28] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-07 21:52:28] [Rank 0] Group 5 FTA: 0.0729
+[2025-07-07 21:52:28] [Rank 0] Group 6 FTA: 0.1120
+[2025-07-07 21:52:28] [Rank 0] Group 7 FTA: 0.1016
+[2025-07-07 21:52:28] [Rank 0] Group 8 FTA: 0.1068
+[2025-07-07 21:52:28] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-07 21:52:28] [Rank 0] Group 10 FTA: 0.1250
+[2025-07-07 21:52:28] [Rank 0] Group 11 FTA: 0.0967
+[2025-07-07 21:52:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
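A target of ~5000 samples yielding exactly 5633 at every checkpoint is consistent with per-group stratified sampling that rounds each of the 12 groups' quotas up, so the total overshoots the target by a fixed amount for a fixed class distribution. A sketch of that idea; the function and its signature are assumptions for illustration, not the script's API:

    import math
    import random
    from collections import defaultdict

    def stratified_sample(samples, target=5000, seed=0):
        """samples: iterable of (item, group_id) pairs. Returns ~target items,
        preserving each group's share; the per-group ceil() is why the result
        can exceed the target (5633 vs. ~5000 in this log)."""
        rng = random.Random(seed)
        by_group = defaultdict(list)
        for item, group in samples:
            by_group[group].append(item)
        total = sum(len(v) for v in by_group.values())
        picked = []
        for group, items in by_group.items():
            quota = math.ceil(target * len(items) / total)
            picked.extend(rng.sample(items, min(quota, len(items))))
        return picked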
+[2025-07-07 21:52:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:52:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:52:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:52:29] [Rank 0] step:6001/10000 train_time:410329ms step_avg:68.38ms
+[2025-07-07 21:52:31] [Rank 0] step:6021/10000 train_time:411091ms step_avg:68.28ms
+[2025-07-07 21:52:32] [Rank 0] step:6041/10000 train_time:412457ms step_avg:68.28ms
+[2025-07-07 21:52:33] [Rank 0] step:6061/10000 train_time:413820ms step_avg:68.28ms
+[2025-07-07 21:52:35] [Rank 0] step:6081/10000 train_time:415185ms step_avg:68.28ms
+[2025-07-07 21:52:36] [Rank 0] step:6101/10000 train_time:416550ms step_avg:68.28ms
+[2025-07-07 21:52:37] [Rank 0] step:6121/10000 train_time:418167ms step_avg:68.32ms
+[2025-07-07 21:52:39] [Rank 0] step:6141/10000 train_time:419326ms step_avg:68.28ms
+[2025-07-07 21:52:40] [Rank 0] step:6161/10000 train_time:420692ms step_avg:68.28ms
+[2025-07-07 21:52:41] [Rank 0] step:6181/10000 train_time:422060ms step_avg:68.28ms
+[2025-07-07 21:52:43] [Rank 0] step:6201/10000 train_time:423427ms step_avg:68.28ms
+[2025-07-07 21:52:44] [Rank 0] step:6221/10000 train_time:424793ms step_avg:68.28ms
+[2025-07-07 21:52:46] [Rank 0] step:6241/10000 train_time:426163ms step_avg:68.28ms
+[2025-07-07 21:52:47] [Rank 0] step:6261/10000 train_time:427533ms step_avg:68.29ms
+[2025-07-07 21:52:48] [Rank 0] step:6281/10000 train_time:428903ms step_avg:68.29ms
+[2025-07-07 21:52:50] [Rank 0] step:6301/10000 train_time:430274ms step_avg:68.29ms
+[2025-07-07 21:52:51] [Rank 0] step:6321/10000 train_time:431678ms step_avg:68.29ms
+[2025-07-07 21:52:52] [Rank 0] step:6341/10000 train_time:433048ms step_avg:68.29ms
+[2025-07-07 21:52:54] [Rank 0] step:6361/10000 train_time:434419ms step_avg:68.29ms
+[2025-07-07 21:52:55] [Rank 0] step:6381/10000 train_time:435789ms step_avg:68.29ms
+[2025-07-07 21:52:57] [Rank 0] step:6401/10000 train_time:437160ms step_avg:68.30ms
+[2025-07-07 21:52:58] [Rank 0] step:6421/10000 train_time:438532ms step_avg:68.30ms
+[2025-07-07 21:52:59] [Rank 0] step:6441/10000 train_time:439905ms step_avg:68.30ms
+[2025-07-07 21:53:01] [Rank 0] step:6461/10000 train_time:441277ms step_avg:68.30ms
+[2025-07-07 21:53:02] [Rank 0] step:6481/10000 train_time:443323ms step_avg:68.40ms
+[2025-07-07 21:53:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:53:04] [Rank 0] PRINT: step:6500/10000 train_loss:1.5278 val_loss:1.5196 train_time:444689ms step_avg:68.41ms
+[2025-07-07 21:53:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:53:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:53:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 21:58:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 21:58:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 21:58:25] [Rank 0] Total Loss: 4.6083
+[2025-07-07 21:58:25] [Rank 0] Total FTA: 0.1141
+[2025-07-07 21:58:25] [Rank 0] Group 0 Loss: 5.0394
+[2025-07-07 21:58:25] [Rank 0] Group 1 Loss: 4.5269
+[2025-07-07 21:58:25] [Rank 0] Group 2 Loss: 4.4587
+[2025-07-07 21:58:25] [Rank 0] Group 3 Loss: 4.5630
+[2025-07-07 21:58:25] [Rank 0] Group 4 Loss: 4.4225
+[2025-07-07 21:58:25] [Rank 0] Group 5 Loss: 4.4561
+[2025-07-07 21:58:25] [Rank 0] Group 6 Loss: 4.4952
+[2025-07-07 21:58:25] [Rank 0] Group 7 Loss: 4.6286
+[2025-07-07 21:58:25] [Rank 0] Group 8 Loss: 4.5581
+[2025-07-07 21:58:25] [Rank 0] Group 9 Loss: 4.5513
+[2025-07-07 21:58:25] [Rank 0] Group 10 Loss: 4.6446
+[2025-07-07 21:58:25] [Rank 0] Group 11 Loss: 4.5649
+[2025-07-07 21:58:25] [Rank 0] Group 0 FTA: 0.1834
+[2025-07-07 21:58:25] [Rank 0] Group 1 FTA: 0.1771
+[2025-07-07 21:58:25] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 21:58:25] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-07 21:58:25] [Rank 0] Group 4 FTA: 0.0859
+[2025-07-07 21:58:25] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 21:58:25] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-07 21:58:25] [Rank 0] Group 7 FTA: 0.1250
+[2025-07-07 21:58:25] [Rank 0] Group 8 FTA: 0.1589
+[2025-07-07 21:58:25] [Rank 0] Group 9 FTA: 0.1055
+[2025-07-07 21:58:25] [Rank 0] Group 10 FTA: 0.1348
+[2025-07-07 21:58:25] [Rank 0] Group 11 FTA: 0.1152
+[2025-07-07 21:58:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 21:58:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 21:58:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 21:58:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 21:58:26] [Rank 0] step:6501/10000 train_time:444699ms step_avg:68.40ms
+[2025-07-07 21:58:28] [Rank 0] step:6521/10000 train_time:445463ms step_avg:68.31ms
+[2025-07-07 21:58:29] [Rank 0] step:6541/10000 train_time:446828ms step_avg:68.31ms
+[2025-07-07 21:58:30] [Rank 0] step:6561/10000 train_time:448193ms step_avg:68.31ms
+[2025-07-07 21:58:32] [Rank 0] step:6581/10000 train_time:449558ms step_avg:68.31ms
+[2025-07-07 21:58:33] [Rank 0] step:6601/10000 train_time:450923ms step_avg:68.31ms
+[2025-07-07 21:58:34] [Rank 0] step:6621/10000 train_time:452291ms step_avg:68.31ms
+[2025-07-07 21:58:36] [Rank 0] step:6641/10000 train_time:453658ms step_avg:68.31ms
+[2025-07-07 21:58:37] [Rank 0] step:6661/10000 train_time:455683ms step_avg:68.41ms
+[2025-07-07 21:58:39] [Rank 0] step:6681/10000 train_time:456422ms step_avg:68.32ms
+[2025-07-07 21:58:40] [Rank 0] step:6701/10000 train_time:457791ms step_avg:68.32ms
+[2025-07-07 21:58:41] [Rank 0] step:6721/10000 train_time:459161ms step_avg:68.32ms
+[2025-07-07 21:58:43] [Rank 0] step:6741/10000 train_time:460530ms step_avg:68.32ms
+[2025-07-07 21:58:44] [Rank 0] step:6761/10000 train_time:461902ms step_avg:68.32ms
+[2025-07-07 21:58:45] [Rank 0] step:6781/10000 train_time:463273ms step_avg:68.32ms
+[2025-07-07 21:58:47] [Rank 0] step:6801/10000 train_time:464643ms step_avg:68.32ms
+[2025-07-07 21:58:48] [Rank 0] step:6821/10000 train_time:466011ms step_avg:68.32ms
+[2025-07-07 21:58:50] [Rank 0] step:6841/10000 train_time:467382ms step_avg:68.32ms
+[2025-07-07 21:58:51] [Rank 0] step:6861/10000 train_time:468805ms step_avg:68.33ms
+[2025-07-07 21:58:52] [Rank 0] step:6881/10000 train_time:470175ms step_avg:68.33ms
+[2025-07-07 21:58:54] [Rank 0] step:6901/10000 train_time:471547ms step_avg:68.33ms
+[2025-07-07 21:58:55] [Rank 0] step:6921/10000 train_time:472919ms step_avg:68.33ms
+[2025-07-07 21:58:56] [Rank 0] step:6941/10000 train_time:474290ms step_avg:68.33ms
+[2025-07-07 21:58:58] [Rank 0] step:6961/10000 train_time:475663ms step_avg:68.33ms
+[2025-07-07 21:58:59] [Rank 0] step:6981/10000 train_time:477034ms step_avg:68.33ms
+[2025-07-07 21:59:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 21:59:02] [Rank 0] PRINT: step:7000/10000 train_loss:1.4966 val_loss:1.4932 train_time:479030ms step_avg:68.43ms
+[2025-07-07 21:59:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 21:59:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 21:59:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:04:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:04:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:04:22] [Rank 0] Total Loss: 4.6457
+[2025-07-07 22:04:22] [Rank 0] Total FTA: 0.1351
+[2025-07-07 22:04:22] [Rank 0] Group 0 Loss: 5.1918
+[2025-07-07 22:04:22] [Rank 0] Group 1 Loss: 4.6470
+[2025-07-07 22:04:22] [Rank 0] Group 2 Loss: 4.4258
+[2025-07-07 22:04:22] [Rank 0] Group 3 Loss: 4.6451
+[2025-07-07 22:04:23] [Rank 0] Group 4 Loss: 4.3782
+[2025-07-07 22:04:23] [Rank 0] Group 5 Loss: 4.4638
+[2025-07-07 22:04:23] [Rank 0] Group 6 Loss: 4.5152
+[2025-07-07 22:04:23] [Rank 0] Group 7 Loss: 4.6165
+[2025-07-07 22:04:23] [Rank 0] Group 8 Loss: 4.5545
+[2025-07-07 22:04:23] [Rank 0] Group 9 Loss: 4.5603
+[2025-07-07 22:04:23] [Rank 0] Group 10 Loss: 4.6275
+[2025-07-07 22:04:23] [Rank 0] Group 11 Loss: 4.6106
+[2025-07-07 22:04:23] [Rank 0] Group 0 FTA: 0.3498
+[2025-07-07 22:04:23] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 22:04:23] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:04:23] [Rank 0] Group 3 FTA: 0.0807
+[2025-07-07 22:04:23] [Rank 0] Group 4 FTA: 0.0807
+[2025-07-07 22:04:23] [Rank 0] Group 5 FTA: 0.0729
+[2025-07-07 22:04:23] [Rank 0] Group 6 FTA: 0.1094
+[2025-07-07 22:04:23] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 22:04:23] [Rank 0] Group 8 FTA: 0.1432
+[2025-07-07 22:04:23] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-07 22:04:23] [Rank 0] Group 10 FTA: 0.1016
+[2025-07-07 22:04:23] [Rank 0] Group 11 FTA: 0.1230
+[2025-07-07 22:04:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 22:04:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 22:04:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 22:04:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 22:04:24] [Rank 0] step:7001/10000 train_time:479040ms step_avg:68.42ms
+[2025-07-07 22:04:26] [Rank 0] step:7021/10000 train_time:479813ms step_avg:68.34ms
+[2025-07-07 22:04:27] [Rank 0] step:7041/10000 train_time:481236ms step_avg:68.35ms
+[2025-07-07 22:04:28] [Rank 0] step:7061/10000 train_time:482601ms step_avg:68.35ms
+[2025-07-07 22:04:30] [Rank 0] step:7081/10000 train_time:483967ms step_avg:68.35ms
+[2025-07-07 22:04:31] [Rank 0] step:7101/10000 train_time:485334ms step_avg:68.35ms
+[2025-07-07 22:04:32] [Rank 0] step:7121/10000 train_time:486701ms step_avg:68.35ms
+[2025-07-07 22:04:34] [Rank 0] step:7141/10000 train_time:488070ms step_avg:68.35ms
+[2025-07-07 22:04:35] [Rank 0] step:7161/10000 train_time:489439ms step_avg:68.35ms
+[2025-07-07 22:04:37] [Rank 0] step:7181/10000 train_time:490807ms step_avg:68.35ms
+[2025-07-07 22:04:38] [Rank 0] step:7201/10000 train_time:492852ms step_avg:68.44ms
+[2025-07-07 22:04:39] [Rank 0] step:7221/10000 train_time:493590ms step_avg:68.35ms
+[2025-07-07 22:04:41] [Rank 0] step:7241/10000 train_time:494960ms step_avg:68.36ms
+[2025-07-07 22:04:42] [Rank 0] step:7261/10000 train_time:496332ms step_avg:68.36ms
+[2025-07-07 22:04:43] [Rank 0] step:7281/10000 train_time:497703ms step_avg:68.36ms
+[2025-07-07 22:04:45] [Rank 0] step:7301/10000 train_time:499073ms step_avg:68.36ms
+[2025-07-07 22:04:46] [Rank 0] step:7321/10000 train_time:500443ms step_avg:68.36ms
+[2025-07-07 22:04:48] [Rank 0] step:7341/10000 train_time:501815ms step_avg:68.36ms
+[2025-07-07 22:04:49] [Rank 0] step:7361/10000 train_time:503189ms step_avg:68.36ms
+[2025-07-07 22:04:50] [Rank 0] step:7381/10000 train_time:504561ms step_avg:68.36ms
+[2025-07-07 22:04:52] [Rank 0] step:7401/10000 train_time:505977ms step_avg:68.37ms
+[2025-07-07 22:04:53] [Rank 0] step:7421/10000 train_time:507349ms step_avg:68.37ms
+[2025-07-07 22:04:55] [Rank 0] step:7441/10000 train_time:508722ms step_avg:68.37ms
+[2025-07-07 22:04:56] [Rank 0] step:7461/10000 train_time:510094ms step_avg:68.37ms
+[2025-07-07 22:04:57] [Rank 0] step:7481/10000 train_time:511467ms step_avg:68.37ms
+[2025-07-07 22:04:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:05:00] [Rank 0] PRINT: step:7500/10000 train_loss:1.4695 val_loss:1.4796 train_time:513464ms step_avg:68.46ms
+[2025-07-07 22:05:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:05:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:05:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:10:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:10:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:10:20] [Rank 0] Total Loss: 4.6958
+[2025-07-07 22:10:20] [Rank 0] Total FTA: 0.1362
+[2025-07-07 22:10:20] [Rank 0] Group 0 Loss: 5.2052
+[2025-07-07 22:10:20] [Rank 0] Group 1 Loss: 4.5134
+[2025-07-07 22:10:20] [Rank 0] Group 2 Loss: 4.5726
+[2025-07-07 22:10:20] [Rank 0] Group 3 Loss: 4.5660
+[2025-07-07 22:10:20] [Rank 0] Group 4 Loss: 4.4995
+[2025-07-07 22:10:20] [Rank 0] Group 5 Loss: 4.5447
+[2025-07-07 22:10:20] [Rank 0] Group 6 Loss: 4.5993
+[2025-07-07 22:10:20] [Rank 0] Group 7 Loss: 4.6754
+[2025-07-07 22:10:20] [Rank 0] Group 8 Loss: 4.6397
+[2025-07-07 22:10:20] [Rank 0] Group 9 Loss: 4.6543
+[2025-07-07 22:10:20] [Rank 0] Group 10 Loss: 4.6997
+[2025-07-07 22:10:20] [Rank 0] Group 11 Loss: 4.6802
+[2025-07-07 22:10:20] [Rank 0] Group 0 FTA: 0.1964
+[2025-07-07 22:10:20] [Rank 0] Group 1 FTA: 0.1771
+[2025-07-07 22:10:20] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:10:20] [Rank 0] Group 3 FTA: 0.1198
+[2025-07-07 22:10:20] [Rank 0] Group 4 FTA: 0.1224
+[2025-07-07 22:10:20] [Rank 0] Group 5 FTA: 0.1068
+[2025-07-07 22:10:20] [Rank 0] Group 6 FTA: 0.1458
+[2025-07-07 22:10:20] [Rank 0] Group 7 FTA: 0.1562
+[2025-07-07 22:10:20] [Rank 0] Group 8 FTA: 0.1120
+[2025-07-07 22:10:20] [Rank 0] Group 9 FTA: 0.1211
+[2025-07-07 22:10:20] [Rank 0] Group 10 FTA: 0.1387
+[2025-07-07 22:10:20] [Rank 0] Group 11 FTA: 0.1494
+[2025-07-07 22:10:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 22:10:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 22:10:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 22:10:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 22:10:22] [Rank 0] step:7501/10000 train_time:513474ms step_avg:68.45ms
+[2025-07-07 22:10:23] [Rank 0] step:7521/10000 train_time:514227ms step_avg:68.37ms
+[2025-07-07 22:10:25] [Rank 0] step:7541/10000 train_time:515591ms step_avg:68.37ms
+[2025-07-07 22:10:26] [Rank 0] step:7561/10000 train_time:516954ms step_avg:68.37ms
+[2025-07-07 22:10:27] [Rank 0] step:7581/10000 train_time:518365ms step_avg:68.38ms
+[2025-07-07 22:10:29] [Rank 0] step:7601/10000 train_time:519729ms step_avg:68.38ms
+[2025-07-07 22:10:30] [Rank 0] step:7621/10000 train_time:521093ms step_avg:68.38ms
+[2025-07-07 22:10:31] [Rank 0] step:7641/10000 train_time:522458ms step_avg:68.38ms
+[2025-07-07 22:10:33] [Rank 0] step:7661/10000 train_time:523826ms step_avg:68.38ms
+[2025-07-07 22:10:34] [Rank 0] step:7681/10000 train_time:525193ms step_avg:68.38ms
+[2025-07-07 22:10:36] [Rank 0] step:7701/10000 train_time:526561ms step_avg:68.38ms
+[2025-07-07 22:10:37] [Rank 0] step:7721/10000 train_time:527928ms step_avg:68.38ms
+[2025-07-07 22:10:38] [Rank 0] step:7741/10000 train_time:529298ms step_avg:68.38ms
+[2025-07-07 22:10:40] [Rank 0] step:7761/10000 train_time:530719ms step_avg:68.38ms
+[2025-07-07 22:10:41] [Rank 0] step:7781/10000 train_time:532090ms step_avg:68.38ms
+[2025-07-07 22:10:42] [Rank 0] step:7801/10000 train_time:533461ms step_avg:68.38ms
+[2025-07-07 22:10:44] [Rank 0] step:7821/10000 train_time:534832ms step_avg:68.38ms
+[2025-07-07 22:10:45] [Rank 0] step:7841/10000 train_time:536203ms step_avg:68.38ms
+[2025-07-07 22:10:47] [Rank 0] step:7861/10000 train_time:537573ms step_avg:68.38ms
+[2025-07-07 22:10:48] [Rank 0] step:7881/10000 train_time:538945ms step_avg:68.39ms
+[2025-07-07 22:10:49] [Rank 0] step:7901/10000 train_time:540317ms step_avg:68.39ms
+[2025-07-07 22:10:51] [Rank 0] step:7921/10000 train_time:541687ms step_avg:68.39ms
+[2025-07-07 22:10:52] [Rank 0] step:7941/10000 train_time:543103ms step_avg:68.39ms
+[2025-07-07 22:10:53] [Rank 0] step:7961/10000 train_time:544474ms step_avg:68.39ms
+[2025-07-07 22:10:55] [Rank 0] step:7981/10000 train_time:545846ms step_avg:68.39ms
+[2025-07-07 22:10:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:10:57] [Rank 0] PRINT: step:8000/10000 train_loss:1.4451 val_loss:1.4502 train_time:547841ms step_avg:68.48ms
+[2025-07-07 22:10:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:10:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:10:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:16:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:16:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:16:15] [Rank 0] Total Loss: 4.6219
+[2025-07-07 22:16:15] [Rank 0] Total FTA: 0.1253
+[2025-07-07 22:16:15] [Rank 0] Group 0 Loss: 5.1316
+[2025-07-07 22:16:15] [Rank 0] Group 1 Loss: 4.4644
+[2025-07-07 22:16:15] [Rank 0] Group 2 Loss: 4.5816
+[2025-07-07 22:16:15] [Rank 0] Group 3 Loss: 4.5680
+[2025-07-07 22:16:15] [Rank 0] Group 4 Loss: 4.3523
+[2025-07-07 22:16:15] [Rank 0] Group 5 Loss: 4.4761
+[2025-07-07 22:16:15] [Rank 0] Group 6 Loss: 4.4740
+[2025-07-07 22:16:15] [Rank 0] Group 7 Loss: 4.5920
+[2025-07-07 22:16:15] [Rank 0] Group 8 Loss: 4.5930
+[2025-07-07 22:16:15] [Rank 0] Group 9 Loss: 4.6067
+[2025-07-07 22:16:15] [Rank 0] Group 10 Loss: 4.5592
+[2025-07-07 22:16:15] [Rank 0] Group 11 Loss: 4.6017
+[2025-07-07 22:16:15] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 22:16:15] [Rank 0] Group 1 FTA: 0.1719
+[2025-07-07 22:16:15] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:16:15] [Rank 0] Group 3 FTA: 0.0703
+[2025-07-07 22:16:15] [Rank 0] Group 4 FTA: 0.0833
+[2025-07-07 22:16:15] [Rank 0] Group 5 FTA: 0.0859
+[2025-07-07 22:16:15] [Rank 0] Group 6 FTA: 0.1146
+[2025-07-07 22:16:15] [Rank 0] Group 7 FTA: 0.1432
+[2025-07-07 22:16:15] [Rank 0] Group 8 FTA: 0.1901
+[2025-07-07 22:16:15] [Rank 0] Group 9 FTA: 0.1406
+[2025-07-07 22:16:15] [Rank 0] Group 10 FTA: 0.1504
+[2025-07-07 22:16:15] [Rank 0] Group 11 FTA: 0.1338
+[2025-07-07 22:16:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
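The curve PNGs written after each evaluation can also be regenerated offline, since the step and summary lines are regular enough to parse. A small convenience reader under that assumption (this is a sketch against the deduplicated line format shown here, not part of the training code); the PRINT validation lines use train_loss/val_loss and are deliberately not matched:

    import re

    STEP_RE = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")
    EVAL_RE = re.compile(r"Total (Loss|FTA): ([\d.]+)")

    def parse_log(path):
        """Return ([(step, train_time_ms, step_avg_ms)], [(metric, value)])."""
        steps, evals = [], []
        with open(path) as f:
            for line in f:
                m = STEP_RE.search(line)
                if m:
                    steps.append((int(m.group(1)), int(m.group(2)), float(m.group(3))))
                    continue
                m = EVAL_RE.search(line)
                if m:  # "Total Loss: 4.6219" / "Total FTA: 0.1253" summary lines
                    evals.append((m.group(1), float(m.group(2))))
        return steps, evals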
+[2025-07-07 22:16:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 22:16:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 22:16:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 22:16:17] [Rank 0] step:8001/10000 train_time:547852ms step_avg:68.47ms
+[2025-07-07 22:16:18] [Rank 0] step:8021/10000 train_time:548621ms step_avg:68.40ms
+[2025-07-07 22:16:19] [Rank 0] step:8041/10000 train_time:549984ms step_avg:68.40ms
+[2025-07-07 22:16:21] [Rank 0] step:8061/10000 train_time:551350ms step_avg:68.40ms
+[2025-07-07 22:16:22] [Rank 0] step:8081/10000 train_time:552716ms step_avg:68.40ms
+[2025-07-07 22:16:24] [Rank 0] step:8101/10000 train_time:554082ms step_avg:68.40ms
+[2025-07-07 22:16:25] [Rank 0] step:8121/10000 train_time:555503ms step_avg:68.40ms
+[2025-07-07 22:16:26] [Rank 0] step:8141/10000 train_time:556870ms step_avg:68.40ms
+[2025-07-07 22:16:28] [Rank 0] step:8161/10000 train_time:558237ms step_avg:68.40ms
+[2025-07-07 22:16:29] [Rank 0] step:8181/10000 train_time:559605ms step_avg:68.40ms
+[2025-07-07 22:16:30] [Rank 0] step:8201/10000 train_time:560975ms step_avg:68.40ms
+[2025-07-07 22:16:32] [Rank 0] step:8221/10000 train_time:562347ms step_avg:68.40ms
+[2025-07-07 22:16:33] [Rank 0] step:8241/10000 train_time:563717ms step_avg:68.40ms
+[2025-07-07 22:16:34] [Rank 0] step:8261/10000 train_time:565088ms step_avg:68.40ms
+[2025-07-07 22:16:36] [Rank 0] step:8281/10000 train_time:566459ms step_avg:68.40ms
+[2025-07-07 22:16:37] [Rank 0] step:8301/10000 train_time:567857ms step_avg:68.41ms
+[2025-07-07 22:16:39] [Rank 0] step:8321/10000 train_time:569228ms step_avg:68.41ms
+[2025-07-07 22:16:40] [Rank 0] step:8341/10000 train_time:570600ms step_avg:68.41ms
+[2025-07-07 22:16:41] [Rank 0] step:8361/10000 train_time:571971ms step_avg:68.41ms
+[2025-07-07 22:16:43] [Rank 0] step:8381/10000 train_time:573344ms step_avg:68.41ms
+[2025-07-07 22:16:44] [Rank 0] step:8401/10000 train_time:574716ms step_avg:68.41ms
+[2025-07-07 22:16:46] [Rank 0] step:8421/10000 train_time:576090ms step_avg:68.41ms
+[2025-07-07 22:16:47] [Rank 0] step:8441/10000 train_time:577463ms step_avg:68.41ms
+[2025-07-07 22:16:48] [Rank 0] step:8461/10000 train_time:578881ms step_avg:68.42ms
+[2025-07-07 22:16:50] [Rank 0] step:8481/10000 train_time:580242ms step_avg:68.42ms
+[2025-07-07 22:16:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:16:52] [Rank 0] PRINT: step:8500/10000 train_loss:1.4230 val_loss:1.4329 train_time:582238ms step_avg:68.50ms
+[2025-07-07 22:16:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:16:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 22:16:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:22:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:22:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:22:12] [Rank 0] Total Loss: 4.6528
+[2025-07-07 22:22:12] [Rank 0] Total FTA: 0.1749
+[2025-07-07 22:22:12] [Rank 0] Group 0 Loss: 5.1306
+[2025-07-07 22:22:12] [Rank 0] Group 1 Loss: 4.5240
+[2025-07-07 22:22:12] [Rank 0] Group 2 Loss: 4.5633
+[2025-07-07 22:22:12] [Rank 0] Group 3 Loss: 4.6626
+[2025-07-07 22:22:12] [Rank 0] Group 4 Loss: 4.4278
+[2025-07-07 22:22:12] [Rank 0] Group 5 Loss: 4.4791
+[2025-07-07 22:22:12] [Rank 0] Group 6 Loss: 4.5889
+[2025-07-07 22:22:12] [Rank 0] Group 7 Loss: 4.6838
+[2025-07-07 22:22:12] [Rank 0] Group 8 Loss: 4.6045
+[2025-07-07 22:22:12] [Rank 0] Group 9 Loss: 4.6483
+[2025-07-07 22:22:12] [Rank 0] Group 10 Loss: 4.6276
+[2025-07-07 22:22:12] [Rank 0] Group 11 Loss: 4.5658
+[2025-07-07 22:22:12] [Rank 0] Group 0 FTA: 0.3537
+[2025-07-07 22:22:12] [Rank 0] Group 1 FTA: 0.3125
+[2025-07-07 22:22:12] [Rank 0] Group 2 FTA: 0.1068
+[2025-07-07 22:22:12] [Rank 0] Group 3 FTA: 0.0911
+[2025-07-07 22:22:12] [Rank 0] Group 4 FTA: 0.1302
+[2025-07-07 22:22:12] [Rank 0] Group 5 FTA: 0.1094
+[2025-07-07 22:22:12] [Rank 0] Group 6 FTA: 0.1719
+[2025-07-07 22:22:12] [Rank 0] Group 7 FTA: 0.1354
+[2025-07-07 22:22:12] [Rank 0] Group 8 FTA: 0.1172
+[2025-07-07 22:22:12] [Rank 0] Group 9 FTA: 0.1055
+[2025-07-07 22:22:12] [Rank 0] Group 10 FTA: 0.1484
+[2025-07-07 22:22:12] [Rank 0] Group 11 FTA: 0.1553
+[2025-07-07 22:22:13] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 22:22:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 22:22:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 22:22:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 22:22:14] [Rank 0] step:8501/10000 train_time:582249ms step_avg:68.49ms
+[2025-07-07 22:22:15] [Rank 0] step:8521/10000 train_time:583020ms step_avg:68.42ms
+[2025-07-07 22:22:16] [Rank 0] step:8541/10000 train_time:584384ms step_avg:68.42ms
+[2025-07-07 22:22:18] [Rank 0] step:8561/10000 train_time:585749ms step_avg:68.42ms
+[2025-07-07 22:22:19] [Rank 0] step:8581/10000 train_time:587113ms step_avg:68.42ms
+[2025-07-07 22:22:20] [Rank 0] step:8601/10000 train_time:588479ms step_avg:68.42ms
+[2025-07-07 22:22:22] [Rank 0] step:8621/10000 train_time:589845ms step_avg:68.42ms
+[2025-07-07 22:22:23] [Rank 0] step:8641/10000 train_time:591460ms step_avg:68.45ms
+[2025-07-07 22:22:25] [Rank 0] step:8661/10000 train_time:592603ms step_avg:68.42ms
+[2025-07-07 22:22:26] [Rank 0] step:8681/10000 train_time:593979ms step_avg:68.42ms
+[2025-07-07 22:22:27] [Rank 0] step:8701/10000 train_time:595348ms step_avg:68.42ms
+[2025-07-07 22:22:29] [Rank 0] step:8721/10000 train_time:596719ms step_avg:68.42ms
+[2025-07-07 22:22:30] [Rank 0] step:8741/10000 train_time:598089ms step_avg:68.42ms
+[2025-07-07 22:22:31] [Rank 0] step:8761/10000 train_time:599459ms step_avg:68.42ms
+[2025-07-07 22:22:33] [Rank 0] step:8781/10000 train_time:600831ms step_avg:68.42ms
+[2025-07-07 22:22:34] [Rank 0] step:8801/10000 train_time:602204ms step_avg:68.42ms
0] step:8801/10000 train_time:602204ms step_avg:68.42ms +[2025-07-07 22:22:36] [Rank 0] step:8821/10000 train_time:603629ms step_avg:68.43ms +[2025-07-07 22:22:36] [Rank 0] step:8821/10000 train_time:603629ms step_avg:68.43ms +[2025-07-07 22:22:37] [Rank 0] step:8841/10000 train_time:604990ms step_avg:68.43ms +[2025-07-07 22:22:37] [Rank 0] step:8841/10000 train_time:604990ms step_avg:68.43ms +[2025-07-07 22:22:38] [Rank 0] step:8861/10000 train_time:606362ms step_avg:68.43ms +[2025-07-07 22:22:38] [Rank 0] step:8861/10000 train_time:606362ms step_avg:68.43ms +[2025-07-07 22:22:40] [Rank 0] step:8881/10000 train_time:607734ms step_avg:68.43ms +[2025-07-07 22:22:40] [Rank 0] step:8881/10000 train_time:607734ms step_avg:68.43ms +[2025-07-07 22:22:41] [Rank 0] step:8901/10000 train_time:609106ms step_avg:68.43ms +[2025-07-07 22:22:41] [Rank 0] step:8901/10000 train_time:609106ms step_avg:68.43ms +[2025-07-07 22:22:42] [Rank 0] step:8921/10000 train_time:610479ms step_avg:68.43ms +[2025-07-07 22:22:42] [Rank 0] step:8921/10000 train_time:610479ms step_avg:68.43ms +[2025-07-07 22:22:44] [Rank 0] step:8941/10000 train_time:611852ms step_avg:68.43ms +[2025-07-07 22:22:44] [Rank 0] step:8941/10000 train_time:611852ms step_avg:68.43ms +[2025-07-07 22:22:45] [Rank 0] step:8961/10000 train_time:613225ms step_avg:68.43ms +[2025-07-07 22:22:45] [Rank 0] step:8961/10000 train_time:613225ms step_avg:68.43ms +[2025-07-07 22:22:47] [Rank 0] step:8981/10000 train_time:614598ms step_avg:68.43ms +[2025-07-07 22:22:47] [Rank 0] step:8981/10000 train_time:614598ms step_avg:68.43ms +[2025-07-07 22:22:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 22:22:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 22:22:49] [Rank 0] PRINT: step:9000/10000 train_loss:1.4052 val_loss:1.4165 train_time:616593ms step_avg:68.51ms +[2025-07-07 22:22:49] [Rank 0] PRINT: step:9000/10000 train_loss:1.4052 val_loss:1.4165 train_time:616593ms step_avg:68.51ms +[2025-07-07 22:22:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 22:22:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 22:22:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 22:22:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
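A note on the two log lines above: the detailed evaluation asks for ~5000 samples, but the stratified sampler in the logged script draws max(1, int(len(items) * ratio)) items per class, so every class contributes at least one sample and small classes round up. That is why the evaluation set size reported on the next line is 5633 rather than 5000. A minimal sketch of that rounding behavior (the class sizes below are hypothetical):

    def stratified_counts(class_sizes, num_samples):
        # Mirrors run_detailed_evaluation in the logged script:
        # each class keeps max(1, int(n * ratio)) samples.
        ratio = num_samples / sum(class_sizes)
        return [max(1, int(n * ratio)) for n in class_sizes]

    # Many tiny classes each round up to one sample, overshooting the target.
    print(sum(stratified_counts([10000] * 4 + [5] * 2000, 5000)))  # -> 6000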
+[2025-07-07 22:22:49] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:28:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:28:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:28:10] [Rank 0] Total Loss: 4.6637
+[2025-07-07 22:28:10] [Rank 0] Total FTA: 0.1434
+[2025-07-07 22:28:10] [Rank 0] Group 0 Loss: 4.9520
+[2025-07-07 22:28:10] [Rank 0] Group 1 Loss: 4.6161
+[2025-07-07 22:28:10] [Rank 0] Group 2 Loss: 4.6604
+[2025-07-07 22:28:10] [Rank 0] Group 3 Loss: 4.6688
+[2025-07-07 22:28:10] [Rank 0] Group 4 Loss: 4.4963
+[2025-07-07 22:28:10] [Rank 0] Group 5 Loss: 4.5229
+[2025-07-07 22:28:10] [Rank 0] Group 6 Loss: 4.5549
+[2025-07-07 22:28:10] [Rank 0] Group 7 Loss: 4.7105
+[2025-07-07 22:28:10] [Rank 0] Group 8 Loss: 4.6532
+[2025-07-07 22:28:10] [Rank 0] Group 9 Loss: 4.6102
+[2025-07-07 22:28:10] [Rank 0] Group 10 Loss: 4.6856
+[2025-07-07 22:28:10] [Rank 0] Group 11 Loss: 4.6095
+[2025-07-07 22:28:10] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-07 22:28:10] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-07 22:28:10] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:28:10] [Rank 0] Group 3 FTA: 0.0807
+[2025-07-07 22:28:10] [Rank 0] Group 4 FTA: 0.0807
+[2025-07-07 22:28:10] [Rank 0] Group 5 FTA: 0.1641
+[2025-07-07 22:28:10] [Rank 0] Group 6 FTA: 0.1562
+[2025-07-07 22:28:10] [Rank 0] Group 7 FTA: 0.1771
+[2025-07-07 22:28:10] [Rank 0] Group 8 FTA: 0.1667
+[2025-07-07 22:28:10] [Rank 0] Group 9 FTA: 0.1680
+[2025-07-07 22:28:10] [Rank 0] Group 10 FTA: 0.1621
+[2025-07-07 22:28:10] [Rank 0] Group 11 FTA: 0.1709
+[2025-07-07 22:28:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 22:28:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 22:28:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 22:28:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 22:28:12] [Rank 0] step:9001/10000 train_time:616612ms step_avg:68.50ms
+[2025-07-07 22:28:13] [Rank 0] step:9021/10000 train_time:618074ms step_avg:68.52ms
+[2025-07-07 22:28:15] [Rank 0] step:9041/10000 train_time:619438ms step_avg:68.51ms
+[2025-07-07 22:28:16] [Rank 0] step:9061/10000 train_time:620802ms step_avg:68.51ms
+[2025-07-07 22:28:17] [Rank 0] step:9081/10000 train_time:622167ms step_avg:68.51ms
+[2025-07-07 22:28:19] [Rank 0] step:9101/10000 train_time:623534ms step_avg:68.51ms
+[2025-07-07 22:28:20] [Rank 0] step:9121/10000 train_time:624899ms step_avg:68.51ms
+[2025-07-07 22:28:22] [Rank 0] step:9141/10000 train_time:626266ms step_avg:68.51ms
+[2025-07-07 22:28:23] [Rank 0] step:9161/10000 train_time:627634ms step_avg:68.51ms
+[2025-07-07 22:28:24] [Rank 0] step:9181/10000 train_time:629003ms step_avg:68.51ms
+[2025-07-07 22:28:26] [Rank 0] step:9201/10000 train_time:630394ms step_avg:68.51ms
+[2025-07-07 22:28:27] [Rank 0] step:9221/10000 train_time:631763ms step_avg:68.51ms
+[2025-07-07 22:28:28] [Rank 0] step:9241/10000 train_time:633133ms step_avg:68.51ms
+[2025-07-07 22:28:30] [Rank 0] step:9261/10000 train_time:634504ms step_avg:68.51ms
+[2025-07-07 22:28:31] [Rank 0] step:9281/10000 train_time:635876ms step_avg:68.51ms
+[2025-07-07 22:28:33] [Rank 0] step:9301/10000 train_time:637247ms step_avg:68.51ms
+[2025-07-07 22:28:34] [Rank 0] step:9321/10000 train_time:638620ms step_avg:68.51ms
+[2025-07-07 22:28:35] [Rank 0] step:9341/10000 train_time:640019ms step_avg:68.52ms
+[2025-07-07 22:28:37] [Rank 0] step:9361/10000 train_time:641639ms step_avg:68.54ms
+[2025-07-07 22:28:38] [Rank 0] step:9381/10000 train_time:642761ms step_avg:68.52ms
+[2025-07-07 22:28:39] [Rank 0] step:9401/10000 train_time:644133ms step_avg:68.52ms
+[2025-07-07 22:28:41] [Rank 0] step:9421/10000 train_time:645505ms step_avg:68.52ms
+[2025-07-07 22:28:42] [Rank 0] step:9441/10000 train_time:646877ms step_avg:68.52ms
+[2025-07-07 22:28:44] [Rank 0] step:9461/10000 train_time:648251ms step_avg:68.52ms
+[2025-07-07 22:28:45] [Rank 0] step:9481/10000 train_time:649623ms step_avg:68.52ms
+[2025-07-07 22:28:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:28:47] [Rank 0] PRINT: step:9500/10000 train_loss:1.3901 val_loss:1.4065 train_time:651619ms step_avg:68.59ms
+[2025-07-07 22:28:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:28:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
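On the recurring divisibility warning above: 1966080 / 262144 = 7.5, so a validation loop that consumes whole batches (as the warning's wording suggests; the loop itself is not shown in this excerpt) runs 7 full batches and the trailing 131072 tokens, half a batch, are never scored. A quick check of that arithmetic, using only the two numbers from the warning:

    val_tokens, val_batch_size = 1966080, 262144   # values from the warning
    steps = val_tokens // val_batch_size           # 7 full validation batches
    print(steps, val_tokens - steps * val_batch_size)  # 7 131072 tokens skipped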
+[2025-07-07 22:28:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:34:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:34:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:34:08] [Rank 0] Total Loss: 4.6490
+[2025-07-07 22:34:08] [Rank 0] Total FTA: 0.1683
+[2025-07-07 22:34:08] [Rank 0] Group 0 Loss: 4.9700
+[2025-07-07 22:34:08] [Rank 0] Group 1 Loss: 4.5809
+[2025-07-07 22:34:08] [Rank 0] Group 2 Loss: 4.5659
+[2025-07-07 22:34:08] [Rank 0] Group 3 Loss: 4.6709
+[2025-07-07 22:34:08] [Rank 0] Group 4 Loss: 4.3584
+[2025-07-07 22:34:08] [Rank 0] Group 5 Loss: 4.4808
+[2025-07-07 22:34:08] [Rank 0] Group 6 Loss: 4.4921
+[2025-07-07 22:34:08] [Rank 0] Group 7 Loss: 4.6997
+[2025-07-07 22:34:08] [Rank 0] Group 8 Loss: 4.6495
+[2025-07-07 22:34:08] [Rank 0] Group 9 Loss: 4.7135
+[2025-07-07 22:34:08] [Rank 0] Group 10 Loss: 4.6866
+[2025-07-07 22:34:08] [Rank 0] Group 11 Loss: 4.6330
+[2025-07-07 22:34:08] [Rank 0] Group 0 FTA: 0.1521
+[2025-07-07 22:34:08] [Rank 0] Group 1 FTA: 0.2891
+[2025-07-07 22:34:08] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 22:34:08] [Rank 0] Group 3 FTA: 0.0911
+[2025-07-07 22:34:08] [Rank 0] Group 4 FTA: 0.1562
+[2025-07-07 22:34:08] [Rank 0] Group 5 FTA: 0.1562
+[2025-07-07 22:34:08] [Rank 0] Group 6 FTA: 0.1484
+[2025-07-07 22:34:08] [Rank 0] Group 7 FTA: 0.2083
+[2025-07-07 22:34:08] [Rank 0] Group 8 FTA: 0.1953
+[2025-07-07 22:34:08] [Rank 0] Group 9 FTA: 0.1992
+[2025-07-07 22:34:08] [Rank 0] Group 10 FTA: 0.1777
+[2025-07-07 22:34:08] [Rank 0] Group 11 FTA: 0.1738
+[2025-07-07 22:34:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 22:34:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 22:34:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 22:34:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 22:34:10] [Rank 0] step:9501/10000 train_time:651630ms step_avg:68.59ms
+[2025-07-07 22:34:11] [Rank 0] step:9521/10000 train_time:652387ms step_avg:68.52ms
+[2025-07-07 22:34:13] [Rank 0] step:9541/10000 train_time:653752ms step_avg:68.52ms
+[2025-07-07 22:34:14] [Rank 0] step:9561/10000 train_time:655150ms step_avg:68.52ms
+[2025-07-07 22:34:15] [Rank 0] step:9581/10000 train_time:656516ms step_avg:68.52ms
+[2025-07-07 22:34:17] [Rank 0] step:9601/10000 train_time:657882ms step_avg:68.52ms
+[2025-07-07 22:34:18] [Rank 0] step:9621/10000 train_time:659248ms step_avg:68.52ms
+[2025-07-07 22:34:19] [Rank 0] step:9641/10000 train_time:660614ms step_avg:68.52ms
+[2025-07-07 22:34:21] [Rank 0] step:9661/10000 train_time:661981ms step_avg:68.52ms
+[2025-07-07 22:34:22] [Rank 0] step:9681/10000 train_time:663348ms step_avg:68.52ms
+[2025-07-07 22:34:23] [Rank 0] step:9701/10000 train_time:664715ms step_avg:68.52ms
+[2025-07-07 22:34:25] [Rank 0] step:9721/10000 train_time:666085ms step_avg:68.52ms
+[2025-07-07 22:34:26] [Rank 0] step:9741/10000 train_time:667505ms step_avg:68.53ms
+[2025-07-07 22:34:28] [Rank 0] step:9761/10000 train_time:668874ms step_avg:68.53ms
+[2025-07-07 22:34:29] [Rank 0] step:9781/10000 train_time:670245ms step_avg:68.53ms
+[2025-07-07 22:34:30] [Rank 0] step:9801/10000 train_time:671616ms step_avg:68.53ms
+[2025-07-07 22:34:32] [Rank 0] step:9821/10000 train_time:672987ms step_avg:68.53ms
+[2025-07-07 22:34:33] [Rank 0] step:9841/10000 train_time:674358ms step_avg:68.53ms
+[2025-07-07 22:34:34] [Rank 0] step:9861/10000 train_time:675728ms step_avg:68.53ms
+[2025-07-07 22:34:36] [Rank 0] step:9881/10000 train_time:677099ms step_avg:68.53ms
+[2025-07-07 22:34:37] [Rank 0] step:9901/10000 train_time:678472ms step_avg:68.53ms
+[2025-07-07 22:34:39] [Rank 0] step:9921/10000 train_time:679891ms step_avg:68.53ms
+[2025-07-07 22:34:40] [Rank 0] step:9941/10000 train_time:681262ms step_avg:68.53ms
+[2025-07-07 22:34:41] [Rank 0] step:9961/10000 train_time:682634ms step_avg:68.53ms
+[2025-07-07 22:34:43] [Rank 0] step:9981/10000 train_time:684005ms step_avg:68.53ms
+[2025-07-07 22:34:44] [Rank 0] step:10000/10000 train_time:685309ms step_avg:68.53ms
+[2025-07-07 22:34:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 22:34:45] [Rank 0] PRINT: step:10000/10000 train_loss:1.3791 val_loss:1.3971 train_time:686007ms step_avg:68.60ms
+[2025-07-07 22:34:45] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 22:34:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
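For reference, the FTA figures in these evaluation blocks are first-token accuracy: a QA sample counts as correct when the argmax prediction at the last prompt position equals the first token of the answer encoded with a leading space. The sketch below mirrors the check in run_detailed_evaluation from the script logged further down; the standalone function and its signature are illustrative, assuming a HuggingFace GPT-2 tokenizer and per-position logits:

    import torch
    from transformers import GPT2Tokenizer

    def first_token_correct(logits: torch.Tensor, tokenizer, prompt: str, answer: str) -> bool:
        # logits: [seq_len, vocab_size] scores for the (padded) prompt sequence
        prompt_len = len(tokenizer.encode(prompt, add_special_tokens=False))
        expected = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
        predicted = torch.argmax(logits[prompt_len - 1]).item()
        return predicted == expected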
+[2025-07-07 22:34:45] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 22:40:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 22:40:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 22:40:04] [Rank 0] Total Loss: 4.7160
+[2025-07-07 22:40:04] [Rank 0] Total FTA: 0.1550
+[2025-07-07 22:40:04] [Rank 0] Group 0 Loss: 5.1034
+[2025-07-07 22:40:04] [Rank 0] Group 1 Loss: 4.6329
+[2025-07-07 22:40:04] [Rank 0] Group 2 Loss: 4.6598
+[2025-07-07 22:40:04] [Rank 0] Group 3 Loss: 4.7051
+[2025-07-07 22:40:04] [Rank 0] Group 4 Loss: 4.5791
+[2025-07-07 22:40:04] [Rank 0] Group 5 Loss: 4.5114
+[2025-07-07 22:40:04] [Rank 0] Group 6 Loss: 4.5853
+[2025-07-07 22:40:04] [Rank 0] Group 7 Loss: 4.7009
+[2025-07-07 22:40:04] [Rank 0] Group 8 Loss: 4.6646
+[2025-07-07 22:40:04] [Rank 0] Group 9 Loss: 4.6937
+[2025-07-07 22:40:04] [Rank 0] Group 10 Loss: 4.6881
+[2025-07-07 22:40:04] [Rank 0] Group 11 Loss: 4.7030
+[2025-07-07 22:40:04] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 22:40:04] [Rank 0] Group 1 FTA: 0.1667
+[2025-07-07 22:40:04] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 22:40:04] [Rank 0] Group 3 FTA: 0.0677
+[2025-07-07 22:40:04] [Rank 0] Group 4 FTA: 0.1016
+[2025-07-07 22:40:04] [Rank 0] Group 5 FTA: 0.1953
+[2025-07-07 22:40:04] [Rank 0] Group 6 FTA: 0.2292
+[2025-07-07 22:40:04] [Rank 0] Group 7 FTA: 0.2057
+[2025-07-07 22:40:04] [Rank 0] Group 8 FTA: 0.1589
+[2025-07-07 22:40:04] [Rank 0] Group 9 FTA: 0.1406
+[2025-07-07 22:40:04] [Rank 0] Group 10 FTA: 0.1816
+[2025-07-07 22:40:04] [Rank 0] Group 11 FTA: 0.1797
+[2025-07-07 22:40:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_loss_curves.png
+[2025-07-07 22:40:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/per_class_acc_curves.png
+[2025-07-07 22:40:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_loss_curve.png
+[2025-07-07 22:40:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0002_seed_48/total_acc_curve.png
+[2025-07-07 22:40:06] [Rank 0] step:10001/10000 train_time:686019ms step_avg:68.60ms
+[2025-07-07 22:40:06] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 22:40:06 2025 ---
+[2025-07-07 22:40:06] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10316 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..4bc1e0881bb3c94f6054a6d61cca7e78d9f86fa0
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "171e7215-276f-4842-abca-ec38bb794efd",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..4ae2fd5aafbfca5a287b0e99fc40d49ed4f10506
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b0a94547cb4ef46005eedef0efcb0488327472f8dc6f2eeb02009db41767ff22
+size 348414
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..75b1a0320d9e9bdbd6e9e6b859c23bf1caa77745
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:116a0b3d57c56be8e42eb5a825d0290a68b31fbdac3bf0aec39914d7c9678ab3
+size 443359
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..48bfe3303b01cc83668ada6d3bf0411574a0fe25
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3dd2aab0fcc407642b354701cf645655d4ae91bfd28e8e3e7358550ac6da5d51
+size 90772
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..fe84db309501e76ffd9e6cceadf3f7263625195d
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0bea986fa22cb581125db09f9d5eceac49f5b42aa620da1b101cb3f9250c7902
+size 126769
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/training_log_171e7215-276f-4842-abca-ec38bb794efd.txt b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/training_log_171e7215-276f-4842-abca-ec38bb794efd.txt
new file mode 100644
index 0000000000000000000000000000000000000000..3285ea007de94e3e1987a2df7137e5d1f1869b72
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/training_log_171e7215-276f-4842-abca-ec38bb794efd.txt
@@ -0,0 +1,5132 @@
+[2025-07-07 10:10:52] [Rank 0] PRINT: --- Script Start: Mon Jul 7 10:10:52 2025 ---
+[2025-07-07 10:10:52] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005)
+[2025-07-07 10:10:52] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 10:10:52] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-07 10:10:52] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42
+[2025-07-07 10:10:52] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycling the shards indefinitely supports multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
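+# Note: the "Hyperparameters: Hyperparameters()" line in the log above is
+# expected. The @dataclass attributes are declared without type annotations,
+# so dataclass() registers no fields and the default repr is empty; the values
+# still live on the class, which is why the config dump below reads
+# args.__class__.__dict__ rather than dataclasses.asdict(args).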
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
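+        # (The split at the first '?' assumes each cleaned QA item contains a
+        # single question; items without a '?' were rejected above. The answer
+        # is re-encoded with a leading space a few lines below because GPT-2's
+        # BPE is whitespace-sensitive and the answer follows a space in the
+        # training text.)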
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
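+# Schedule sanity check, using num_iterations=10000 and cooldown_frac=0.8 from the
+# hyperparameters above: get_lr(1000) = 1.0 (x = 0.1, inside the stable phase x < 0.2),
+# get_lr(6000) = 0.55 (x = 0.6, so w = 0.5 and lr = 0.5*1.0 + 0.5*0.1), and
+# get_lr(10000) = 0.1 (x = 1.0, end of the linear cooldown). The attention window
+# grows the same way: 1 block (128 tokens) at step 0, up to 14 blocks
+# (1792 tokens, the first multiple of 128 >= 1728) at the final step.
+# train_loss_sum / train_step_count (next line) accumulate the per-rank training
+# loss between validation points; both are reset after every validation pass below.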
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 10:10:52] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file exactly once (an earlier revision
+        # repeated this block, duplicating every line in the log).
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # If the model returns a (loss, logits) tuple, take the last element,
+            # matching the result[-1] convention used in compute_first_token_accuracy.
+            if isinstance(logits, tuple): logits = logits[-1]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function for total and per-class curves; the x-axis is the training step at which each evaluation ran."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(s) for s in step_data.keys()])
+            values = [step_data[str(s)] for s in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(s) for s in history.keys()])
+        values = [history[str(s)] for s in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
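+    # Summary of the mode -> optimizer mapping implemented above:
+    #   mode 0: Muon: QKVO + MLP      | Adam: (no extra matrices)
+    #   mode 1: Muon: QK              | Adam: VO + MLP
+    #   mode 2: Muon: VO              | Adam: QK + MLP
+    #   mode 3: Muon: QKVO            | Adam: MLP
+    #   mode 4: Muon: MLP             | Adam: QKVO
+    #   mode 5: Muon: (none)          | Adam: QKVO + MLP
+    #   mode 6: Muon: W_2 MLP         | Adam: QKVO + W_1 MLP
+    #   mode 7: Muon: VO + MLP        | Adam: QK
+    #   mode 8: Muon: VO + W_2 MLP    | Adam: QK + W_1 MLP
+    # (Adam additionally always handles the head, embedding, and scalar params.)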
+
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        #dict(params=head_params, lr=0.22),
+        #dict(params=embed_params, lr=0.6),
+        #dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices:  # Only add group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam if desired
+    optimizers = [optimizer1]  # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params:  # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None  # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
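+# Worked example for the two schedules above (sketch; constants taken from this
+# run's config: num_iterations=10000, cooldown_frac=0.8). Runnable only once
+# args is defined, so it is left commented out:
+# assert get_lr(0) == 1.0                 # x=0.0 < 0.2 -> constant phase
+# assert get_lr(2000) == 1.0              # x=0.2, w=1 -> full LR
+# assert abs(get_lr(6000) - 0.55) < 1e-9  # x=0.6, w=0.5 -> 0.5*1.0 + 0.5*0.1
+# assert abs(get_lr(10000) - 0.1) < 1e-9  # x=1.0, w=0 -> decayed to 0.1x
+# The attention window grows with progress: at x=0.5, 1728*0.5 = 864 rounds up
+# to 896 (next multiple of 128), i.e. get_window_size_blocks(5000) -> 7 blocks.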
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
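+        # Worked numbers for this run (sketch; world_size inferred from
+        # val_batch_size / val_seq_len = 262144 / 65536 = 4):
+        #   val_num_steps = 1966080 // 262144 = 7
+        #   tokens evaluated = 7 * 262144 = 1835008, so 131072 tokens are
+        #   skipped, which is exactly what the divisibility warning reports.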
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        #if world_size > 1:
+        #    ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #    ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #    ft_total_tensor = torch.tensor(ft_total, device=device)
+        #    dist.broadcast(ft_acc_tensor, 0)
+        #    dist.broadcast(ft_correct_tensor, 0)
+        #    dist.broadcast(ft_total_tensor, 0)
+        #    first_token_acc = ft_acc_tensor.item()
+        #    ft_correct = int(ft_correct_tensor.item())
+        #    ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
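+            # Layout of `history` as consumed by plot_curves (per the code above):
+            #   history['per_class_loss'][group_id][step_str] -> float loss
+            #   history['per_class_acc'][group_id][step_str]  -> float accuracy
+            #   history['total_loss'][step_str]               -> float loss
+            # e.g. history['per_class_loss']['3']['500'] is group 3's loss at step 500.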
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 10:10:53] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 10:10:53] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 10:10:53] [Rank 0] PRINT: Constructing model... +[2025-07-07 10:10:53] [Rank 0] PRINT: Constructing model... +[2025-07-07 10:10:55] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 10:10:55] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 10:10:55] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 10:10:55] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 10:10:55] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 10:10:55] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 10:10:56] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 10:10:56] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 10:10:56] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 10:10:56] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 10:10:56] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 10:10:56] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 10:10:56] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 10:10:56] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 10:10:56] [Rank 0] PRINT: Model returns: +[2025-07-07 10:10:56] [Rank 0] PRINT: Model returns: +[2025-07-07 10:10:56] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 10:10:56] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 10:10:56] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 10:10:56] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 10:10:56] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-07 10:10:56] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-07 10:10:56] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 10:10:56] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 10:10:56] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 10:10:56] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 10:10:56] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 10:10:56] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 10:10:56] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 10:10:56] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 10:10:56] [Rank 0] PRINT: Starting warmup... +[2025-07-07 10:10:56] [Rank 0] PRINT: Starting warmup... +[2025-07-07 10:12:03] [Rank 0] PRINT: Warmup complete. +[2025-07-07 10:12:03] [Rank 0] PRINT: Warmup complete. +[2025-07-07 10:12:03] [Rank 0] PRINT: Starting training... +[2025-07-07 10:12:03] [Rank 0] PRINT: Starting training... +[2025-07-07 10:12:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 10:12:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 10:12:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 10:12:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 10:12:12] [Rank 0] step:21/10000 train_time:816ms step_avg:38.84ms +[2025-07-07 10:12:12] [Rank 0] step:21/10000 train_time:816ms step_avg:38.84ms +[2025-07-07 10:12:13] [Rank 0] step:41/10000 train_time:2144ms step_avg:52.30ms +[2025-07-07 10:12:13] [Rank 0] step:41/10000 train_time:2144ms step_avg:52.30ms +[2025-07-07 10:12:14] [Rank 0] step:61/10000 train_time:3473ms step_avg:56.93ms +[2025-07-07 10:12:14] [Rank 0] step:61/10000 train_time:3473ms step_avg:56.93ms +[2025-07-07 10:12:16] [Rank 0] step:81/10000 train_time:4804ms step_avg:59.31ms +[2025-07-07 10:12:16] [Rank 0] step:81/10000 train_time:4804ms step_avg:59.31ms +[2025-07-07 10:12:17] [Rank 0] step:101/10000 train_time:6134ms step_avg:60.73ms +[2025-07-07 10:12:17] [Rank 0] step:101/10000 train_time:6134ms step_avg:60.73ms +[2025-07-07 10:12:18] [Rank 0] step:121/10000 train_time:7462ms step_avg:61.67ms +[2025-07-07 10:12:18] [Rank 0] step:121/10000 train_time:7462ms step_avg:61.67ms +[2025-07-07 10:12:20] [Rank 0] step:141/10000 train_time:8791ms step_avg:62.35ms +[2025-07-07 10:12:20] [Rank 0] step:141/10000 train_time:8791ms step_avg:62.35ms +[2025-07-07 10:12:21] [Rank 0] step:161/10000 train_time:10121ms step_avg:62.86ms +[2025-07-07 10:12:21] [Rank 0] step:161/10000 train_time:10121ms step_avg:62.86ms +[2025-07-07 10:12:22] [Rank 0] step:181/10000 train_time:11503ms step_avg:63.55ms +[2025-07-07 10:12:22] [Rank 0] step:181/10000 train_time:11503ms step_avg:63.55ms +[2025-07-07 10:12:24] [Rank 0] step:201/10000 train_time:12850ms step_avg:63.93ms +[2025-07-07 10:12:24] [Rank 0] step:201/10000 train_time:12850ms step_avg:63.93ms +[2025-07-07 10:12:25] [Rank 0] step:221/10000 train_time:14183ms step_avg:64.18ms +[2025-07-07 10:12:25] [Rank 0] step:221/10000 train_time:14183ms step_avg:64.18ms +[2025-07-07 10:12:26] [Rank 0] step:241/10000 train_time:15516ms step_avg:64.38ms +[2025-07-07 10:12:26] [Rank 0] step:241/10000 train_time:15516ms step_avg:64.38ms +[2025-07-07 10:12:28] [Rank 0] step:261/10000 train_time:16851ms step_avg:64.56ms +[2025-07-07 10:12:28] [Rank 0] step:261/10000 train_time:16851ms step_avg:64.56ms +[2025-07-07 10:12:29] [Rank 0] step:281/10000 train_time:18188ms step_avg:64.73ms +[2025-07-07 10:12:29] [Rank 0] step:281/10000 train_time:18188ms step_avg:64.73ms +[2025-07-07 10:12:30] [Rank 0] step:301/10000 train_time:19526ms step_avg:64.87ms +[2025-07-07 10:12:30] [Rank 0] step:301/10000 train_time:19526ms step_avg:64.87ms +[2025-07-07 10:12:32] [Rank 0] step:321/10000 train_time:20865ms step_avg:65.00ms +[2025-07-07 10:12:32] [Rank 0] step:321/10000 train_time:20865ms step_avg:65.00ms +[2025-07-07 10:12:33] [Rank 0] step:341/10000 train_time:22205ms step_avg:65.12ms +[2025-07-07 10:12:33] [Rank 0] step:341/10000 train_time:22205ms step_avg:65.12ms +[2025-07-07 10:12:35] [Rank 0] step:361/10000 train_time:23800ms step_avg:65.93ms +[2025-07-07 10:12:35] [Rank 0] step:361/10000 train_time:23800ms step_avg:65.93ms +[2025-07-07 10:12:36] [Rank 0] step:381/10000 train_time:24950ms step_avg:65.49ms +[2025-07-07 10:12:36] [Rank 0] step:381/10000 train_time:24950ms step_avg:65.49ms +[2025-07-07 10:12:37] [Rank 0] step:401/10000 train_time:26292ms step_avg:65.57ms +[2025-07-07 10:12:37] [Rank 0] step:401/10000 train_time:26292ms step_avg:65.57ms +[2025-07-07 10:12:39] [Rank 0] step:421/10000 train_time:27635ms step_avg:65.64ms 
+[2025-07-07 10:12:39] [Rank 0] step:421/10000 train_time:27635ms step_avg:65.64ms +[2025-07-07 10:12:40] [Rank 0] step:441/10000 train_time:28978ms step_avg:65.71ms +[2025-07-07 10:12:40] [Rank 0] step:441/10000 train_time:28978ms step_avg:65.71ms +[2025-07-07 10:12:41] [Rank 0] step:461/10000 train_time:30322ms step_avg:65.77ms +[2025-07-07 10:12:41] [Rank 0] step:461/10000 train_time:30322ms step_avg:65.77ms +[2025-07-07 10:12:43] [Rank 0] step:481/10000 train_time:31665ms step_avg:65.83ms +[2025-07-07 10:12:43] [Rank 0] step:481/10000 train_time:31665ms step_avg:65.83ms +[2025-07-07 10:12:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 10:12:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 10:12:45] [Rank 0] PRINT: step:500/10000 train_loss:7.0342 val_loss:4.8844 train_time:33621ms step_avg:67.24ms +[2025-07-07 10:12:45] [Rank 0] PRINT: step:500/10000 train_loss:7.0342 val_loss:4.8844 train_time:33621ms step_avg:67.24ms +[2025-07-07 10:12:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 10:12:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 10:12:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 10:12:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 10:12:45] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 10:12:45] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 10:18:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 10:18:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 10:18:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 10:18:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 10:18:03] [Rank 0] Total Loss: 5.9254 +[2025-07-07 10:18:03] [Rank 0] Total Loss: 5.9254 +[2025-07-07 10:18:03] [Rank 0] Total FTA: 0.0595 +[2025-07-07 10:18:03] [Rank 0] Total FTA: 0.0595 +[2025-07-07 10:18:03] [Rank 0] Group 0 Loss: 5.8043 +[2025-07-07 10:18:03] [Rank 0] Group 0 Loss: 5.8043 +[2025-07-07 10:18:03] [Rank 0] Group 1 Loss: 5.9647 +[2025-07-07 10:18:03] [Rank 0] Group 1 Loss: 5.9647 +[2025-07-07 10:18:03] [Rank 0] Group 2 Loss: 6.0680 +[2025-07-07 10:18:03] [Rank 0] Group 2 Loss: 6.0680 +[2025-07-07 10:18:03] [Rank 0] Group 3 Loss: 5.8850 +[2025-07-07 10:18:03] [Rank 0] Group 3 Loss: 5.8850 +[2025-07-07 10:18:03] [Rank 0] Group 4 Loss: 5.9681 +[2025-07-07 10:18:03] [Rank 0] Group 4 Loss: 5.9681 +[2025-07-07 10:18:03] [Rank 0] Group 5 Loss: 5.9238 +[2025-07-07 10:18:03] [Rank 0] Group 5 Loss: 5.9238 +[2025-07-07 10:18:03] [Rank 0] Group 6 Loss: 5.9341 +[2025-07-07 10:18:03] [Rank 0] Group 6 Loss: 5.9341 +[2025-07-07 10:18:03] [Rank 0] Group 7 Loss: 5.9456 +[2025-07-07 10:18:03] [Rank 0] Group 7 Loss: 5.9456 +[2025-07-07 10:18:03] [Rank 0] Group 8 Loss: 5.8947 +[2025-07-07 10:18:03] [Rank 0] Group 8 Loss: 5.8947 +[2025-07-07 10:18:03] [Rank 0] Group 9 Loss: 5.9054 +[2025-07-07 10:18:03] [Rank 0] Group 9 Loss: 5.9054 +[2025-07-07 10:18:03] [Rank 0] Group 10 Loss: 5.9218 +[2025-07-07 10:18:03] [Rank 0] Group 10 Loss: 5.9218 +[2025-07-07 10:18:03] [Rank 0] Group 11 Loss: 5.9553 +[2025-07-07 10:18:03] [Rank 0] Group 11 Loss: 5.9553 +[2025-07-07 10:18:03] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-07 10:18:03] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-07 
10:18:03] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 10:18:03] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 10:18:03] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 10:18:03] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 10:18:03] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-07 10:18:03] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-07 10:18:03] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 10:18:03] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 10:18:03] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 10:18:03] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 10:18:03] [Rank 0] Group 6 FTA: 0.0625 +[2025-07-07 10:18:03] [Rank 0] Group 6 FTA: 0.0625 +[2025-07-07 10:18:03] [Rank 0] Group 7 FTA: 0.0599 +[2025-07-07 10:18:03] [Rank 0] Group 7 FTA: 0.0599 +[2025-07-07 10:18:03] [Rank 0] Group 8 FTA: 0.0755 +[2025-07-07 10:18:03] [Rank 0] Group 8 FTA: 0.0755 +[2025-07-07 10:18:03] [Rank 0] Group 9 FTA: 0.0508 +[2025-07-07 10:18:03] [Rank 0] Group 9 FTA: 0.0508 +[2025-07-07 10:18:03] [Rank 0] Group 10 FTA: 0.0605 +[2025-07-07 10:18:03] [Rank 0] Group 10 FTA: 0.0605 +[2025-07-07 10:18:03] [Rank 0] Group 11 FTA: 0.0605 +[2025-07-07 10:18:03] [Rank 0] Group 11 FTA: 0.0605 +[2025-07-07 10:18:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-07 10:18:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-07 10:18:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-07 10:18:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-07 10:18:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-07 10:18:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-07 10:18:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-07 10:18:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-07 10:18:04] [Rank 0] step:501/10000 train_time:33630ms step_avg:67.13ms +[2025-07-07 10:18:04] [Rank 0] step:501/10000 train_time:33630ms step_avg:67.13ms +[2025-07-07 10:18:06] [Rank 0] step:521/10000 train_time:34386ms step_avg:66.00ms +[2025-07-07 10:18:06] [Rank 0] step:521/10000 train_time:34386ms step_avg:66.00ms +[2025-07-07 10:18:07] [Rank 0] step:541/10000 train_time:35976ms step_avg:66.50ms +[2025-07-07 10:18:07] [Rank 0] step:541/10000 train_time:35976ms step_avg:66.50ms +[2025-07-07 10:18:08] [Rank 0] step:561/10000 train_time:37114ms step_avg:66.16ms +[2025-07-07 10:18:08] [Rank 0] step:561/10000 train_time:37114ms step_avg:66.16ms +[2025-07-07 10:18:10] [Rank 0] step:581/10000 train_time:38452ms step_avg:66.18ms +[2025-07-07 10:18:10] [Rank 0] step:581/10000 train_time:38452ms step_avg:66.18ms +[2025-07-07 10:18:11] [Rank 0] step:601/10000 train_time:39793ms step_avg:66.21ms +[2025-07-07 10:18:11] [Rank 0] step:601/10000 train_time:39793ms step_avg:66.21ms +[2025-07-07 10:18:12] [Rank 0] step:621/10000 train_time:41134ms step_avg:66.24ms +[2025-07-07 10:18:12] [Rank 0] step:621/10000 train_time:41134ms 
step_avg:66.24ms +[2025-07-07 10:18:14] [Rank 0] step:641/10000 train_time:42474ms step_avg:66.26ms +[2025-07-07 10:18:14] [Rank 0] step:641/10000 train_time:42474ms step_avg:66.26ms +[2025-07-07 10:18:15] [Rank 0] step:661/10000 train_time:43816ms step_avg:66.29ms +[2025-07-07 10:18:15] [Rank 0] step:661/10000 train_time:43816ms step_avg:66.29ms +[2025-07-07 10:18:16] [Rank 0] step:681/10000 train_time:45207ms step_avg:66.38ms +[2025-07-07 10:18:16] [Rank 0] step:681/10000 train_time:45207ms step_avg:66.38ms +[2025-07-07 10:18:18] [Rank 0] step:701/10000 train_time:46549ms step_avg:66.40ms +[2025-07-07 10:18:18] [Rank 0] step:701/10000 train_time:46549ms step_avg:66.40ms +[2025-07-07 10:18:19] [Rank 0] step:721/10000 train_time:47891ms step_avg:66.42ms +[2025-07-07 10:18:19] [Rank 0] step:721/10000 train_time:47891ms step_avg:66.42ms +[2025-07-07 10:18:20] [Rank 0] step:741/10000 train_time:49277ms step_avg:66.50ms +[2025-07-07 10:18:20] [Rank 0] step:741/10000 train_time:49277ms step_avg:66.50ms +[2025-07-07 10:18:22] [Rank 0] step:761/10000 train_time:50624ms step_avg:66.52ms +[2025-07-07 10:18:22] [Rank 0] step:761/10000 train_time:50624ms step_avg:66.52ms +[2025-07-07 10:18:23] [Rank 0] step:781/10000 train_time:51977ms step_avg:66.55ms +[2025-07-07 10:18:23] [Rank 0] step:781/10000 train_time:51977ms step_avg:66.55ms +[2025-07-07 10:18:25] [Rank 0] step:801/10000 train_time:53331ms step_avg:66.58ms +[2025-07-07 10:18:25] [Rank 0] step:801/10000 train_time:53331ms step_avg:66.58ms +[2025-07-07 10:18:26] [Rank 0] step:821/10000 train_time:54683ms step_avg:66.61ms +[2025-07-07 10:18:26] [Rank 0] step:821/10000 train_time:54683ms step_avg:66.61ms +[2025-07-07 10:18:27] [Rank 0] step:841/10000 train_time:56034ms step_avg:66.63ms +[2025-07-07 10:18:27] [Rank 0] step:841/10000 train_time:56034ms step_avg:66.63ms +[2025-07-07 10:18:29] [Rank 0] step:861/10000 train_time:57387ms step_avg:66.65ms +[2025-07-07 10:18:29] [Rank 0] step:861/10000 train_time:57387ms step_avg:66.65ms +[2025-07-07 10:18:30] [Rank 0] step:881/10000 train_time:58740ms step_avg:66.67ms +[2025-07-07 10:18:30] [Rank 0] step:881/10000 train_time:58740ms step_avg:66.67ms +[2025-07-07 10:18:31] [Rank 0] step:901/10000 train_time:60344ms step_avg:66.98ms +[2025-07-07 10:18:31] [Rank 0] step:901/10000 train_time:60344ms step_avg:66.98ms +[2025-07-07 10:18:33] [Rank 0] step:921/10000 train_time:61506ms step_avg:66.78ms +[2025-07-07 10:18:33] [Rank 0] step:921/10000 train_time:61506ms step_avg:66.78ms +[2025-07-07 10:18:34] [Rank 0] step:941/10000 train_time:62858ms step_avg:66.80ms +[2025-07-07 10:18:34] [Rank 0] step:941/10000 train_time:62858ms step_avg:66.80ms +[2025-07-07 10:18:35] [Rank 0] step:961/10000 train_time:64211ms step_avg:66.82ms +[2025-07-07 10:18:35] [Rank 0] step:961/10000 train_time:64211ms step_avg:66.82ms +[2025-07-07 10:18:37] [Rank 0] step:981/10000 train_time:65566ms step_avg:66.84ms +[2025-07-07 10:18:37] [Rank 0] step:981/10000 train_time:65566ms step_avg:66.84ms +[2025-07-07 10:18:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 10:18:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 10:18:39] [Rank 0] PRINT: step:1000/10000 train_loss:3.7144 val_loss:2.7505 train_time:67533ms step_avg:67.53ms +[2025-07-07 10:18:39] [Rank 0] PRINT: step:1000/10000 train_loss:3.7144 val_loss:2.7505 train_time:67533ms step_avg:67.53ms +[2025-07-07 10:18:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 10:18:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 10:18:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 10:18:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 10:18:39] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 10:18:39] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 10:23:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 10:23:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 10:23:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 10:23:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 10:23:55] [Rank 0] Total Loss: 4.7338 +[2025-07-07 10:23:55] [Rank 0] Total Loss: 4.7338 +[2025-07-07 10:23:55] [Rank 0] Total FTA: 0.0797 +[2025-07-07 10:23:55] [Rank 0] Total FTA: 0.0797 +[2025-07-07 10:23:55] [Rank 0] Group 0 Loss: 4.6947 +[2025-07-07 10:23:55] [Rank 0] Group 0 Loss: 4.6947 +[2025-07-07 10:23:55] [Rank 0] Group 1 Loss: 4.8513 +[2025-07-07 10:23:55] [Rank 0] Group 1 Loss: 4.8513 +[2025-07-07 10:23:56] [Rank 0] Group 2 Loss: 4.7918 +[2025-07-07 10:23:56] [Rank 0] Group 2 Loss: 4.7918 +[2025-07-07 10:23:56] [Rank 0] Group 3 Loss: 4.6452 +[2025-07-07 10:23:56] [Rank 0] Group 3 Loss: 4.6452 +[2025-07-07 10:23:56] [Rank 0] Group 4 Loss: 4.7847 +[2025-07-07 10:23:56] [Rank 0] Group 4 Loss: 4.7847 +[2025-07-07 10:23:56] [Rank 0] Group 5 Loss: 4.7168 +[2025-07-07 10:23:56] [Rank 0] Group 5 Loss: 4.7168 +[2025-07-07 10:23:56] [Rank 0] Group 6 Loss: 4.7488 +[2025-07-07 10:23:56] [Rank 0] Group 6 Loss: 4.7488 +[2025-07-07 10:23:56] [Rank 0] Group 7 Loss: 4.7036 +[2025-07-07 10:23:56] [Rank 0] Group 7 Loss: 4.7036 +[2025-07-07 10:23:56] [Rank 0] Group 8 Loss: 4.7204 +[2025-07-07 10:23:56] [Rank 0] Group 8 Loss: 4.7204 +[2025-07-07 10:23:56] [Rank 0] Group 9 Loss: 4.7011 +[2025-07-07 10:23:56] [Rank 0] Group 9 Loss: 4.7011 +[2025-07-07 10:23:56] [Rank 0] Group 10 Loss: 4.7156 +[2025-07-07 10:23:56] [Rank 0] Group 10 Loss: 4.7156 +[2025-07-07 10:23:56] [Rank 0] Group 11 Loss: 4.7461 +[2025-07-07 10:23:56] [Rank 0] Group 11 Loss: 4.7461 +[2025-07-07 10:23:56] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-07 10:23:56] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-07 10:23:56] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 10:23:56] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 10:23:56] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-07 10:23:56] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-07 10:23:56] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 10:23:56] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 10:23:56] [Rank 0] Group 4 FTA: 0.0365 +[2025-07-07 10:23:56] [Rank 0] Group 4 FTA: 0.0365 +[2025-07-07 10:23:56] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 10:23:56] [Rank 0] Group 5 FTA: 0.0417 +[2025-07-07 10:23:56] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-07 10:23:56] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-07 10:23:56] [Rank 0] Group 7 FTA: 0.0703 +[2025-07-07 10:23:56] [Rank 0] Group 7 FTA: 0.0703 +[2025-07-07 10:23:56] [Rank 0] Group 8 FTA: 0.0859 +[2025-07-07 10:23:56] [Rank 0] Group 8 FTA: 0.0859 +[2025-07-07 10:23:56] [Rank 0] Group 9 FTA: 0.0820 
+[2025-07-07 10:23:56] [Rank 0] Group 9 FTA: 0.0820 +[2025-07-07 10:23:56] [Rank 0] Group 10 FTA: 0.0781 +[2025-07-07 10:23:56] [Rank 0] Group 10 FTA: 0.0781 +[2025-07-07 10:23:56] [Rank 0] Group 11 FTA: 0.0869 +[2025-07-07 10:23:56] [Rank 0] Group 11 FTA: 0.0869 +[2025-07-07 10:23:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-07 10:23:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-07 10:23:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-07 10:23:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-07 10:23:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-07 10:23:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-07 10:23:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-07 10:23:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-07 10:23:57] [Rank 0] step:1001/10000 train_time:67542ms step_avg:67.47ms +[2025-07-07 10:23:57] [Rank 0] step:1001/10000 train_time:67542ms step_avg:67.47ms +[2025-07-07 10:23:58] [Rank 0] step:1021/10000 train_time:68292ms step_avg:66.89ms +[2025-07-07 10:23:58] [Rank 0] step:1021/10000 train_time:68292ms step_avg:66.89ms +[2025-07-07 10:24:00] [Rank 0] step:1041/10000 train_time:69636ms step_avg:66.89ms +[2025-07-07 10:24:00] [Rank 0] step:1041/10000 train_time:69636ms step_avg:66.89ms +[2025-07-07 10:24:01] [Rank 0] step:1061/10000 train_time:70981ms step_avg:66.90ms +[2025-07-07 10:24:01] [Rank 0] step:1061/10000 train_time:70981ms step_avg:66.90ms +[2025-07-07 10:24:03] [Rank 0] step:1081/10000 train_time:72378ms step_avg:66.95ms +[2025-07-07 10:24:03] [Rank 0] step:1081/10000 train_time:72378ms step_avg:66.95ms +[2025-07-07 10:24:04] [Rank 0] step:1101/10000 train_time:73733ms step_avg:66.97ms +[2025-07-07 10:24:04] [Rank 0] step:1101/10000 train_time:73733ms step_avg:66.97ms +[2025-07-07 10:24:05] [Rank 0] step:1121/10000 train_time:75079ms step_avg:66.98ms +[2025-07-07 10:24:05] [Rank 0] step:1121/10000 train_time:75079ms step_avg:66.98ms +[2025-07-07 10:24:07] [Rank 0] step:1141/10000 train_time:76427ms step_avg:66.98ms +[2025-07-07 10:24:07] [Rank 0] step:1141/10000 train_time:76427ms step_avg:66.98ms +[2025-07-07 10:24:08] [Rank 0] step:1161/10000 train_time:77774ms step_avg:66.99ms +[2025-07-07 10:24:08] [Rank 0] step:1161/10000 train_time:77774ms step_avg:66.99ms +[2025-07-07 10:24:09] [Rank 0] step:1181/10000 train_time:79123ms step_avg:67.00ms +[2025-07-07 10:24:09] [Rank 0] step:1181/10000 train_time:79123ms step_avg:67.00ms +[2025-07-07 10:24:11] [Rank 0] step:1201/10000 train_time:80470ms step_avg:67.00ms +[2025-07-07 10:24:11] [Rank 0] step:1201/10000 train_time:80470ms step_avg:67.00ms +[2025-07-07 10:24:12] [Rank 0] step:1221/10000 train_time:81819ms step_avg:67.01ms +[2025-07-07 10:24:12] [Rank 0] step:1221/10000 train_time:81819ms step_avg:67.01ms 
+[2025-07-07 10:24:13] [Rank 0] step:1241/10000 train_time:83168ms step_avg:67.02ms +[2025-07-07 10:24:13] [Rank 0] step:1241/10000 train_time:83168ms step_avg:67.02ms +[2025-07-07 10:24:15] [Rank 0] step:1261/10000 train_time:84518ms step_avg:67.02ms +[2025-07-07 10:24:15] [Rank 0] step:1261/10000 train_time:84518ms step_avg:67.02ms +[2025-07-07 10:24:16] [Rank 0] step:1281/10000 train_time:85927ms step_avg:67.08ms +[2025-07-07 10:24:16] [Rank 0] step:1281/10000 train_time:85927ms step_avg:67.08ms +[2025-07-07 10:24:17] [Rank 0] step:1301/10000 train_time:87277ms step_avg:67.08ms +[2025-07-07 10:24:17] [Rank 0] step:1301/10000 train_time:87277ms step_avg:67.08ms +[2025-07-07 10:24:19] [Rank 0] step:1321/10000 train_time:88629ms step_avg:67.09ms +[2025-07-07 10:24:19] [Rank 0] step:1321/10000 train_time:88629ms step_avg:67.09ms +[2025-07-07 10:24:20] [Rank 0] step:1341/10000 train_time:89980ms step_avg:67.10ms +[2025-07-07 10:24:20] [Rank 0] step:1341/10000 train_time:89980ms step_avg:67.10ms +[2025-07-07 10:24:21] [Rank 0] step:1361/10000 train_time:91330ms step_avg:67.11ms +[2025-07-07 10:24:21] [Rank 0] step:1361/10000 train_time:91330ms step_avg:67.11ms +[2025-07-07 10:24:23] [Rank 0] step:1381/10000 train_time:92682ms step_avg:67.11ms +[2025-07-07 10:24:23] [Rank 0] step:1381/10000 train_time:92682ms step_avg:67.11ms +[2025-07-07 10:24:24] [Rank 0] step:1401/10000 train_time:94034ms step_avg:67.12ms +[2025-07-07 10:24:24] [Rank 0] step:1401/10000 train_time:94034ms step_avg:67.12ms +[2025-07-07 10:24:26] [Rank 0] step:1421/10000 train_time:95386ms step_avg:67.13ms +[2025-07-07 10:24:26] [Rank 0] step:1421/10000 train_time:95386ms step_avg:67.13ms +[2025-07-07 10:24:27] [Rank 0] step:1441/10000 train_time:96738ms step_avg:67.13ms +[2025-07-07 10:24:27] [Rank 0] step:1441/10000 train_time:96738ms step_avg:67.13ms +[2025-07-07 10:24:28] [Rank 0] step:1461/10000 train_time:98150ms step_avg:67.18ms +[2025-07-07 10:24:28] [Rank 0] step:1461/10000 train_time:98150ms step_avg:67.18ms +[2025-07-07 10:24:30] [Rank 0] step:1481/10000 train_time:99603ms step_avg:67.25ms +[2025-07-07 10:24:30] [Rank 0] step:1481/10000 train_time:99603ms step_avg:67.25ms +[2025-07-07 10:24:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 10:24:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 10:24:32] [Rank 0] PRINT: step:1500/10000 train_loss:2.2515 val_loss:1.9323 train_time:101570ms step_avg:67.71ms +[2025-07-07 10:24:32] [Rank 0] PRINT: step:1500/10000 train_loss:2.2515 val_loss:1.9323 train_time:101570ms step_avg:67.71ms +[2025-07-07 10:24:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 10:24:32] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 10:24:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 10:24:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 10:24:32] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 10:24:32] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 10:29:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 10:29:49] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 10:29:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 10:29:49] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 10:29:49] [Rank 0] Total Loss: 4.6602 +[2025-07-07 10:29:49] [Rank 0] Total Loss: 4.6602 +[2025-07-07 10:29:49] [Rank 0] Total FTA: 0.0865 +[2025-07-07 10:29:49] [Rank 0] Total FTA: 0.0865 +[2025-07-07 10:29:49] [Rank 0] Group 0 Loss: 4.8757 +[2025-07-07 10:29:49] [Rank 0] Group 0 Loss: 4.8757 +[2025-07-07 10:29:49] [Rank 0] Group 1 Loss: 4.6665 +[2025-07-07 10:29:49] [Rank 0] Group 1 Loss: 4.6665 +[2025-07-07 10:29:49] [Rank 0] Group 2 Loss: 4.4543 +[2025-07-07 10:29:49] [Rank 0] Group 2 Loss: 4.4543 +[2025-07-07 10:29:50] [Rank 0] Group 3 Loss: 4.7793 +[2025-07-07 10:29:50] [Rank 0] Group 3 Loss: 4.7793 +[2025-07-07 10:29:50] [Rank 0] Group 4 Loss: 4.6757 +[2025-07-07 10:29:50] [Rank 0] Group 4 Loss: 4.6757 +[2025-07-07 10:29:50] [Rank 0] Group 5 Loss: 4.5604 +[2025-07-07 10:29:50] [Rank 0] Group 5 Loss: 4.5604 +[2025-07-07 10:29:50] [Rank 0] Group 6 Loss: 4.5443 +[2025-07-07 10:29:50] [Rank 0] Group 6 Loss: 4.5443 +[2025-07-07 10:29:50] [Rank 0] Group 7 Loss: 4.6480 +[2025-07-07 10:29:50] [Rank 0] Group 7 Loss: 4.6480 +[2025-07-07 10:29:50] [Rank 0] Group 8 Loss: 4.6116 +[2025-07-07 10:29:50] [Rank 0] Group 8 Loss: 4.6116 +[2025-07-07 10:29:50] [Rank 0] Group 9 Loss: 4.6119 +[2025-07-07 10:29:50] [Rank 0] Group 9 Loss: 4.6119 +[2025-07-07 10:29:50] [Rank 0] Group 10 Loss: 4.6494 +[2025-07-07 10:29:50] [Rank 0] Group 10 Loss: 4.6494 +[2025-07-07 10:29:50] [Rank 0] Group 11 Loss: 4.6437 +[2025-07-07 10:29:50] [Rank 0] Group 11 Loss: 4.6437 +[2025-07-07 10:29:50] [Rank 0] Group 0 FTA: 0.1664 +[2025-07-07 10:29:50] [Rank 0] Group 0 FTA: 0.1664 +[2025-07-07 10:29:50] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 10:29:50] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 10:29:50] [Rank 0] Group 2 FTA: 0.0885 +[2025-07-07 10:29:50] [Rank 0] Group 2 FTA: 0.0885 +[2025-07-07 10:29:50] [Rank 0] Group 3 FTA: 0.0521 +[2025-07-07 10:29:50] [Rank 0] Group 3 FTA: 0.0521 +[2025-07-07 10:29:50] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 10:29:50] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 10:29:50] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 10:29:50] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 10:29:50] [Rank 0] Group 6 FTA: 0.0885 +[2025-07-07 10:29:50] [Rank 0] Group 6 FTA: 0.0885 +[2025-07-07 10:29:50] [Rank 0] Group 7 FTA: 0.0938 +[2025-07-07 10:29:50] [Rank 0] Group 7 FTA: 0.0938 +[2025-07-07 10:29:50] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-07 10:29:50] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-07 10:29:50] [Rank 0] Group 9 FTA: 0.0586 +[2025-07-07 10:29:50] [Rank 0] Group 9 FTA: 0.0586 +[2025-07-07 10:29:50] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 10:29:50] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 10:29:50] [Rank 0] Group 11 FTA: 0.0967 +[2025-07-07 10:29:50] [Rank 0] Group 11 FTA: 0.0967 +[2025-07-07 10:29:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-07 10:29:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-07 10:29:51] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-07 10:29:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-07 10:29:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-07 10:29:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-07 10:29:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-07 10:29:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-07 10:29:51] [Rank 0] step:1501/10000 train_time:101579ms step_avg:67.67ms +[2025-07-07 10:29:51] [Rank 0] step:1501/10000 train_time:101579ms step_avg:67.67ms +[2025-07-07 10:29:53] [Rank 0] step:1521/10000 train_time:102345ms step_avg:67.29ms +[2025-07-07 10:29:53] [Rank 0] step:1521/10000 train_time:102345ms step_avg:67.29ms +[2025-07-07 10:29:54] [Rank 0] step:1541/10000 train_time:103689ms step_avg:67.29ms +[2025-07-07 10:29:54] [Rank 0] step:1541/10000 train_time:103689ms step_avg:67.29ms +[2025-07-07 10:29:55] [Rank 0] step:1561/10000 train_time:105035ms step_avg:67.29ms +[2025-07-07 10:29:55] [Rank 0] step:1561/10000 train_time:105035ms step_avg:67.29ms +[2025-07-07 10:29:57] [Rank 0] step:1581/10000 train_time:106379ms step_avg:67.29ms +[2025-07-07 10:29:57] [Rank 0] step:1581/10000 train_time:106379ms step_avg:67.29ms +[2025-07-07 10:29:58] [Rank 0] step:1601/10000 train_time:107725ms step_avg:67.29ms +[2025-07-07 10:29:58] [Rank 0] step:1601/10000 train_time:107725ms step_avg:67.29ms +[2025-07-07 10:29:59] [Rank 0] step:1621/10000 train_time:109070ms step_avg:67.29ms +[2025-07-07 10:29:59] [Rank 0] step:1621/10000 train_time:109070ms step_avg:67.29ms +[2025-07-07 10:30:01] [Rank 0] step:1641/10000 train_time:110476ms step_avg:67.32ms +[2025-07-07 10:30:01] [Rank 0] step:1641/10000 train_time:110476ms step_avg:67.32ms +[2025-07-07 10:30:02] [Rank 0] step:1661/10000 train_time:111824ms step_avg:67.32ms +[2025-07-07 10:30:02] [Rank 0] step:1661/10000 train_time:111824ms step_avg:67.32ms +[2025-07-07 10:30:03] [Rank 0] step:1681/10000 train_time:113176ms step_avg:67.33ms +[2025-07-07 10:30:03] [Rank 0] step:1681/10000 train_time:113176ms step_avg:67.33ms +[2025-07-07 10:30:05] [Rank 0] step:1701/10000 train_time:114523ms step_avg:67.33ms +[2025-07-07 10:30:05] [Rank 0] step:1701/10000 train_time:114523ms step_avg:67.33ms +[2025-07-07 10:30:06] [Rank 0] step:1721/10000 train_time:115872ms step_avg:67.33ms +[2025-07-07 10:30:06] [Rank 0] step:1721/10000 train_time:115872ms step_avg:67.33ms +[2025-07-07 10:30:07] [Rank 0] step:1741/10000 train_time:117222ms step_avg:67.33ms +[2025-07-07 10:30:07] [Rank 0] step:1741/10000 train_time:117222ms step_avg:67.33ms +[2025-07-07 10:30:09] [Rank 0] step:1761/10000 train_time:118572ms step_avg:67.33ms +[2025-07-07 10:30:09] [Rank 0] step:1761/10000 train_time:118572ms step_avg:67.33ms +[2025-07-07 10:30:10] [Rank 0] step:1781/10000 train_time:119922ms step_avg:67.33ms +[2025-07-07 10:30:10] [Rank 0] step:1781/10000 train_time:119922ms step_avg:67.33ms +[2025-07-07 10:30:12] [Rank 0] step:1801/10000 train_time:121274ms step_avg:67.34ms +[2025-07-07 10:30:12] [Rank 
0] step:1801/10000 train_time:121274ms step_avg:67.34ms +[2025-07-07 10:30:13] [Rank 0] step:1821/10000 train_time:122769ms step_avg:67.42ms +[2025-07-07 10:30:13] [Rank 0] step:1821/10000 train_time:122769ms step_avg:67.42ms +[2025-07-07 10:30:14] [Rank 0] step:1841/10000 train_time:124120ms step_avg:67.42ms +[2025-07-07 10:30:14] [Rank 0] step:1841/10000 train_time:124120ms step_avg:67.42ms +[2025-07-07 10:30:16] [Rank 0] step:1861/10000 train_time:125472ms step_avg:67.42ms +[2025-07-07 10:30:16] [Rank 0] step:1861/10000 train_time:125472ms step_avg:67.42ms +[2025-07-07 10:30:17] [Rank 0] step:1881/10000 train_time:126824ms step_avg:67.42ms +[2025-07-07 10:30:17] [Rank 0] step:1881/10000 train_time:126824ms step_avg:67.42ms +[2025-07-07 10:30:18] [Rank 0] step:1901/10000 train_time:128177ms step_avg:67.43ms +[2025-07-07 10:30:18] [Rank 0] step:1901/10000 train_time:128177ms step_avg:67.43ms +[2025-07-07 10:30:20] [Rank 0] step:1921/10000 train_time:129528ms step_avg:67.43ms +[2025-07-07 10:30:20] [Rank 0] step:1921/10000 train_time:129528ms step_avg:67.43ms +[2025-07-07 10:30:21] [Rank 0] step:1941/10000 train_time:130880ms step_avg:67.43ms +[2025-07-07 10:30:21] [Rank 0] step:1941/10000 train_time:130880ms step_avg:67.43ms +[2025-07-07 10:30:22] [Rank 0] step:1961/10000 train_time:132232ms step_avg:67.43ms +[2025-07-07 10:30:22] [Rank 0] step:1961/10000 train_time:132232ms step_avg:67.43ms +[2025-07-07 10:30:24] [Rank 0] step:1981/10000 train_time:133584ms step_avg:67.43ms +[2025-07-07 10:30:24] [Rank 0] step:1981/10000 train_time:133584ms step_avg:67.43ms +[2025-07-07 10:30:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 10:30:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 10:30:26] [Rank 0] PRINT: step:2000/10000 train_loss:1.7721 val_loss:1.6716 train_time:135605ms step_avg:67.80ms +[2025-07-07 10:30:26] [Rank 0] PRINT: step:2000/10000 train_loss:1.7721 val_loss:1.6716 train_time:135605ms step_avg:67.80ms +[2025-07-07 10:30:26] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 10:30:26] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 10:30:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 10:30:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 10:30:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 10:35:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 10:35:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 10:35:45] [Rank 0] Total Loss: 4.8097
+[2025-07-07 10:35:45] [Rank 0] Total FTA: 0.0852
+[2025-07-07 10:35:45] [Rank 0] Group 0 Loss: 5.1028
+[2025-07-07 10:35:45] [Rank 0] Group 1 Loss: 4.8770
+[2025-07-07 10:35:45] [Rank 0] Group 2 Loss: 4.5316
+[2025-07-07 10:35:45] [Rank 0] Group 3 Loss: 4.8577
+[2025-07-07 10:35:45] [Rank 0] Group 4 Loss: 4.7360
+[2025-07-07 10:35:45] [Rank 0] Group 5 Loss: 4.7160
+[2025-07-07 10:35:45] [Rank 0] Group 6 Loss: 4.7378
+[2025-07-07 10:35:45] [Rank 0] Group 7 Loss: 4.8093
+[2025-07-07 10:35:45] [Rank 0] Group 8 Loss: 4.7670
+[2025-07-07 10:35:45] [Rank 0] Group 9 Loss: 4.7439
+[2025-07-07 10:35:45] [Rank 0] Group 10 Loss: 4.7872
+[2025-07-07 10:35:45] [Rank 0] Group 11 Loss: 4.7840
+[2025-07-07 10:35:45] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 10:35:45] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 10:35:45] [Rank 0] Group 2 FTA: 0.0964
+[2025-07-07 10:35:45] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 10:35:45] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 10:35:45] [Rank 0] Group 5 FTA: 0.1068
+[2025-07-07 10:35:45] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-07 10:35:45] [Rank 0] Group 7 FTA: 0.0755
+[2025-07-07 10:35:45] [Rank 0] Group 8 FTA: 0.0599
+[2025-07-07 10:35:45] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-07 10:35:45] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-07 10:35:45] [Rank 0] Group 11 FTA: 0.0957
+[2025-07-07 10:35:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 10:35:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 10:35:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 10:35:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 10:35:47] [Rank 0] step:2001/10000 train_time:135615ms step_avg:67.77ms
+[2025-07-07 10:35:48] [Rank 0] step:2021/10000 train_time:136352ms step_avg:67.47ms
+[2025-07-07 10:35:49] [Rank 0] step:2041/10000 train_time:137695ms step_avg:67.46ms
+[2025-07-07 10:35:51] [Rank 0] step:2061/10000 train_time:139041ms step_avg:67.46ms
+[2025-07-07 10:35:52] [Rank 0] step:2081/10000 train_time:140386ms step_avg:67.46ms
+[2025-07-07 10:35:53] [Rank 0] step:2101/10000 train_time:141735ms step_avg:67.46ms
+[2025-07-07 10:35:55] [Rank 0] step:2121/10000 train_time:143081ms step_avg:67.46ms
+[2025-07-07 10:35:56] [Rank 0] step:2141/10000 train_time:144428ms step_avg:67.46ms
+[2025-07-07 10:35:57] [Rank 0] step:2161/10000 train_time:146459ms step_avg:67.77ms
+[2025-07-07 10:35:59] [Rank 0] step:2181/10000 train_time:147187ms step_avg:67.49ms
+[2025-07-07 10:36:00] [Rank 0] step:2201/10000 train_time:148537ms step_avg:67.49ms
+[2025-07-07 10:36:02] [Rank 0] step:2221/10000 train_time:149885ms step_avg:67.49ms
+[2025-07-07 10:36:03] [Rank 0] step:2241/10000 train_time:151246ms step_avg:67.49ms
+[2025-07-07 10:36:04] [Rank 0] step:2261/10000 train_time:152621ms step_avg:67.50ms
+[2025-07-07 10:36:06] [Rank 0] step:2281/10000 train_time:153996ms step_avg:67.51ms
+[2025-07-07 10:36:07] [Rank 0] step:2301/10000 train_time:155372ms step_avg:67.52ms
+[2025-07-07 10:36:08] [Rank 0] step:2321/10000 train_time:156747ms step_avg:67.53ms
+[2025-07-07 10:36:10] [Rank 0] step:2341/10000 train_time:158124ms step_avg:67.55ms
+[2025-07-07 10:36:11] [Rank 0] step:2361/10000 train_time:159522ms step_avg:67.57ms
+[2025-07-07 10:36:13] [Rank 0] step:2381/10000 train_time:160898ms step_avg:67.58ms
+[2025-07-07 10:36:14] [Rank 0] step:2401/10000 train_time:162274ms step_avg:67.59ms
+[2025-07-07 10:36:15] [Rank 0] step:2421/10000 train_time:163650ms step_avg:67.60ms
+[2025-07-07 10:36:17] [Rank 0] step:2441/10000 train_time:165026ms step_avg:67.61ms
+[2025-07-07 10:36:18] [Rank 0] step:2461/10000 train_time:166401ms step_avg:67.62ms
+[2025-07-07 10:36:19] [Rank 0] step:2481/10000 train_time:167778ms step_avg:67.63ms
+[2025-07-07 10:36:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 10:36:22] [Rank 0] PRINT: step:2500/10000 train_loss:1.6101 val_loss:1.5543 train_time:169781ms step_avg:67.91ms
+[2025-07-07 10:36:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 10:36:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 10:36:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 10:41:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 10:41:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 10:41:41] [Rank 0] Total Loss: 4.8157
+[2025-07-07 10:41:41] [Rank 0] Total FTA: 0.0889
+[2025-07-07 10:41:41] [Rank 0] Group 0 Loss: 5.0664
+[2025-07-07 10:41:41] [Rank 0] Group 1 Loss: 4.7750
+[2025-07-07 10:41:41] [Rank 0] Group 2 Loss: 4.4425
+[2025-07-07 10:41:41] [Rank 0] Group 3 Loss: 4.7559
+[2025-07-07 10:41:41] [Rank 0] Group 4 Loss: 4.8265
+[2025-07-07 10:41:41] [Rank 0] Group 5 Loss: 4.7416
+[2025-07-07 10:41:41] [Rank 0] Group 6 Loss: 4.7251
+[2025-07-07 10:41:41] [Rank 0] Group 7 Loss: 4.8055
+[2025-07-07 10:41:41] [Rank 0] Group 8 Loss: 4.8727
+[2025-07-07 10:41:41] [Rank 0] Group 9 Loss: 4.8453
+[2025-07-07 10:41:41] [Rank 0] Group 10 Loss: 4.8305
+[2025-07-07 10:41:41] [Rank 0] Group 11 Loss: 4.8303
+[2025-07-07 10:41:41] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-07 10:41:41] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 10:41:41] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-07 10:41:41] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 10:41:41] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 10:41:41] [Rank 0] Group 5 FTA: 0.0885
+[2025-07-07 10:41:41] [Rank 0] Group 6 FTA: 0.0833
+[2025-07-07 10:41:41] [Rank 0] Group 7 FTA: 0.1016
+[2025-07-07 10:41:41] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 10:41:41] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 10:41:41] [Rank 0] Group 10 FTA: 0.1191
+[2025-07-07 10:41:41] [Rank 0] Group 11 FTA: 0.0889
+[2025-07-07 10:41:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 10:41:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 10:41:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 10:41:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 10:41:43] [Rank 0] step:2501/10000 train_time:169790ms step_avg:67.89ms
+[2025-07-07 10:41:44] [Rank 0] step:2521/10000 train_time:170596ms step_avg:67.67ms
+[2025-07-07 10:41:46] [Rank 0] step:2541/10000 train_time:171967ms step_avg:67.68ms
+[2025-07-07 10:41:47] [Rank 0] step:2561/10000 train_time:173337ms step_avg:67.68ms
+[2025-07-07 10:41:49] [Rank 0] step:2581/10000 train_time:174708ms step_avg:67.69ms
+[2025-07-07 10:41:50] [Rank 0] step:2601/10000 train_time:176078ms step_avg:67.70ms
+[2025-07-07 10:41:51] [Rank 0] step:2621/10000 train_time:177449ms step_avg:67.70ms
+[2025-07-07 10:41:53] [Rank 0] step:2641/10000 train_time:178821ms step_avg:67.71ms
+[2025-07-07 10:41:54] [Rank 0] step:2661/10000 train_time:180192ms step_avg:67.72ms
+[2025-07-07 10:41:55] [Rank 0] step:2681/10000 train_time:181564ms step_avg:67.72ms
+[2025-07-07 10:41:57] [Rank 0] step:2701/10000 train_time:182938ms step_avg:67.73ms
+[2025-07-07 10:41:58] [Rank 0] step:2721/10000 train_time:184355ms step_avg:67.75ms
+[2025-07-07 10:42:00] [Rank 0] step:2741/10000 train_time:185728ms step_avg:67.76ms
+[2025-07-07 10:42:01] [Rank 0] step:2761/10000 train_time:187101ms step_avg:67.77ms
+[2025-07-07 10:42:02] [Rank 0] step:2781/10000 train_time:188475ms step_avg:67.77ms
+[2025-07-07 10:42:04] [Rank 0] step:2801/10000 train_time:189850ms step_avg:67.78ms
+[2025-07-07 10:42:05] [Rank 0] step:2821/10000 train_time:191226ms step_avg:67.79ms
+[2025-07-07 10:42:06] [Rank 0] step:2841/10000 train_time:192600ms step_avg:67.79ms
+[2025-07-07 10:42:08] [Rank 0] step:2861/10000 train_time:193975ms step_avg:67.80ms
+[2025-07-07 10:42:09] [Rank 0] step:2881/10000 train_time:195350ms step_avg:67.81ms
+[2025-07-07 10:42:11] [Rank 0] step:2901/10000 train_time:196769ms step_avg:67.83ms
+[2025-07-07 10:42:12] [Rank 0] step:2921/10000 train_time:198144ms step_avg:67.83ms
+[2025-07-07 10:42:13] [Rank 0] step:2941/10000 train_time:199518ms step_avg:67.84ms
+[2025-07-07 10:42:15] [Rank 0] step:2961/10000 train_time:200892ms step_avg:67.85ms
+[2025-07-07 10:42:16] [Rank 0] step:2981/10000 train_time:202267ms step_avg:67.85ms
+[2025-07-07 10:42:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 10:42:18] [Rank 0] PRINT: step:3000/10000 train_loss:1.5083 val_loss:1.4626 train_time:204265ms step_avg:68.09ms
+[2025-07-07 10:42:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 10:42:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 10:42:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 10:47:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 10:47:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 10:47:38] [Rank 0] Total Loss: 4.9411
+[2025-07-07 10:47:38] [Rank 0] Total FTA: 0.1003
+[2025-07-07 10:47:38] [Rank 0] Group 0 Loss: 5.1743
+[2025-07-07 10:47:38] [Rank 0] Group 1 Loss: 5.0929
+[2025-07-07 10:47:38] [Rank 0] Group 2 Loss: 4.6282
+[2025-07-07 10:47:38] [Rank 0] Group 3 Loss: 4.8871
+[2025-07-07 10:47:38] [Rank 0] Group 4 Loss: 4.8701
+[2025-07-07 10:47:38] [Rank 0] Group 5 Loss: 4.8575
+[2025-07-07 10:47:38] [Rank 0] Group 6 Loss: 4.8558
+[2025-07-07 10:47:39] [Rank 0] Group 7 Loss: 4.9695
+[2025-07-07 10:47:39] [Rank 0] Group 8 Loss: 4.9135
+[2025-07-07 10:47:39] [Rank 0] Group 9 Loss: 4.9332
+[2025-07-07 10:47:39] [Rank 0] Group 10 Loss: 4.9398
+[2025-07-07 10:47:39] [Rank 0] Group 11 Loss: 4.9388
+[2025-07-07 10:47:39] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 10:47:39] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 10:47:39] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 10:47:39] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 10:47:39] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 10:47:39] [Rank 0] Group 5 FTA: 0.0781
+[2025-07-07 10:47:39] [Rank 0] Group 6 FTA: 0.0807
+[2025-07-07 10:47:39] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-07 10:47:39] [Rank 0] Group 8 FTA: 0.0859
+[2025-07-07 10:47:39] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 10:47:39] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-07 10:47:39] [Rank 0] Group 11 FTA: 0.0996
+[2025-07-07 10:47:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 10:47:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 10:47:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 10:47:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 10:47:40] [Rank 0] step:3001/10000 train_time:204274ms step_avg:68.07ms
+[2025-07-07 10:47:41] [Rank 0] step:3021/10000 train_time:205060ms step_avg:67.88ms
+[2025-07-07 10:47:43] [Rank 0] step:3041/10000 train_time:206424ms step_avg:67.88ms
+[2025-07-07 10:47:44] [Rank 0] step:3061/10000 train_time:207820ms step_avg:67.89ms
+[2025-07-07 10:47:46] [Rank 0] step:3081/10000 train_time:209184ms step_avg:67.89ms
+[2025-07-07 10:47:47] [Rank 0] step:3101/10000 train_time:210551ms step_avg:67.90ms
+[2025-07-07 10:47:48] [Rank 0] step:3121/10000 train_time:211917ms step_avg:67.90ms
+[2025-07-07 10:47:50] [Rank 0] step:3141/10000 train_time:213384ms step_avg:67.93ms
+[2025-07-07 10:47:51] [Rank 0] step:3161/10000 train_time:214753ms step_avg:67.94ms
+[2025-07-07 10:47:52] [Rank 0] step:3181/10000 train_time:216120ms step_avg:67.94ms
+[2025-07-07 10:47:54] [Rank 0] step:3201/10000 train_time:217489ms step_avg:67.94ms
+[2025-07-07 10:47:55] [Rank 0] step:3221/10000 train_time:218859ms step_avg:67.95ms
+[2025-07-07 10:47:57] [Rank 0] step:3241/10000 train_time:220229ms step_avg:67.95ms
+[2025-07-07 10:47:58] [Rank 0] step:3261/10000 train_time:221597ms step_avg:67.95ms
+[2025-07-07 10:47:59] [Rank 0] step:3281/10000 train_time:222966ms step_avg:67.96ms
+[2025-07-07 10:48:01] [Rank 0] step:3301/10000 train_time:224336ms step_avg:67.96ms
+[2025-07-07 10:48:02] [Rank 0] step:3321/10000 train_time:225707ms step_avg:67.96ms
+[2025-07-07 10:48:03] [Rank 0] step:3341/10000 train_time:227078ms step_avg:67.97ms
+[2025-07-07 10:48:05] [Rank 0] step:3361/10000 train_time:228448ms step_avg:67.97ms
+[2025-07-07 10:48:06] [Rank 0] step:3381/10000 train_time:229820ms step_avg:67.97ms
+[2025-07-07 10:48:08] [Rank 0] step:3401/10000 train_time:231191ms step_avg:67.98ms
+[2025-07-07 10:48:09] [Rank 0] step:3421/10000 train_time:232561ms step_avg:67.98ms
+[2025-07-07 10:48:10] [Rank 0] step:3441/10000 train_time:233973ms step_avg:68.00ms
+[2025-07-07 10:48:12] [Rank 0] step:3461/10000 train_time:235344ms step_avg:68.00ms
+[2025-07-07 10:48:13] [Rank 0] step:3481/10000 train_time:236715ms step_avg:68.00ms
+[2025-07-07 10:48:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 10:48:15] [Rank 0] PRINT: step:3500/10000 train_loss:1.4234 val_loss:1.3893 train_time:238710ms step_avg:68.20ms
+[2025-07-07 10:48:15] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 10:48:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 10:48:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 10:53:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 10:53:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 10:53:36] [Rank 0] Total Loss: 5.0328
+[2025-07-07 10:53:36] [Rank 0] Total FTA: 0.1099
+[2025-07-07 10:53:36] [Rank 0] Group 0 Loss: 5.2253
+[2025-07-07 10:53:36] [Rank 0] Group 1 Loss: 5.0452
+[2025-07-07 10:53:36] [Rank 0] Group 2 Loss: 4.7999
+[2025-07-07 10:53:36] [Rank 0] Group 3 Loss: 5.1255
+[2025-07-07 10:53:36] [Rank 0] Group 4 Loss: 5.0069
+[2025-07-07 10:53:36] [Rank 0] Group 5 Loss: 4.9423
+[2025-07-07 10:53:36] [Rank 0] Group 6 Loss: 4.9856
+[2025-07-07 10:53:36] [Rank 0] Group 7 Loss: 5.0704
+[2025-07-07 10:53:36] [Rank 0] Group 8 Loss: 4.9965
+[2025-07-07 10:53:36] [Rank 0] Group 9 Loss: 4.9913
+[2025-07-07 10:53:36] [Rank 0] Group 10 Loss: 4.9936
+[2025-07-07 10:53:36] [Rank 0] Group 11 Loss: 5.0267
+[2025-07-07 10:53:36] [Rank 0] Group 0 FTA: 0.1821
+[2025-07-07 10:53:36] [Rank 0] Group 1 FTA: 0.2031
+[2025-07-07 10:53:36] [Rank 0] Group 2 FTA: 0.1094
+[2025-07-07 10:53:36] [Rank 0] Group 3 FTA: 0.0573
+[2025-07-07 10:53:36] [Rank 0] Group 4 FTA: 0.0625
+[2025-07-07 10:53:36] [Rank 0] Group 5 FTA: 0.0990
+[2025-07-07 10:53:36] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-07 10:53:36] [Rank 0] Group 7 FTA: 0.0781
+[2025-07-07 10:53:36] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-07 10:53:36] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 10:53:36] [Rank 0] Group 10 FTA: 0.0820
+[2025-07-07 10:53:36] [Rank 0] Group 11 FTA: 0.1006
+[2025-07-07 10:53:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 10:53:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 10:53:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 10:53:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 10:53:38] [Rank 0] step:3501/10000 train_time:238719ms step_avg:68.19ms
+[2025-07-07 10:53:39] [Rank 0] step:3521/10000 train_time:239469ms step_avg:68.01ms
+[2025-07-07 10:53:40] [Rank 0] step:3541/10000 train_time:240832ms step_avg:68.01ms
+[2025-07-07 10:53:42] [Rank 0] step:3561/10000 train_time:242198ms step_avg:68.01ms
+[2025-07-07 10:53:43] [Rank 0] step:3581/10000 train_time:243562ms step_avg:68.02ms
+[2025-07-07 10:53:45] [Rank 0] step:3601/10000 train_time:245029ms step_avg:68.04ms
+[2025-07-07 10:53:46] [Rank 0] step:3621/10000 train_time:246419ms step_avg:68.05ms
+[2025-07-07 10:53:47] [Rank 0] step:3641/10000 train_time:247785ms step_avg:68.05ms
+[2025-07-07 10:53:49] [Rank 0] step:3661/10000 train_time:249152ms step_avg:68.06ms
+[2025-07-07 10:53:50] [Rank 0] step:3681/10000 train_time:250520ms step_avg:68.06ms
+[2025-07-07 10:53:52] [Rank 0] step:3701/10000 train_time:251889ms step_avg:68.06ms
+[2025-07-07 10:53:53] [Rank 0] step:3721/10000 train_time:253259ms step_avg:68.06ms
+[2025-07-07 10:53:54] [Rank 0] step:3741/10000 train_time:254628ms step_avg:68.06ms
+[2025-07-07 10:53:56] [Rank 0] step:3761/10000 train_time:255998ms step_avg:68.07ms
+[2025-07-07 10:53:57] [Rank 0] step:3781/10000 train_time:257413ms step_avg:68.08ms
+[2025-07-07 10:53:58] [Rank 0] step:3801/10000 train_time:258773ms step_avg:68.08ms
+[2025-07-07 10:54:00] [Rank 0] step:3821/10000 train_time:260143ms step_avg:68.08ms
+[2025-07-07 10:54:01] [Rank 0] step:3841/10000 train_time:261514ms step_avg:68.08ms
+[2025-07-07 10:54:03] [Rank 0] step:3861/10000 train_time:262887ms step_avg:68.09ms
+[2025-07-07 10:54:04] [Rank 0] step:3881/10000 train_time:264361ms step_avg:68.12ms
+[2025-07-07 10:54:05] [Rank 0] step:3901/10000 train_time:265732ms step_avg:68.12ms
+[2025-07-07 10:54:07] [Rank 0] step:3921/10000 train_time:267105ms step_avg:68.12ms
+[2025-07-07 10:54:08] [Rank 0] step:3941/10000 train_time:268476ms step_avg:68.12ms
+[2025-07-07 10:54:10] [Rank 0] step:3961/10000 train_time:269848ms step_avg:68.13ms
+[2025-07-07 10:54:11] [Rank 0] step:3981/10000 train_time:271253ms step_avg:68.14ms
+[2025-07-07 10:54:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 10:54:13] [Rank 0] PRINT: step:4000/10000 train_loss:1.3617 val_loss:1.3385 train_time:273248ms step_avg:68.31ms
+[2025-07-07 10:54:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 10:54:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 10:54:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 10:59:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 10:59:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 10:59:35] [Rank 0] Total Loss: 5.1086
+[2025-07-07 10:59:35] [Rank 0] Total FTA: 0.0802
+[2025-07-07 10:59:35] [Rank 0] Group 0 Loss: 5.2743
+[2025-07-07 10:59:35] [Rank 0] Group 1 Loss: 5.0714
+[2025-07-07 10:59:35] [Rank 0] Group 2 Loss: 4.9031
+[2025-07-07 10:59:35] [Rank 0] Group 3 Loss: 5.2168
+[2025-07-07 10:59:35] [Rank 0] Group 4 Loss: 5.0986
+[2025-07-07 10:59:35] [Rank 0] Group 5 Loss: 4.9863
+[2025-07-07 10:59:35] [Rank 0] Group 6 Loss: 5.0104
+[2025-07-07 10:59:35] [Rank 0] Group 7 Loss: 5.1532
+[2025-07-07 10:59:35] [Rank 0] Group 8 Loss: 5.0987
+[2025-07-07 10:59:35] [Rank 0] Group 9 Loss: 5.0715
+[2025-07-07 10:59:35] [Rank 0] Group 10 Loss: 5.1292
+[2025-07-07 10:59:35] [Rank 0] Group 11 Loss: 5.1068
+[2025-07-07 10:59:35] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 10:59:35] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 10:59:35] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 10:59:35] [Rank 0] Group 3 FTA: 0.0339
+[2025-07-07 10:59:35] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 10:59:35] [Rank 0] Group 5 FTA: 0.0625
+[2025-07-07 10:59:35] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-07 10:59:35] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-07 10:59:36] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 10:59:36] [Rank 0] Group 9 FTA: 0.0625
+[2025-07-07 10:59:36] [Rank 0] Group 10 FTA: 0.1133
+[2025-07-07 10:59:36] [Rank 0] Group 11 FTA: 0.0723
+[2025-07-07 10:59:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 10:59:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 10:59:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 10:59:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 10:59:37] [Rank 0] step:4001/10000 train_time:273257ms step_avg:68.30ms
+[2025-07-07 10:59:38] [Rank 0] step:4021/10000 train_time:274016ms step_avg:68.15ms
+[2025-07-07 10:59:40] [Rank 0] step:4041/10000 train_time:275380ms step_avg:68.15ms
+[2025-07-07 10:59:41] [Rank 0] step:4061/10000 train_time:276744ms step_avg:68.15ms
+[2025-07-07 10:59:43] [Rank 0] step:4081/10000 train_time:278109ms step_avg:68.15ms
+[2025-07-07 10:59:44] [Rank 0] step:4101/10000 train_time:279474ms step_avg:68.15ms
+[2025-07-07 10:59:45] [Rank 0] step:4121/10000 train_time:280841ms step_avg:68.15ms
+[2025-07-07 10:59:47] [Rank 0] step:4141/10000 train_time:282209ms step_avg:68.15ms
+[2025-07-07 10:59:48] [Rank 0] step:4161/10000 train_time:283609ms step_avg:68.16ms
+[2025-07-07 10:59:49] [Rank 0] step:4181/10000 train_time:284981ms step_avg:68.16ms
+[2025-07-07 10:59:51] [Rank 0] step:4201/10000 train_time:286350ms step_avg:68.16ms
+[2025-07-07 10:59:52] [Rank 0] step:4221/10000 train_time:287820ms step_avg:68.19ms
+[2025-07-07 10:59:54] [Rank 0] step:4241/10000 train_time:289190ms step_avg:68.19ms
+[2025-07-07 10:59:55] [Rank 0] step:4261/10000 train_time:290658ms step_avg:68.21ms
+[2025-07-07 10:59:56] [Rank 0] step:4281/10000 train_time:292029ms step_avg:68.22ms
+[2025-07-07 10:59:58] [Rank 0] step:4301/10000 train_time:293400ms step_avg:68.22ms
+[2025-07-07 10:59:59] [Rank 0] step:4321/10000 train_time:294771ms step_avg:68.22ms
+[2025-07-07 11:00:01] [Rank 0] step:4341/10000 train_time:296185ms step_avg:68.23ms
+[2025-07-07 11:00:02] [Rank 0] step:4361/10000 train_time:297657ms step_avg:68.25ms
+[2025-07-07 11:00:03] [Rank 0] step:4381/10000 train_time:299029ms step_avg:68.26ms
+[2025-07-07 11:00:05] [Rank 0] step:4401/10000 train_time:300400ms step_avg:68.26ms
+[2025-07-07 11:00:06] [Rank 0] step:4421/10000 train_time:301772ms step_avg:68.26ms
+[2025-07-07 11:00:08] [Rank 0] step:4441/10000 train_time:303143ms step_avg:68.26ms
+[2025-07-07 11:00:09] [Rank 0] step:4461/10000 train_time:304515ms step_avg:68.26ms
+[2025-07-07 11:00:10] [Rank 0] step:4481/10000 train_time:305884ms step_avg:68.26ms
+[2025-07-07 11:00:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:00:13] [Rank 0] PRINT: step:4500/10000 train_loss:1.3222 val_loss:1.3126 train_time:307881ms step_avg:68.42ms
+[2025-07-07 11:00:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:00:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 11:00:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:05:34] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:05:34] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:05:34] [Rank 0] Total Loss: 5.0926
+[2025-07-07 11:05:34] [Rank 0] Total FTA: 0.1031
+[2025-07-07 11:05:34] [Rank 0] Group 0 Loss: 5.3809
+[2025-07-07 11:05:34] [Rank 0] Group 1 Loss: 4.8919
+[2025-07-07 11:05:34] [Rank 0] Group 2 Loss: 4.8865
+[2025-07-07 11:05:34] [Rank 0] Group 3 Loss: 5.1617
+[2025-07-07 11:05:34] [Rank 0] Group 4 Loss: 5.0381
+[2025-07-07 11:05:34] [Rank 0] Group 5 Loss: 5.0494
+[2025-07-07 11:05:34] [Rank 0] Group 6 Loss: 5.0096
+[2025-07-07 11:05:34] [Rank 0] Group 7 Loss: 5.1162
+[2025-07-07 11:05:34] [Rank 0] Group 8 Loss: 5.0817
+[2025-07-07 11:05:34] [Rank 0] Group 9 Loss: 5.0562
+[2025-07-07 11:05:34] [Rank 0] Group 10 Loss: 5.0562
+[2025-07-07 11:05:34] [Rank 0] Group 11 Loss: 5.0932
+[2025-07-07 11:05:34] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 11:05:34] [Rank 0] Group 1 FTA: 0.2005
+[2025-07-07 11:05:34] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-07 11:05:34] [Rank 0] Group 3 FTA: 0.1198
+[2025-07-07 11:05:34] [Rank 0] Group 4 FTA: 0.0677
+[2025-07-07 11:05:34] [Rank 0] Group 5 FTA: 0.0234
+[2025-07-07 11:05:34] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-07 11:05:34] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-07 11:05:34] [Rank 0] Group 8 FTA: 0.1120
+[2025-07-07 11:05:34] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 11:05:34] [Rank 0] Group 10 FTA: 0.0820
+[2025-07-07 11:05:34] [Rank 0] Group 11 FTA: 0.0752
+[2025-07-07 11:05:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:05:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:05:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:05:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:05:36] [Rank 0] step:4501/10000 train_time:308210ms step_avg:68.48ms
+[2025-07-07 11:05:37] [Rank 0] step:4521/10000 train_time:309054ms step_avg:68.36ms
+[2025-07-07 11:05:39] [Rank 0] step:4541/10000 train_time:310420ms step_avg:68.36ms
+[2025-07-07 11:05:40] [Rank 0] step:4561/10000 train_time:311888ms step_avg:68.38ms
+[2025-07-07 11:05:41] [Rank 0] step:4581/10000 train_time:313252ms step_avg:68.38ms
+[2025-07-07 11:05:43] [Rank 0] step:4601/10000 train_time:314716ms step_avg:68.40ms
+[2025-07-07 11:05:44] [Rank 0] step:4621/10000 train_time:316084ms step_avg:68.40ms
+[2025-07-07 11:05:46] [Rank 0] step:4641/10000 train_time:317451ms step_avg:68.40ms
+[2025-07-07 11:05:47] [Rank 0] step:4661/10000 train_time:318820ms step_avg:68.40ms
+[2025-07-07 11:05:48] [Rank 0] step:4681/10000 train_time:320189ms step_avg:68.40ms
+[2025-07-07 11:05:50] [Rank 0] step:4701/10000 train_time:321600ms step_avg:68.41ms
+[2025-07-07 11:05:51] [Rank 0] step:4721/10000 train_time:322971ms step_avg:68.41ms
+[2025-07-07 11:05:53] [Rank 0] step:4741/10000 train_time:324340ms step_avg:68.41ms
+[2025-07-07 11:05:54] [Rank 0] step:4761/10000 train_time:325711ms step_avg:68.41ms
+[2025-07-07 11:05:55] [Rank 0] step:4781/10000 train_time:327082ms step_avg:68.41ms
+[2025-07-07 11:05:57] [Rank 0] step:4801/10000 train_time:328454ms step_avg:68.41ms
+[2025-07-07 11:05:58] [Rank 0] step:4821/10000 train_time:329826ms step_avg:68.41ms
+[2025-07-07 11:05:59] [Rank 0] step:4841/10000 train_time:331198ms step_avg:68.42ms
+[2025-07-07 11:06:01] [Rank 0] step:4861/10000 train_time:332570ms step_avg:68.42ms
+[2025-07-07 11:06:02] [Rank 0] step:4881/10000 train_time:333985ms step_avg:68.43ms
+[2025-07-07 11:06:04] [Rank 0] step:4901/10000 train_time:335358ms step_avg:68.43ms
+[2025-07-07 11:06:05] [Rank 0] step:4921/10000 train_time:336732ms step_avg:68.43ms
+[2025-07-07 11:06:06] [Rank 0] step:4941/10000 train_time:338205ms step_avg:68.45ms
+[2025-07-07 11:06:08] [Rank 0] step:4961/10000 train_time:339579ms step_avg:68.45ms
+[2025-07-07 11:06:09] [Rank 0] step:4981/10000 train_time:340952ms step_avg:68.45ms
+[2025-07-07 11:06:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:06:11] [Rank 0] PRINT: step:5000/10000 train_loss:1.2980 val_loss:1.2925 train_time:342957ms step_avg:68.59ms
+[2025-07-07 11:06:11] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:06:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 11:06:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 11:06:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 11:11:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 11:11:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 11:11:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 11:11:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 11:11:31] [Rank 0] Total Loss: 5.1960 +[2025-07-07 11:11:31] [Rank 0] Total Loss: 5.1960 +[2025-07-07 11:11:31] [Rank 0] Total FTA: 0.1070 +[2025-07-07 11:11:31] [Rank 0] Total FTA: 0.1070 +[2025-07-07 11:11:31] [Rank 0] Group 0 Loss: 5.3770 +[2025-07-07 11:11:31] [Rank 0] Group 0 Loss: 5.3770 +[2025-07-07 11:11:31] [Rank 0] Group 1 Loss: 5.0338 +[2025-07-07 11:11:31] [Rank 0] Group 1 Loss: 5.0338 +[2025-07-07 11:11:31] [Rank 0] Group 2 Loss: 4.9642 +[2025-07-07 11:11:31] [Rank 0] Group 2 Loss: 4.9642 +[2025-07-07 11:11:31] [Rank 0] Group 3 Loss: 5.3916 +[2025-07-07 11:11:31] [Rank 0] Group 3 Loss: 5.3916 +[2025-07-07 11:11:31] [Rank 0] Group 4 Loss: 5.2264 +[2025-07-07 11:11:31] [Rank 0] Group 4 Loss: 5.2264 +[2025-07-07 11:11:31] [Rank 0] Group 5 Loss: 5.0983 +[2025-07-07 11:11:31] [Rank 0] Group 5 Loss: 5.0983 +[2025-07-07 11:11:31] [Rank 0] Group 6 Loss: 5.1098 +[2025-07-07 11:11:31] [Rank 0] Group 6 Loss: 5.1098 +[2025-07-07 11:11:31] [Rank 0] Group 7 Loss: 5.2207 +[2025-07-07 11:11:31] [Rank 0] Group 7 Loss: 5.2207 +[2025-07-07 11:11:31] [Rank 0] Group 8 Loss: 5.1731 +[2025-07-07 11:11:31] [Rank 0] Group 8 Loss: 5.1731 +[2025-07-07 11:11:31] [Rank 0] Group 9 Loss: 5.1709 +[2025-07-07 11:11:31] [Rank 0] Group 9 Loss: 5.1709 +[2025-07-07 11:11:31] [Rank 0] Group 10 Loss: 5.2134 +[2025-07-07 11:11:31] [Rank 0] Group 10 Loss: 5.2134 +[2025-07-07 11:11:31] [Rank 0] Group 11 Loss: 5.1891 +[2025-07-07 11:11:31] [Rank 0] Group 11 Loss: 5.1891 +[2025-07-07 11:11:31] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 11:11:31] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 11:11:31] [Rank 0] Group 1 FTA: 0.1432 +[2025-07-07 11:11:31] [Rank 0] Group 1 FTA: 0.1432 +[2025-07-07 11:11:31] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-07 11:11:31] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-07 11:11:31] [Rank 0] Group 3 FTA: 0.0911 +[2025-07-07 11:11:31] [Rank 0] Group 3 FTA: 0.0911 +[2025-07-07 11:11:31] [Rank 0] Group 4 FTA: 0.0885 +[2025-07-07 11:11:31] [Rank 0] Group 4 FTA: 0.0885 +[2025-07-07 11:11:31] [Rank 0] Group 5 FTA: 0.0599 +[2025-07-07 11:11:31] [Rank 0] Group 5 FTA: 0.0599 +[2025-07-07 11:11:31] [Rank 0] Group 6 FTA: 0.1042 +[2025-07-07 11:11:31] [Rank 0] Group 6 FTA: 0.1042 +[2025-07-07 11:11:32] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-07 11:11:32] [Rank 0] Group 7 FTA: 0.1016 +[2025-07-07 11:11:32] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 11:11:32] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 11:11:32] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-07 11:11:32] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-07 11:11:32] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 11:11:32] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 11:11:32] [Rank 0] Group 11 FTA: 0.1064 +[2025-07-07 11:11:32] [Rank 0] Group 11 FTA: 0.1064 +[2025-07-07 11:11:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-07 11:11:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png +[2025-07-07 11:11:33] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-07 11:11:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png +[2025-07-07 11:11:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-07 11:11:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png +[2025-07-07 11:11:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-07 11:11:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png +[2025-07-07 11:11:33] [Rank 0] step:5001/10000 train_time:342966ms step_avg:68.58ms +[2025-07-07 11:11:33] [Rank 0] step:5001/10000 train_time:342966ms step_avg:68.58ms +[2025-07-07 11:11:35] [Rank 0] step:5021/10000 train_time:343728ms step_avg:68.46ms +[2025-07-07 11:11:35] [Rank 0] step:5021/10000 train_time:343728ms step_avg:68.46ms +[2025-07-07 11:11:36] [Rank 0] step:5041/10000 train_time:345094ms step_avg:68.46ms +[2025-07-07 11:11:36] [Rank 0] step:5041/10000 train_time:345094ms step_avg:68.46ms +[2025-07-07 11:11:37] [Rank 0] step:5061/10000 train_time:346613ms step_avg:68.49ms +[2025-07-07 11:11:37] [Rank 0] step:5061/10000 train_time:346613ms step_avg:68.49ms +[2025-07-07 11:11:39] [Rank 0] step:5081/10000 train_time:347980ms step_avg:68.49ms +[2025-07-07 11:11:39] [Rank 0] step:5081/10000 train_time:347980ms step_avg:68.49ms +[2025-07-07 11:11:40] [Rank 0] step:5101/10000 train_time:349348ms step_avg:68.49ms +[2025-07-07 11:11:40] [Rank 0] step:5101/10000 train_time:349348ms step_avg:68.49ms +[2025-07-07 11:11:42] [Rank 0] step:5121/10000 train_time:350716ms step_avg:68.49ms +[2025-07-07 11:11:42] [Rank 0] step:5121/10000 train_time:350716ms step_avg:68.49ms +[2025-07-07 11:11:43] [Rank 0] step:5141/10000 train_time:352085ms step_avg:68.49ms +[2025-07-07 11:11:43] [Rank 0] step:5141/10000 train_time:352085ms step_avg:68.49ms +[2025-07-07 11:11:44] [Rank 0] step:5161/10000 train_time:353453ms step_avg:68.49ms +[2025-07-07 11:11:44] [Rank 0] step:5161/10000 train_time:353453ms step_avg:68.49ms +[2025-07-07 11:11:46] [Rank 0] step:5181/10000 train_time:354925ms step_avg:68.51ms +[2025-07-07 11:11:46] [Rank 0] step:5181/10000 train_time:354925ms step_avg:68.51ms +[2025-07-07 11:11:47] [Rank 0] step:5201/10000 train_time:356295ms step_avg:68.51ms +[2025-07-07 11:11:47] [Rank 0] step:5201/10000 train_time:356295ms step_avg:68.51ms +[2025-07-07 11:11:48] [Rank 0] step:5221/10000 train_time:357665ms step_avg:68.51ms +[2025-07-07 11:11:48] [Rank 0] step:5221/10000 train_time:357665ms step_avg:68.51ms +[2025-07-07 11:11:50] [Rank 0] step:5241/10000 train_time:359062ms step_avg:68.51ms +[2025-07-07 11:11:50] [Rank 0] step:5241/10000 train_time:359062ms step_avg:68.51ms +[2025-07-07 11:11:51] [Rank 0] step:5261/10000 train_time:360432ms step_avg:68.51ms +[2025-07-07 11:11:51] [Rank 0] step:5261/10000 train_time:360432ms step_avg:68.51ms +[2025-07-07 11:11:53] [Rank 0] step:5281/10000 train_time:361803ms step_avg:68.51ms +[2025-07-07 11:11:53] [Rank 0] step:5281/10000 train_time:361803ms step_avg:68.51ms +[2025-07-07 11:11:54] [Rank 0] step:5301/10000 train_time:363175ms step_avg:68.51ms +[2025-07-07 11:11:54] [Rank 
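(Editorial aside: the recurring divisibility warning above is plain integer arithmetic: 1966080 / 262144 = 7.5, so a validation loop that floors the batch count runs 7 full batches and leaves 131072 tokens unevaluated. A minimal sketch of that arithmetic, assuming a floor-division loop; the variable names are illustrative, not the training script's actual identifiers:)

# Sketch of the arithmetic behind the "not perfectly divisible" warning.
# Assumes the validation loop floors the batch count; names are hypothetical.
val_tokens = 1_966_080
val_batch_size = 262_144

num_batches = val_tokens // val_batch_size   # 7 full batches
covered = num_batches * val_batch_size       # 1_835_008 tokens evaluated
missed = val_tokens - covered                # 131_072 tokens skipped

if val_tokens % val_batch_size != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")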
+[2025-07-07 11:11:55] [Rank 0] step:5321/10000 train_time:364547ms step_avg:68.51ms
+[2025-07-07 11:11:57] [Rank 0] step:5341/10000 train_time:365919ms step_avg:68.51ms
+[2025-07-07 11:11:58] [Rank 0] step:5361/10000 train_time:367290ms step_avg:68.51ms
+[2025-07-07 11:11:59] [Rank 0] step:5381/10000 train_time:368662ms step_avg:68.51ms
+[2025-07-07 11:12:01] [Rank 0] step:5401/10000 train_time:370081ms step_avg:68.52ms
+[2025-07-07 11:12:02] [Rank 0] step:5421/10000 train_time:371408ms step_avg:68.51ms
+[2025-07-07 11:12:04] [Rank 0] step:5441/10000 train_time:372780ms step_avg:68.51ms
+[2025-07-07 11:12:05] [Rank 0] step:5461/10000 train_time:374153ms step_avg:68.51ms
+[2025-07-07 11:12:06] [Rank 0] step:5481/10000 train_time:375527ms step_avg:68.51ms
+[2025-07-07 11:12:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:12:09] [Rank 0] PRINT: step:5500/10000 train_loss:1.2807 val_loss:1.2853 train_time:377524ms step_avg:68.64ms
+[2025-07-07 11:12:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:12:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 11:12:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:17:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:17:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:17:29] [Rank 0] Total Loss: 5.1275
+[2025-07-07 11:17:29] [Rank 0] Total FTA: 0.1172
+[2025-07-07 11:17:29] [Rank 0] Group 0 Loss: 5.3599
+[2025-07-07 11:17:29] [Rank 0] Group 1 Loss: 5.0888
+[2025-07-07 11:17:29] [Rank 0] Group 2 Loss: 4.8627
+[2025-07-07 11:17:29] [Rank 0] Group 3 Loss: 5.1606
+[2025-07-07 11:17:29] [Rank 0] Group 4 Loss: 5.1351
+[2025-07-07 11:17:29] [Rank 0] Group 5 Loss: 5.1112
+[2025-07-07 11:17:29] [Rank 0] Group 6 Loss: 5.0410
+[2025-07-07 11:17:29] [Rank 0] Group 7 Loss: 5.1755
+[2025-07-07 11:17:29] [Rank 0] Group 8 Loss: 5.1072
+[2025-07-07 11:17:29] [Rank 0] Group 9 Loss: 5.0632
+[2025-07-07 11:17:29] [Rank 0] Group 10 Loss: 5.0886
+[2025-07-07 11:17:29] [Rank 0] Group 11 Loss: 5.1154
+[2025-07-07 11:17:29] [Rank 0] Group 0 FTA: 0.1912
+[2025-07-07 11:17:29] [Rank 0] Group 1 FTA: 0.1719
+[2025-07-07 11:17:29] [Rank 0] Group 2 FTA: 0.1068
+[2025-07-07 11:17:29] [Rank 0] Group 3 FTA: 0.0729
+[2025-07-07 11:17:29] [Rank 0] Group 4 FTA: 0.0521
+[2025-07-07 11:17:29] [Rank 0] Group 5 FTA: 0.0573
+[2025-07-07 11:17:29] [Rank 0] Group 6 FTA: 0.1198
+[2025-07-07 11:17:29] [Rank 0] Group 7 FTA: 0.1172
+[2025-07-07 11:17:29] [Rank 0] Group 8 FTA: 0.1198
+[2025-07-07 11:17:29] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-07 11:17:29] [Rank 0] Group 10 FTA: 0.1074
+[2025-07-07 11:17:29] [Rank 0] Group 11 FTA: 0.1084
+[2025-07-07 11:17:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:17:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:17:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:17:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:17:31] [Rank 0] step:5501/10000 train_time:377534ms step_avg:68.63ms
+[2025-07-07 11:17:32] [Rank 0] step:5521/10000 train_time:378309ms step_avg:68.52ms
+[2025-07-07 11:17:34] [Rank 0] step:5541/10000 train_time:379674ms step_avg:68.52ms
+[2025-07-07 11:17:35] [Rank 0] step:5561/10000 train_time:381040ms step_avg:68.52ms
+[2025-07-07 11:17:36] [Rank 0] step:5581/10000 train_time:383077ms step_avg:68.64ms
+[2025-07-07 11:17:38] [Rank 0] step:5601/10000 train_time:383814ms step_avg:68.53ms
+[2025-07-07 11:17:39] [Rank 0] step:5621/10000 train_time:385181ms step_avg:68.53ms
+[2025-07-07 11:17:40] [Rank 0] step:5641/10000 train_time:386551ms step_avg:68.53ms
+[2025-07-07 11:17:42] [Rank 0] step:5661/10000 train_time:387922ms step_avg:68.53ms
+[2025-07-07 11:17:43] [Rank 0] step:5681/10000 train_time:389293ms step_avg:68.53ms
+[2025-07-07 11:17:45] [Rank 0] step:5701/10000 train_time:390665ms step_avg:68.53ms
+[2025-07-07 11:17:46] [Rank 0] step:5721/10000 train_time:392036ms step_avg:68.53ms
+[2025-07-07 11:17:47] [Rank 0] step:5741/10000 train_time:393408ms step_avg:68.53ms
+[2025-07-07 11:17:49] [Rank 0] step:5761/10000 train_time:394781ms step_avg:68.53ms
+[2025-07-07 11:17:50] [Rank 0] step:5781/10000 train_time:396200ms step_avg:68.53ms
+[2025-07-07 11:17:52] [Rank 0] step:5801/10000 train_time:397572ms step_avg:68.54ms
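(Editorial aside: the step_avg field is just cumulative wall-clock train_time divided by the number of completed steps; e.g. the step-5500 PRINT line above reports 377524 ms / 5500 ≈ 68.64 ms. A quick check, with the values copied from the log:)

# step_avg = cumulative train_time / steps completed (values from the log above)
train_time_ms, step = 377_524, 5_500
print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:68.64ms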
+[2025-07-07 11:17:53] [Rank 0] step:5821/10000 train_time:398944ms step_avg:68.54ms
+[2025-07-07 11:17:54] [Rank 0] step:5841/10000 train_time:400316ms step_avg:68.54ms
+[2025-07-07 11:17:56] [Rank 0] step:5861/10000 train_time:401691ms step_avg:68.54ms
+[2025-07-07 11:17:57] [Rank 0] step:5881/10000 train_time:403064ms step_avg:68.54ms
+[2025-07-07 11:17:58] [Rank 0] step:5901/10000 train_time:404438ms step_avg:68.54ms
+[2025-07-07 11:18:00] [Rank 0] step:5921/10000 train_time:405814ms step_avg:68.54ms
+[2025-07-07 11:18:01] [Rank 0] step:5941/10000 train_time:407188ms step_avg:68.54ms
+[2025-07-07 11:18:03] [Rank 0] step:5961/10000 train_time:408587ms step_avg:68.54ms
+[2025-07-07 11:18:04] [Rank 0] step:5981/10000 train_time:409962ms step_avg:68.54ms
+[2025-07-07 11:18:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:18:06] [Rank 0] PRINT: step:6000/10000 train_loss:1.2669 val_loss:1.2735 train_time:411961ms step_avg:68.66ms
+[2025-07-07 11:18:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:18:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 11:18:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:23:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:23:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:23:28] [Rank 0] Total Loss: 5.2278
+[2025-07-07 11:23:28] [Rank 0] Total FTA: 0.1298
+[2025-07-07 11:23:28] [Rank 0] Group 0 Loss: 5.4612
+[2025-07-07 11:23:28] [Rank 0] Group 1 Loss: 5.1569
+[2025-07-07 11:23:28] [Rank 0] Group 2 Loss: 5.0367
+[2025-07-07 11:23:28] [Rank 0] Group 3 Loss: 5.3900
+[2025-07-07 11:23:28] [Rank 0] Group 4 Loss: 5.1651
+[2025-07-07 11:23:28] [Rank 0] Group 5 Loss: 5.1512
+[2025-07-07 11:23:28] [Rank 0] Group 6 Loss: 5.1481
+[2025-07-07 11:23:28] [Rank 0] Group 7 Loss: 5.2231
+[2025-07-07 11:23:28] [Rank 0] Group 8 Loss: 5.2698
+[2025-07-07 11:23:28] [Rank 0] Group 9 Loss: 5.1282
+[2025-07-07 11:23:28] [Rank 0] Group 10 Loss: 5.1727
+[2025-07-07 11:23:28] [Rank 0] Group 11 Loss: 5.2107
+[2025-07-07 11:23:28] [Rank 0] Group 0 FTA: 0.1808
+[2025-07-07 11:23:28] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 11:23:28] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 11:23:28] [Rank 0] Group 3 FTA: 0.0755
+[2025-07-07 11:23:28] [Rank 0] Group 4 FTA: 0.0417
+[2025-07-07 11:23:28] [Rank 0] Group 5 FTA: 0.1562
+[2025-07-07 11:23:28] [Rank 0] Group 6 FTA: 0.1328
+[2025-07-07 11:23:28] [Rank 0] Group 7 FTA: 0.1302
+[2025-07-07 11:23:28] [Rank 0] Group 8 FTA: 0.1250
+[2025-07-07 11:23:28] [Rank 0] Group 9 FTA: 0.1172
+[2025-07-07 11:23:28] [Rank 0] Group 10 FTA: 0.1582
+[2025-07-07 11:23:28] [Rank 0] Group 11 FTA: 0.1221
+[2025-07-07 11:23:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:23:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:23:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:23:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:23:29] [Rank 0] step:6001/10000 train_time:411970ms step_avg:68.65ms
+[2025-07-07 11:23:31] [Rank 0] step:6021/10000 train_time:412726ms step_avg:68.55ms
+[2025-07-07 11:23:32] [Rank 0] step:6041/10000 train_time:414092ms step_avg:68.55ms
+[2025-07-07 11:23:33] [Rank 0] step:6061/10000 train_time:415460ms step_avg:68.55ms
+[2025-07-07 11:23:35] [Rank 0] step:6081/10000 train_time:416827ms step_avg:68.55ms
+[2025-07-07 11:23:36] [Rank 0] step:6101/10000 train_time:418194ms step_avg:68.55ms
+[2025-07-07 11:23:38] [Rank 0] step:6121/10000 train_time:419565ms step_avg:68.55ms
+[2025-07-07 11:23:39] [Rank 0] step:6141/10000 train_time:420984ms step_avg:68.55ms
+[2025-07-07 11:23:40] [Rank 0] step:6161/10000 train_time:422356ms step_avg:68.55ms
+[2025-07-07 11:23:42] [Rank 0] step:6181/10000 train_time:423727ms step_avg:68.55ms
+[2025-07-07 11:23:43] [Rank 0] step:6201/10000 train_time:425099ms step_avg:68.55ms
+[2025-07-07 11:23:44] [Rank 0] step:6221/10000 train_time:426471ms step_avg:68.55ms
+[2025-07-07 11:23:46] [Rank 0] step:6241/10000 train_time:427843ms step_avg:68.55ms
+[2025-07-07 11:23:47] [Rank 0] step:6261/10000 train_time:429214ms step_avg:68.55ms
+[2025-07-07 11:23:49] [Rank 0] step:6281/10000 train_time:430585ms step_avg:68.55ms
+[2025-07-07 11:23:50] [Rank 0] step:6301/10000 train_time:432209ms step_avg:68.59ms
+[2025-07-07 11:23:51] [Rank 0] step:6321/10000 train_time:433371ms step_avg:68.56ms
+[2025-07-07 11:23:53] [Rank 0] step:6341/10000 train_time:434744ms step_avg:68.56ms
+[2025-07-07 11:23:54] [Rank 0] step:6361/10000 train_time:436116ms step_avg:68.56ms
+[2025-07-07 11:23:55] [Rank 0] step:6381/10000 train_time:437489ms step_avg:68.56ms
+[2025-07-07 11:23:57] [Rank 0] step:6401/10000 train_time:438862ms step_avg:68.56ms
+[2025-07-07 11:23:58] [Rank 0] step:6421/10000 train_time:440235ms step_avg:68.56ms
+[2025-07-07 11:24:00] [Rank 0] step:6441/10000 train_time:441608ms step_avg:68.56ms
+[2025-07-07 11:24:01] [Rank 0] step:6461/10000 train_time:442982ms step_avg:68.56ms
+[2025-07-07 11:24:02] [Rank 0] step:6481/10000 train_time:444356ms step_avg:68.56ms
+[2025-07-07 11:24:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:24:05] [Rank 0] PRINT: step:6500/10000 train_loss:1.2549 val_loss:1.2716 train_time:446379ms step_avg:68.67ms
+[2025-07-07 11:24:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:24:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 11:24:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:29:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:29:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:29:25] [Rank 0] Total Loss: 5.2224
+[2025-07-07 11:29:25] [Rank 0] Total FTA: 0.1479
+[2025-07-07 11:29:25] [Rank 0] Group 0 Loss: 5.4668
+[2025-07-07 11:29:25] [Rank 0] Group 1 Loss: 5.1628
+[2025-07-07 11:29:25] [Rank 0] Group 2 Loss: 5.0125
+[2025-07-07 11:29:25] [Rank 0] Group 3 Loss: 5.3308
+[2025-07-07 11:29:25] [Rank 0] Group 4 Loss: 5.2155
+[2025-07-07 11:29:26] [Rank 0] Group 5 Loss: 5.1182
+[2025-07-07 11:29:26] [Rank 0] Group 6 Loss: 5.1637
+[2025-07-07 11:29:26] [Rank 0] Group 7 Loss: 5.2078
+[2025-07-07 11:29:26] [Rank 0] Group 8 Loss: 5.1689
+[2025-07-07 11:29:26] [Rank 0] Group 9 Loss: 5.1722
+[2025-07-07 11:29:26] [Rank 0] Group 10 Loss: 5.2168
+[2025-07-07 11:29:26] [Rank 0] Group 11 Loss: 5.2039
+[2025-07-07 11:29:26] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 11:29:26] [Rank 0] Group 1 FTA: 0.1641
+[2025-07-07 11:29:26] [Rank 0] Group 2 FTA: 0.1536
+[2025-07-07 11:29:26] [Rank 0] Group 3 FTA: 0.1745
+[2025-07-07 11:29:26] [Rank 0] Group 4 FTA: 0.1120
+[2025-07-07 11:29:26] [Rank 0] Group 5 FTA: 0.1562
+[2025-07-07 11:29:26] [Rank 0] Group 6 FTA: 0.1484
+[2025-07-07 11:29:26] [Rank 0] Group 7 FTA: 0.1302
+[2025-07-07 11:29:26] [Rank 0] Group 8 FTA: 0.1406
+[2025-07-07 11:29:26] [Rank 0] Group 9 FTA: 0.1758
+[2025-07-07 11:29:26] [Rank 0] Group 10 FTA: 0.1230
+[2025-07-07 11:29:26] [Rank 0] Group 11 FTA: 0.1553
+[2025-07-07 11:29:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:29:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:29:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:29:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:29:28] [Rank 0] step:6501/10000 train_time:446388ms step_avg:68.66ms
+[2025-07-07 11:29:30] [Rank 0] step:6521/10000 train_time:447145ms step_avg:68.57ms
+[2025-07-07 11:29:31] [Rank 0] step:6541/10000 train_time:448513ms step_avg:68.57ms
+[2025-07-07 11:29:32] [Rank 0] step:6561/10000 train_time:449879ms step_avg:68.57ms
+[2025-07-07 11:29:34] [Rank 0] step:6581/10000 train_time:451247ms step_avg:68.57ms
+[2025-07-07 11:29:35] [Rank 0] step:6601/10000 train_time:452616ms step_avg:68.57ms
+[2025-07-07 11:29:36] [Rank 0] step:6621/10000 train_time:453985ms step_avg:68.57ms
+[2025-07-07 11:29:38] [Rank 0] step:6641/10000 train_time:455355ms step_avg:68.57ms
+[2025-07-07 11:29:39] [Rank 0] step:6661/10000 train_time:456725ms step_avg:68.57ms
+[2025-07-07 11:29:41] [Rank 0] step:6681/10000 train_time:458124ms step_avg:68.57ms
+[2025-07-07 11:29:42] [Rank 0] step:6701/10000 train_time:459495ms step_avg:68.57ms
+[2025-07-07 11:29:43] [Rank 0] step:6721/10000 train_time:460865ms step_avg:68.57ms
+[2025-07-07 11:29:45] [Rank 0] step:6741/10000 train_time:462238ms step_avg:68.57ms
+[2025-07-07 11:29:46] [Rank 0] step:6761/10000 train_time:463612ms step_avg:68.57ms
+[2025-07-07 11:29:47] [Rank 0] step:6781/10000 train_time:464985ms step_avg:68.57ms
+[2025-07-07 11:29:49] [Rank 0] step:6801/10000 train_time:466358ms step_avg:68.57ms
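(Editorial aside: every detailed evaluation asks for ~5000 stratified samples but reports 5633. Per-stratum quotas that round up can overshoot a requested total, which would explain the fixed excess. A minimal sketch of that behavior, not the script's actual sampler; the function, its arguments, and the group structure are assumptions based only on the per-group metrics in this log:)

import math
import random

def stratified_sample(groups, target_total):
    """Take a proportional, rounded-up quota from each stratum.

    Rounding up per group guarantees every group is represented, but the
    returned total can exceed target_total (e.g. ~5000 -> 5633).
    """
    total = sum(len(g) for g in groups)
    frac = target_total / total
    picked = []
    for g in groups:
        quota = min(len(g), math.ceil(frac * len(g)))  # round up per stratum
        picked.extend(random.sample(g, quota))
    return picked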
+[2025-07-07 11:29:50] [Rank 0] step:6821/10000 train_time:467731ms step_avg:68.57ms
+[2025-07-07 11:29:52] [Rank 0] step:6841/10000 train_time:469105ms step_avg:68.57ms
+[2025-07-07 11:29:53] [Rank 0] step:6861/10000 train_time:470510ms step_avg:68.58ms
+[2025-07-07 11:29:54] [Rank 0] step:6881/10000 train_time:471883ms step_avg:68.58ms
+[2025-07-07 11:29:56] [Rank 0] step:6901/10000 train_time:473258ms step_avg:68.58ms
+[2025-07-07 11:29:57] [Rank 0] step:6921/10000 train_time:474631ms step_avg:68.58ms
+[2025-07-07 11:29:59] [Rank 0] step:6941/10000 train_time:476006ms step_avg:68.58ms
+[2025-07-07 11:30:00] [Rank 0] step:6961/10000 train_time:477380ms step_avg:68.58ms
+[2025-07-07 11:30:01] [Rank 0] step:6981/10000 train_time:478754ms step_avg:68.58ms
+[2025-07-07 11:30:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:30:04] [Rank 0] PRINT: step:7000/10000 train_loss:1.2412 val_loss:1.2615 train_time:480753ms step_avg:68.68ms
+[2025-07-07 11:30:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:30:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 11:30:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:35:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:35:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:35:25] [Rank 0] Total Loss: 5.3280
+[2025-07-07 11:35:25] [Rank 0] Total FTA: 0.1683
+[2025-07-07 11:35:25] [Rank 0] Group 0 Loss: 5.4889
+[2025-07-07 11:35:25] [Rank 0] Group 1 Loss: 5.2179
+[2025-07-07 11:35:25] [Rank 0] Group 2 Loss: 5.0911
+[2025-07-07 11:35:25] [Rank 0] Group 3 Loss: 5.4694
+[2025-07-07 11:35:25] [Rank 0] Group 4 Loss: 5.4042
+[2025-07-07 11:35:25] [Rank 0] Group 5 Loss: 5.2121
+[2025-07-07 11:35:25] [Rank 0] Group 6 Loss: 5.2475
+[2025-07-07 11:35:25] [Rank 0] Group 7 Loss: 5.3910
+[2025-07-07 11:35:25] [Rank 0] Group 8 Loss: 5.2783
+[2025-07-07 11:35:25] [Rank 0] Group 9 Loss: 5.3526
+[2025-07-07 11:35:25] [Rank 0] Group 10 Loss: 5.3225
+[2025-07-07 11:35:25] [Rank 0] Group 11 Loss: 5.3212
+[2025-07-07 11:35:25] [Rank 0] Group 0 FTA: 0.1808
+[2025-07-07 11:35:25] [Rank 0] Group 1 FTA: 0.3385
+[2025-07-07 11:35:25] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 11:35:25] [Rank 0] Group 3 FTA: 0.2943
+[2025-07-07 11:35:25] [Rank 0] Group 4 FTA: 0.0833
+[2025-07-07 11:35:25] [Rank 0] Group 5 FTA: 0.1302
+[2025-07-07 11:35:25] [Rank 0] Group 6 FTA: 0.1484
+[2025-07-07 11:35:25] [Rank 0] Group 7 FTA: 0.1432
+[2025-07-07 11:35:25] [Rank 0] Group 8 FTA: 0.1406
+[2025-07-07 11:35:25] [Rank 0] Group 9 FTA: 0.1641
+[2025-07-07 11:35:25] [Rank 0] Group 10 FTA: 0.1602
+[2025-07-07 11:35:25] [Rank 0] Group 11 FTA: 0.1592
+[2025-07-07 11:35:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:35:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:35:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:35:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:35:27] [Rank 0] step:7001/10000 train_time:480763ms step_avg:68.67ms
+[2025-07-07 11:35:28] [Rank 0] step:7021/10000 train_time:481514ms step_avg:68.58ms
+[2025-07-07 11:35:29] [Rank 0] step:7041/10000 train_time:482916ms step_avg:68.59ms
+[2025-07-07 11:35:31] [Rank 0] step:7061/10000 train_time:484283ms step_avg:68.59ms
+[2025-07-07 11:35:32] [Rank 0] step:7081/10000 train_time:485649ms step_avg:68.58ms
+[2025-07-07 11:35:34] [Rank 0] step:7101/10000 train_time:487017ms step_avg:68.58ms
+[2025-07-07 11:35:35] [Rank 0] step:7121/10000 train_time:488386ms step_avg:68.58ms
+[2025-07-07 11:35:36] [Rank 0] step:7141/10000 train_time:489755ms step_avg:68.58ms
+[2025-07-07 11:35:38] [Rank 0] step:7161/10000 train_time:491124ms step_avg:68.58ms
+[2025-07-07 11:35:39] [Rank 0] step:7181/10000 train_time:492495ms step_avg:68.58ms
+[2025-07-07 11:35:40] [Rank 0] step:7201/10000 train_time:493865ms step_avg:68.58ms
+[2025-07-07 11:35:42] [Rank 0] step:7221/10000 train_time:495271ms step_avg:68.59ms
+[2025-07-07 11:35:43] [Rank 0] step:7241/10000 train_time:496643ms step_avg:68.59ms
+[2025-07-07 11:35:45] [Rank 0] step:7261/10000 train_time:498014ms step_avg:68.59ms
+[2025-07-07 11:35:46] [Rank 0] step:7281/10000 train_time:499386ms step_avg:68.59ms
+[2025-07-07 11:35:47] [Rank 0] step:7301/10000 train_time:500758ms step_avg:68.59ms
+[2025-07-07 11:35:49] [Rank 0] step:7321/10000 train_time:502130ms step_avg:68.59ms
+[2025-07-07 11:35:50] [Rank 0] step:7341/10000 train_time:503502ms step_avg:68.59ms
+[2025-07-07 11:35:51] [Rank 0] step:7361/10000 train_time:504875ms step_avg:68.59ms
+[2025-07-07 11:35:53] [Rank 0] step:7381/10000 train_time:506247ms step_avg:68.59ms
+[2025-07-07 11:35:54] [Rank 0] step:7401/10000 train_time:507655ms step_avg:68.59ms
+[2025-07-07 11:35:56] [Rank 0] step:7421/10000 train_time:509028ms step_avg:68.59ms
+[2025-07-07 11:35:57] [Rank 0] step:7441/10000 train_time:510399ms step_avg:68.59ms
+[2025-07-07 11:35:58] [Rank 0] step:7461/10000 train_time:511771ms step_avg:68.59ms
+[2025-07-07 11:36:00] [Rank 0] step:7481/10000 train_time:513144ms step_avg:68.59ms
+[2025-07-07 11:36:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:36:02] [Rank 0] PRINT: step:7500/10000 train_loss:1.2268 val_loss:1.2516 train_time:515142ms step_avg:68.69ms
+[2025-07-07 11:36:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:36:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 11:36:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:41:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:41:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:41:26] [Rank 0] Total Loss: 5.2924
+[2025-07-07 11:41:26] [Rank 0] Total FTA: 0.1809
+[2025-07-07 11:41:26] [Rank 0] Group 0 Loss: 5.6117
+[2025-07-07 11:41:26] [Rank 0] Group 1 Loss: 5.2417
+[2025-07-07 11:41:26] [Rank 0] Group 2 Loss: 5.0662
+[2025-07-07 11:41:26] [Rank 0] Group 3 Loss: 5.4494
+[2025-07-07 11:41:26] [Rank 0] Group 4 Loss: 5.2911
+[2025-07-07 11:41:26] [Rank 0] Group 5 Loss: 5.1576
+[2025-07-07 11:41:26] [Rank 0] Group 6 Loss: 5.1863
+[2025-07-07 11:41:26] [Rank 0] Group 7 Loss: 5.2532
+[2025-07-07 11:41:26] [Rank 0] Group 8 Loss: 5.2318
+[2025-07-07 11:41:26] [Rank 0] Group 9 Loss: 5.2712
+[2025-07-07 11:41:26] [Rank 0] Group 10 Loss: 5.3081
+[2025-07-07 11:41:26] [Rank 0] Group 11 Loss: 5.2231
+[2025-07-07 11:41:26] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-07 11:41:26] [Rank 0] Group 1 FTA: 0.3151
+[2025-07-07 11:41:26] [Rank 0] Group 2 FTA: 0.1927
+[2025-07-07 11:41:26] [Rank 0] Group 3 FTA: 0.2083
+[2025-07-07 11:41:26] [Rank 0] Group 4 FTA: 0.1328
+[2025-07-07 11:41:26] [Rank 0] Group 5 FTA: 0.1901
+[2025-07-07 11:41:26] [Rank 0] Group 6 FTA: 0.1536
+[2025-07-07 11:41:26] [Rank 0] Group 7 FTA: 0.1823
+[2025-07-07 11:41:26] [Rank 0] Group 8 FTA: 0.1719
+[2025-07-07 11:41:26] [Rank 0] Group 9 FTA: 0.1250
+[2025-07-07 11:41:26] [Rank 0] Group 10 FTA: 0.1758
+[2025-07-07 11:41:26] [Rank 0] Group 11 FTA: 0.1748
+[2025-07-07 11:41:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:41:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:41:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:41:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:41:27] [Rank 0] step:7501/10000 train_time:515151ms step_avg:68.68ms
+[2025-07-07 11:41:29] [Rank 0] step:7521/10000 train_time:515927ms step_avg:68.60ms
+[2025-07-07 11:41:30] [Rank 0] step:7541/10000 train_time:517294ms step_avg:68.60ms
+[2025-07-07 11:41:32] [Rank 0] step:7561/10000 train_time:518661ms step_avg:68.60ms
+[2025-07-07 11:41:33] [Rank 0] step:7581/10000 train_time:520087ms step_avg:68.60ms
+[2025-07-07 11:41:34] [Rank 0] step:7601/10000 train_time:521454ms step_avg:68.60ms
+[2025-07-07 11:41:36] [Rank 0] step:7621/10000 train_time:522823ms step_avg:68.60ms
+[2025-07-07 11:41:37] [Rank 0] step:7641/10000 train_time:524192ms step_avg:68.60ms
+[2025-07-07 11:41:38] [Rank 0] step:7661/10000 train_time:525562ms step_avg:68.60ms
+[2025-07-07 11:41:40] [Rank 0] step:7681/10000 train_time:526932ms step_avg:68.60ms
+[2025-07-07 11:41:41] [Rank 0] step:7701/10000 train_time:528304ms step_avg:68.60ms
+[2025-07-07 11:41:42] [Rank 0] step:7721/10000 train_time:529675ms step_avg:68.60ms
+[2025-07-07 11:41:44] [Rank 0] step:7741/10000 train_time:531046ms step_avg:68.60ms
+[2025-07-07 11:41:45] [Rank 0] step:7761/10000 train_time:532439ms step_avg:68.60ms
+[2025-07-07 11:41:47] [Rank 0] step:7781/10000 train_time:533811ms step_avg:68.60ms
+[2025-07-07 11:41:48] [Rank 0] step:7801/10000 train_time:535184ms step_avg:68.60ms
0] step:7801/10000 train_time:535184ms step_avg:68.60ms +[2025-07-07 11:41:49] [Rank 0] step:7821/10000 train_time:536558ms step_avg:68.60ms +[2025-07-07 11:41:49] [Rank 0] step:7821/10000 train_time:536558ms step_avg:68.60ms +[2025-07-07 11:41:51] [Rank 0] step:7841/10000 train_time:537931ms step_avg:68.60ms +[2025-07-07 11:41:51] [Rank 0] step:7841/10000 train_time:537931ms step_avg:68.60ms +[2025-07-07 11:41:52] [Rank 0] step:7861/10000 train_time:539303ms step_avg:68.60ms +[2025-07-07 11:41:52] [Rank 0] step:7861/10000 train_time:539303ms step_avg:68.60ms +[2025-07-07 11:41:53] [Rank 0] step:7881/10000 train_time:540676ms step_avg:68.61ms +[2025-07-07 11:41:53] [Rank 0] step:7881/10000 train_time:540676ms step_avg:68.61ms +[2025-07-07 11:41:55] [Rank 0] step:7901/10000 train_time:542049ms step_avg:68.61ms +[2025-07-07 11:41:55] [Rank 0] step:7901/10000 train_time:542049ms step_avg:68.61ms +[2025-07-07 11:41:56] [Rank 0] step:7921/10000 train_time:543423ms step_avg:68.61ms +[2025-07-07 11:41:56] [Rank 0] step:7921/10000 train_time:543423ms step_avg:68.61ms +[2025-07-07 11:41:58] [Rank 0] step:7941/10000 train_time:544825ms step_avg:68.61ms +[2025-07-07 11:41:58] [Rank 0] step:7941/10000 train_time:544825ms step_avg:68.61ms +[2025-07-07 11:41:59] [Rank 0] step:7961/10000 train_time:546199ms step_avg:68.61ms +[2025-07-07 11:41:59] [Rank 0] step:7961/10000 train_time:546199ms step_avg:68.61ms +[2025-07-07 11:42:00] [Rank 0] step:7981/10000 train_time:547575ms step_avg:68.61ms +[2025-07-07 11:42:00] [Rank 0] step:7981/10000 train_time:547575ms step_avg:68.61ms +[2025-07-07 11:42:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 11:42:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 11:42:03] [Rank 0] PRINT: step:8000/10000 train_loss:1.2131 val_loss:1.2374 train_time:549573ms step_avg:68.70ms +[2025-07-07 11:42:03] [Rank 0] PRINT: step:8000/10000 train_loss:1.2131 val_loss:1.2374 train_time:549573ms step_avg:68.70ms +[2025-07-07 11:42:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 11:42:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 11:42:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 11:42:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 11:42:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:47:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:47:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:47:25] [Rank 0] Total Loss: 5.4081
+[2025-07-07 11:47:25] [Rank 0] Total FTA: 0.1933
+[2025-07-07 11:47:25] [Rank 0] Group 0 Loss: 5.6008
+[2025-07-07 11:47:25] [Rank 0] Group 1 Loss: 5.3684
+[2025-07-07 11:47:25] [Rank 0] Group 2 Loss: 5.1245
+[2025-07-07 11:47:25] [Rank 0] Group 3 Loss: 5.6896
+[2025-07-07 11:47:25] [Rank 0] Group 4 Loss: 5.3760
+[2025-07-07 11:47:25] [Rank 0] Group 5 Loss: 5.3464
+[2025-07-07 11:47:25] [Rank 0] Group 6 Loss: 5.2984
+[2025-07-07 11:47:25] [Rank 0] Group 7 Loss: 5.4449
+[2025-07-07 11:47:25] [Rank 0] Group 8 Loss: 5.3877
+[2025-07-07 11:47:25] [Rank 0] Group 9 Loss: 5.3297
+[2025-07-07 11:47:25] [Rank 0] Group 10 Loss: 5.4207
+[2025-07-07 11:47:25] [Rank 0] Group 11 Loss: 5.3626
+[2025-07-07 11:47:25] [Rank 0] Group 0 FTA: 0.3212
+[2025-07-07 11:47:25] [Rank 0] Group 1 FTA: 0.1589
+[2025-07-07 11:47:25] [Rank 0] Group 2 FTA: 0.1432
+[2025-07-07 11:47:25] [Rank 0] Group 3 FTA: 0.1693
+[2025-07-07 11:47:25] [Rank 0] Group 4 FTA: 0.1016
+[2025-07-07 11:47:25] [Rank 0] Group 5 FTA: 0.1693
+[2025-07-07 11:47:25] [Rank 0] Group 6 FTA: 0.2161
+[2025-07-07 11:47:25] [Rank 0] Group 7 FTA: 0.1615
+[2025-07-07 11:47:25] [Rank 0] Group 8 FTA: 0.2031
+[2025-07-07 11:47:25] [Rank 0] Group 9 FTA: 0.1797
+[2025-07-07 11:47:25] [Rank 0] Group 10 FTA: 0.1875
+[2025-07-07 11:47:25] [Rank 0] Group 11 FTA: 0.1875
+[2025-07-07 11:47:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:47:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:47:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:47:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:47:27] [Rank 0] step:8001/10000 train_time:549583ms step_avg:68.69ms
+[2025-07-07 11:47:28] [Rank 0] step:8021/10000 train_time:550362ms step_avg:68.62ms
+[2025-07-07 11:47:29] [Rank 0] step:8041/10000 train_time:551729ms step_avg:68.61ms
+[2025-07-07 11:47:31] [Rank 0] step:8061/10000 train_time:553096ms step_avg:68.61ms
+[2025-07-07 11:47:32] [Rank 0] step:8081/10000 train_time:554464ms step_avg:68.61ms
+[2025-07-07 11:47:34] [Rank 0] step:8101/10000 train_time:555832ms step_avg:68.61ms
+[2025-07-07 11:47:35] [Rank 0] step:8121/10000 train_time:557252ms step_avg:68.62ms
+[2025-07-07 11:47:36] [Rank 0] step:8141/10000 train_time:558621ms step_avg:68.62ms
+[2025-07-07 11:47:38] [Rank 0] step:8161/10000 train_time:559991ms step_avg:68.62ms
+[2025-07-07 11:47:39] [Rank 0] step:8181/10000 train_time:561361ms step_avg:68.62ms
+[2025-07-07 11:47:40] [Rank 0] step:8201/10000 train_time:562732ms step_avg:68.62ms
+[2025-07-07 11:47:42] [Rank 0] step:8221/10000 train_time:564103ms step_avg:68.62ms
+[2025-07-07 11:47:43] [Rank 0] step:8241/10000 train_time:565475ms step_avg:68.62ms
+[2025-07-07 11:47:45] [Rank 0] step:8261/10000 train_time:566846ms step_avg:68.62ms
+[2025-07-07 11:47:46] [Rank 0] step:8281/10000 train_time:568467ms step_avg:68.65ms
+[2025-07-07 11:47:47] [Rank 0] step:8301/10000 train_time:569636ms step_avg:68.62ms
+[2025-07-07 11:47:49] [Rank 0] step:8321/10000 train_time:571009ms step_avg:68.62ms
+[2025-07-07 11:47:50] [Rank 0] step:8341/10000 train_time:572381ms step_avg:68.62ms
+[2025-07-07 11:47:51] [Rank 0] step:8361/10000 train_time:573757ms step_avg:68.62ms
+[2025-07-07 11:47:53] [Rank 0] step:8381/10000 train_time:575129ms step_avg:68.62ms
+[2025-07-07 11:47:54] [Rank 0] step:8401/10000 train_time:576501ms step_avg:68.62ms
+[2025-07-07 11:47:56] [Rank 0] step:8421/10000 train_time:577875ms step_avg:68.62ms
+[2025-07-07 11:47:57] [Rank 0] step:8441/10000 train_time:579247ms step_avg:68.62ms
+[2025-07-07 11:47:58] [Rank 0] step:8461/10000 train_time:580623ms step_avg:68.62ms
+[2025-07-07 11:48:00] [Rank 0] step:8481/10000 train_time:581996ms step_avg:68.62ms
+[2025-07-07 11:48:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:48:02] [Rank 0] PRINT: step:8500/10000 train_loss:1.1972 val_loss:1.2201 train_time:583993ms step_avg:68.71ms
+[2025-07-07 11:48:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:48:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
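The step_avg column is consistent with cumulative train_time divided by the step index, e.g. for the step 8500 checkpoint just above; this is a reading of the printed numbers, not of code visible at this point in the dump:

    train_time_ms, step = 583993, 8500
    print(f"step_avg:{train_time_ms / step:.2f}ms")  # step_avg:68.71ms, matching the log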
+[2025-07-07 11:48:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:53:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:53:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:53:25] [Rank 0] Total Loss: 5.3339
+[2025-07-07 11:53:25] [Rank 0] Total FTA: 0.2013
+[2025-07-07 11:53:25] [Rank 0] Group 0 Loss: 5.7564
+[2025-07-07 11:53:25] [Rank 0] Group 1 Loss: 4.9677
+[2025-07-07 11:53:25] [Rank 0] Group 2 Loss: 5.0798
+[2025-07-07 11:53:25] [Rank 0] Group 3 Loss: 5.5983
+[2025-07-07 11:53:25] [Rank 0] Group 4 Loss: 5.2414
+[2025-07-07 11:53:25] [Rank 0] Group 5 Loss: 5.3136
+[2025-07-07 11:53:25] [Rank 0] Group 6 Loss: 5.2503
+[2025-07-07 11:53:25] [Rank 0] Group 7 Loss: 5.3439
+[2025-07-07 11:53:26] [Rank 0] Group 8 Loss: 5.2830
+[2025-07-07 11:53:26] [Rank 0] Group 9 Loss: 5.3163
+[2025-07-07 11:53:26] [Rank 0] Group 10 Loss: 5.2883
+[2025-07-07 11:53:26] [Rank 0] Group 11 Loss: 5.2665
+[2025-07-07 11:53:26] [Rank 0] Group 0 FTA: 0.1834
+[2025-07-07 11:53:26] [Rank 0] Group 1 FTA: 0.3333
+[2025-07-07 11:53:26] [Rank 0] Group 2 FTA: 0.0781
+[2025-07-07 11:53:26] [Rank 0] Group 3 FTA: 0.1771
+[2025-07-07 11:53:26] [Rank 0] Group 4 FTA: 0.2161
+[2025-07-07 11:53:26] [Rank 0] Group 5 FTA: 0.1953
+[2025-07-07 11:53:26] [Rank 0] Group 6 FTA: 0.1927
+[2025-07-07 11:53:26] [Rank 0] Group 7 FTA: 0.2109
+[2025-07-07 11:53:26] [Rank 0] Group 8 FTA: 0.2031
+[2025-07-07 11:53:26] [Rank 0] Group 9 FTA: 0.1992
+[2025-07-07 11:53:26] [Rank 0] Group 10 FTA: 0.2129
+[2025-07-07 11:53:26] [Rank 0] Group 11 FTA: 0.2109
+[2025-07-07 11:53:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:53:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:53:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:53:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:53:27] [Rank 0] step:8501/10000 train_time:584005ms step_avg:68.70ms
+[2025-07-07 11:53:29] [Rank 0] step:8521/10000 train_time:584758ms step_avg:68.63ms
+[2025-07-07 11:53:30] [Rank 0] step:8541/10000 train_time:586126ms step_avg:68.63ms
+[2025-07-07 11:53:31] [Rank 0] step:8561/10000 train_time:587494ms step_avg:68.62ms
+[2025-07-07 11:53:33] [Rank 0] step:8581/10000 train_time:588863ms step_avg:68.62ms
+[2025-07-07 11:53:34] [Rank 0] step:8601/10000 train_time:590232ms step_avg:68.62ms
+[2025-07-07 11:53:35] [Rank 0] step:8621/10000 train_time:591602ms step_avg:68.62ms
+[2025-07-07 11:53:37] [Rank 0] step:8641/10000 train_time:592972ms step_avg:68.62ms
+[2025-07-07 11:53:38] [Rank 0] step:8661/10000 train_time:594392ms step_avg:68.63ms
+[2025-07-07 11:53:40] [Rank 0] step:8681/10000 train_time:595763ms step_avg:68.63ms
+[2025-07-07 11:53:41] [Rank 0] step:8701/10000 train_time:597135ms step_avg:68.63ms
+[2025-07-07 11:53:42] [Rank 0] step:8721/10000 train_time:598507ms step_avg:68.63ms
+[2025-07-07 11:53:44] [Rank 0] step:8741/10000 train_time:599879ms step_avg:68.63ms
+[2025-07-07 11:53:45] [Rank 0] step:8761/10000 train_time:601251ms step_avg:68.63ms
+[2025-07-07 11:53:46] [Rank 0] step:8781/10000 train_time:602624ms step_avg:68.63ms
+[2025-07-07 11:53:48] [Rank 0] step:8801/10000 train_time:603996ms step_avg:68.63ms
+[2025-07-07 11:53:49] [Rank 0] step:8821/10000 train_time:605370ms step_avg:68.63ms
+[2025-07-07 11:53:51] [Rank 0] step:8841/10000 train_time:606777ms step_avg:68.63ms
+[2025-07-07 11:53:52] [Rank 0] step:8861/10000 train_time:608151ms step_avg:68.63ms
+[2025-07-07 11:53:53] [Rank 0] step:8881/10000 train_time:609525ms step_avg:68.63ms
+[2025-07-07 11:53:55] [Rank 0] step:8901/10000 train_time:610899ms step_avg:68.63ms
+[2025-07-07 11:53:56] [Rank 0] step:8921/10000 train_time:612274ms step_avg:68.63ms
+[2025-07-07 11:53:57] [Rank 0] step:8941/10000 train_time:613648ms step_avg:68.63ms
+[2025-07-07 11:53:59] [Rank 0] step:8961/10000 train_time:615022ms step_avg:68.63ms
+[2025-07-07 11:54:00] [Rank 0] step:8981/10000 train_time:616396ms step_avg:68.63ms
+[2025-07-07 11:54:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 11:54:02] [Rank 0] PRINT: step:9000/10000 train_loss:1.1833 val_loss:1.2115 train_time:618395ms step_avg:68.71ms
+[2025-07-07 11:54:02] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 11:54:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
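Group 0 through Group 11 in these reports index the power-law class structure built by generate_powerlaw_selection_counts in the training script logged further down in this dump. A short sketch of the resulting counts, assuming m = 11 (consistent with the 12 groups printed here):

    m = 11
    for g in range(m + 1):
        num_classes = 1 if g == 0 else 2 ** (g - 1)
        samples_per_class = 2 ** (m - g)
        print(f"group {g}: {num_classes} classes x {samples_per_class} samples each")
    # group 0 is a single class seen 2048 times; group 11 is 1024 classes seen
    # once each, so a higher group id means rarer classes.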
+[2025-07-07 11:54:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 11:59:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 11:59:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 11:59:25] [Rank 0] Total Loss: 5.4062
+[2025-07-07 11:59:25] [Rank 0] Total FTA: 0.2098
+[2025-07-07 11:59:25] [Rank 0] Group 0 Loss: 5.6050
+[2025-07-07 11:59:25] [Rank 0] Group 1 Loss: 5.2871
+[2025-07-07 11:59:25] [Rank 0] Group 2 Loss: 5.1644
+[2025-07-07 11:59:25] [Rank 0] Group 3 Loss: 5.6842
+[2025-07-07 11:59:25] [Rank 0] Group 4 Loss: 5.4093
+[2025-07-07 11:59:25] [Rank 0] Group 5 Loss: 5.3751
+[2025-07-07 11:59:25] [Rank 0] Group 6 Loss: 5.2569
+[2025-07-07 11:59:25] [Rank 0] Group 7 Loss: 5.3697
+[2025-07-07 11:59:25] [Rank 0] Group 8 Loss: 5.4305
+[2025-07-07 11:59:25] [Rank 0] Group 9 Loss: 5.2490
+[2025-07-07 11:59:25] [Rank 0] Group 10 Loss: 5.4435
+[2025-07-07 11:59:25] [Rank 0] Group 11 Loss: 5.3800
+[2025-07-07 11:59:25] [Rank 0] Group 0 FTA: 0.1560
+[2025-07-07 11:59:25] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 11:59:25] [Rank 0] Group 2 FTA: 0.2318
+[2025-07-07 11:59:25] [Rank 0] Group 3 FTA: 0.2370
+[2025-07-07 11:59:25] [Rank 0] Group 4 FTA: 0.1120
+[2025-07-07 11:59:25] [Rank 0] Group 5 FTA: 0.2109
+[2025-07-07 11:59:25] [Rank 0] Group 6 FTA: 0.2839
+[2025-07-07 11:59:25] [Rank 0] Group 7 FTA: 0.2422
+[2025-07-07 11:59:25] [Rank 0] Group 8 FTA: 0.2578
+[2025-07-07 11:59:25] [Rank 0] Group 9 FTA: 0.2500
+[2025-07-07 11:59:25] [Rank 0] Group 10 FTA: 0.2188
+[2025-07-07 11:59:25] [Rank 0] Group 11 FTA: 0.2188
+[2025-07-07 11:59:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 11:59:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 11:59:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 11:59:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 11:59:27] [Rank 0] step:9001/10000 train_time:618413ms step_avg:68.70ms
+[2025-07-07 11:59:29] [Rank 0] step:9021/10000 train_time:619878ms step_avg:68.72ms
+[2025-07-07 11:59:30] [Rank 0] step:9041/10000 train_time:621246ms step_avg:68.71ms
+[2025-07-07 11:59:32] [Rank 0] step:9061/10000 train_time:622615ms step_avg:68.71ms
+[2025-07-07 11:59:33] [Rank 0] step:9081/10000 train_time:623985ms step_avg:68.71ms
+[2025-07-07 11:59:34] [Rank 0] step:9101/10000 train_time:625355ms step_avg:68.71ms
+[2025-07-07 11:59:36] [Rank 0] step:9121/10000 train_time:626724ms step_avg:68.71ms
+[2025-07-07 11:59:37] [Rank 0] step:9141/10000 train_time:628095ms step_avg:68.71ms
+[2025-07-07 11:59:38] [Rank 0] step:9161/10000 train_time:629466ms step_avg:68.71ms
+[2025-07-07 11:59:40] [Rank 0] step:9181/10000 train_time:631087ms step_avg:68.74ms
+[2025-07-07 11:59:41] [Rank 0] step:9201/10000 train_time:632259ms step_avg:68.72ms
+[2025-07-07 11:59:43] [Rank 0] step:9221/10000 train_time:633630ms step_avg:68.72ms
+[2025-07-07 11:59:44] [Rank 0] step:9241/10000 train_time:635002ms step_avg:68.72ms
+[2025-07-07 11:59:45] [Rank 0] step:9261/10000 train_time:636374ms step_avg:68.72ms
+[2025-07-07 11:59:47] [Rank 0] step:9281/10000 train_time:637747ms step_avg:68.72ms
+[2025-07-07 11:59:48] [Rank 0] step:9301/10000 train_time:639120ms step_avg:68.72ms
+[2025-07-07 11:59:49] [Rank 0] step:9321/10000 train_time:640492ms step_avg:68.71ms
+[2025-07-07 11:59:51] [Rank 0] step:9341/10000 train_time:641865ms step_avg:68.71ms
+[2025-07-07 11:59:52] [Rank 0] step:9361/10000 train_time:643238ms step_avg:68.71ms
+[2025-07-07 11:59:54] [Rank 0] step:9381/10000 train_time:644644ms step_avg:68.72ms
+[2025-07-07 11:59:55] [Rank 0] step:9401/10000 train_time:646017ms step_avg:68.72ms
+[2025-07-07 11:59:56] [Rank 0] step:9421/10000 train_time:647392ms step_avg:68.72ms
+[2025-07-07 11:59:58] [Rank 0] step:9441/10000 train_time:648767ms step_avg:68.72ms
+[2025-07-07 11:59:59] [Rank 0] step:9461/10000 train_time:650140ms step_avg:68.72ms
+[2025-07-07 12:00:00] [Rank 0] step:9481/10000 train_time:651515ms step_avg:68.72ms
+[2025-07-07 12:00:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 12:00:03] [Rank 0] PRINT: step:9500/10000 train_loss:1.1678 val_loss:1.1928 train_time:653515ms step_avg:68.79ms
+[2025-07-07 12:00:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 12:00:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
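The sampler targets ~5000 QA items yet reports 5633 every time. That follows from the stratified sampling in run_detailed_evaluation (logged further down): each class contributes max(1, int(len(items) * ratio)) samples, so every rare class is floored at one sample and the total overshoots the target. A self-contained sketch of the effect with hypothetical class sizes (the real ones are not in this log):

    def stratified_size(class_sizes, num_samples=5000):
        ratio = num_samples / sum(class_sizes)
        return sum(max(1, int(n * ratio)) for n in class_sizes)

    print(stratified_size([10000] * 4 + [5] * 2000))  # 6000, not 5000: the floor of 1 dominates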
+[2025-07-07 12:00:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 12:05:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 12:05:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 12:05:26] [Rank 0] Total Loss: 5.4682
+[2025-07-07 12:05:26] [Rank 0] Total FTA: 0.2340
+[2025-07-07 12:05:26] [Rank 0] Group 0 Loss: 5.9680
+[2025-07-07 12:05:26] [Rank 0] Group 1 Loss: 5.2541
+[2025-07-07 12:05:26] [Rank 0] Group 2 Loss: 5.2436
+[2025-07-07 12:05:26] [Rank 0] Group 3 Loss: 5.6111
+[2025-07-07 12:05:26] [Rank 0] Group 4 Loss: 5.4077
+[2025-07-07 12:05:26] [Rank 0] Group 5 Loss: 5.3241
+[2025-07-07 12:05:27] [Rank 0] Group 6 Loss: 5.4019
+[2025-07-07 12:05:27] [Rank 0] Group 7 Loss: 5.4047
+[2025-07-07 12:05:27] [Rank 0] Group 8 Loss: 5.4297
+[2025-07-07 12:05:27] [Rank 0] Group 9 Loss: 5.4229
+[2025-07-07 12:05:27] [Rank 0] Group 10 Loss: 5.4131
+[2025-07-07 12:05:27] [Rank 0] Group 11 Loss: 5.3825
+[2025-07-07 12:05:27] [Rank 0] Group 0 FTA: 0.1873
+[2025-07-07 12:05:27] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-07 12:05:27] [Rank 0] Group 2 FTA: 0.0781
+[2025-07-07 12:05:27] [Rank 0] Group 3 FTA: 0.2135
+[2025-07-07 12:05:27] [Rank 0] Group 4 FTA: 0.3255
+[2025-07-07 12:05:27] [Rank 0] Group 5 FTA: 0.2839
+[2025-07-07 12:05:27] [Rank 0] Group 6 FTA: 0.2917
+[2025-07-07 12:05:27] [Rank 0] Group 7 FTA: 0.2422
+[2025-07-07 12:05:27] [Rank 0] Group 8 FTA: 0.2760
+[2025-07-07 12:05:27] [Rank 0] Group 9 FTA: 0.1914
+[2025-07-07 12:05:27] [Rank 0] Group 10 FTA: 0.2676
+[2025-07-07 12:05:27] [Rank 0] Group 11 FTA: 0.2578
+[2025-07-07 12:05:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 12:05:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 12:05:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 12:05:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 12:05:28] [Rank 0] step:9501/10000 train_time:653525ms step_avg:68.78ms
+[2025-07-07 12:05:29] [Rank 0] step:9521/10000 train_time:654288ms step_avg:68.72ms
+[2025-07-07 12:05:31] [Rank 0] step:9541/10000 train_time:655653ms step_avg:68.72ms
+[2025-07-07 12:05:32] [Rank 0] step:9561/10000 train_time:657067ms step_avg:68.72ms
+[2025-07-07 12:05:34] [Rank 0] step:9581/10000 train_time:658436ms step_avg:68.72ms
+[2025-07-07 12:05:35] [Rank 0] step:9601/10000 train_time:659805ms step_avg:68.72ms
+[2025-07-07 12:05:36] [Rank 0] step:9621/10000 train_time:661175ms step_avg:68.72ms
+[2025-07-07 12:05:38] [Rank 0] step:9641/10000 train_time:662546ms step_avg:68.72ms
+[2025-07-07 12:05:39] [Rank 0] step:9661/10000 train_time:663916ms step_avg:68.72ms
+[2025-07-07 12:05:40] [Rank 0] step:9681/10000 train_time:665288ms step_avg:68.72ms
+[2025-07-07 12:05:42] [Rank 0] step:9701/10000 train_time:666659ms step_avg:68.72ms
+[2025-07-07 12:05:43] [Rank 0] step:9721/10000 train_time:668045ms step_avg:68.72ms
+[2025-07-07 12:05:45] [Rank 0] step:9741/10000 train_time:669418ms step_avg:68.72ms
+[2025-07-07 12:05:46] [Rank 0] step:9761/10000 train_time:670791ms step_avg:68.72ms
+[2025-07-07 12:05:47] [Rank 0] step:9781/10000 train_time:672163ms step_avg:68.72ms
+[2025-07-07 12:05:49] [Rank 0] step:9801/10000 train_time:673537ms step_avg:68.72ms
+[2025-07-07 12:05:50] [Rank 0] step:9821/10000 train_time:674913ms step_avg:68.72ms
+[2025-07-07 12:05:51] [Rank 0] step:9841/10000 train_time:676287ms step_avg:68.72ms
+[2025-07-07 12:05:53] [Rank 0] step:9861/10000 train_time:677662ms step_avg:68.72ms
+[2025-07-07 12:05:54] [Rank 0] step:9881/10000 train_time:679036ms step_avg:68.72ms
+[2025-07-07 12:05:56] [Rank 0] step:9901/10000 train_time:680409ms step_avg:68.72ms
+[2025-07-07 12:05:57] [Rank 0] step:9921/10000 train_time:681805ms step_avg:68.72ms
+[2025-07-07 12:05:58] [Rank 0] step:9941/10000 train_time:683179ms step_avg:68.72ms
+[2025-07-07 12:06:00] [Rank 0] step:9961/10000 train_time:684555ms step_avg:68.72ms
+[2025-07-07 12:06:01] [Rank 0] step:9981/10000 train_time:685930ms step_avg:68.72ms
+[2025-07-07 12:06:02] [Rank 0] step:10000/10000 train_time:687237ms step_avg:68.72ms
+[2025-07-07 12:06:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 12:06:03] [Rank 0] PRINT: step:10000/10000 train_loss:1.1528 val_loss:1.1785 train_time:687937ms step_avg:68.79ms
+[2025-07-07 12:06:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 12:06:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
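Each sample in the detailed evaluation is padded to a multiple of the 128-token FlexAttention block (capped at 4096 tokens) before the forward pass, mirroring the padding logic in run_detailed_evaluation logged further down:

    BLOCK_SIZE, MAX_EVAL_LEN = 128, 4096
    def padded_length(n_tokens: int) -> int:
        padded = ((n_tokens + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
        return min(padded, MAX_EVAL_LEN)
    assert padded_length(91) == 128 and padded_length(129) == 256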
+[2025-07-07 12:06:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 12:11:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 12:11:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 12:11:26] [Rank 0] Total Loss: 5.4228
+[2025-07-07 12:11:26] [Rank 0] Total FTA: 0.2500
+[2025-07-07 12:11:26] [Rank 0] Group 0 Loss: 5.7714
+[2025-07-07 12:11:26] [Rank 0] Group 1 Loss: 5.1030
+[2025-07-07 12:11:26] [Rank 0] Group 2 Loss: 5.1493
+[2025-07-07 12:11:26] [Rank 0] Group 3 Loss: 5.6296
+[2025-07-07 12:11:26] [Rank 0] Group 4 Loss: 5.4859
+[2025-07-07 12:11:26] [Rank 0] Group 5 Loss: 5.3307
+[2025-07-07 12:11:26] [Rank 0] Group 6 Loss: 5.2836
+[2025-07-07 12:11:26] [Rank 0] Group 7 Loss: 5.4636
+[2025-07-07 12:11:26] [Rank 0] Group 8 Loss: 5.4147
+[2025-07-07 12:11:26] [Rank 0] Group 9 Loss: 5.3774
+[2025-07-07 12:11:26] [Rank 0] Group 10 Loss: 5.4009
+[2025-07-07 12:11:26] [Rank 0] Group 11 Loss: 5.3792
+[2025-07-07 12:11:26] [Rank 0] Group 0 FTA: 0.1326
+[2025-07-07 12:11:26] [Rank 0] Group 1 FTA: 0.2995
+[2025-07-07 12:11:26] [Rank 0] Group 2 FTA: 0.1484
+[2025-07-07 12:11:26] [Rank 0] Group 3 FTA: 0.2865
+[2025-07-07 12:11:26] [Rank 0] Group 4 FTA: 0.2214
+[2025-07-07 12:11:26] [Rank 0] Group 5 FTA: 0.2526
+[2025-07-07 12:11:26] [Rank 0] Group 6 FTA: 0.3151
+[2025-07-07 12:11:26] [Rank 0] Group 7 FTA: 0.2943
+[2025-07-07 12:11:26] [Rank 0] Group 8 FTA: 0.2969
+[2025-07-07 12:11:26] [Rank 0] Group 9 FTA: 0.2734
+[2025-07-07 12:11:26] [Rank 0] Group 10 FTA: 0.2852
+[2025-07-07 12:11:26] [Rank 0] Group 11 FTA: 0.2715
+[2025-07-07 12:11:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_loss_curves.png
+[2025-07-07 12:11:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/per_class_acc_curves.png
+[2025-07-07 12:11:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_loss_curve.png
+[2025-07-07 12:11:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_42/total_acc_curve.png
+[2025-07-07 12:11:28] [Rank 0] step:10001/10000 train_time:687947ms step_avg:68.79ms
+[2025-07-07 12:11:28] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 12:11:28 2025 ---
+[2025-07-07 12:11:28] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10316 MiB
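Over the five detailed evaluations of this run, Total FTA climbs 0.1933 -> 0.2013 -> 0.2098 -> 0.2340 -> 0.2500 while val_loss falls from 1.2374 to 1.1785. The aggregation step of run_detailed_evaluation is truncated in this dump; presumably Total FTA is the sample-weighted mean of the per-group counters, sketched here using that helper's counter names:

    def total_fta(group_correct: dict, group_total_fta: dict) -> float:
        seen = sum(group_total_fta.values())
        return sum(group_correct.values()) / seen if seen else 0.0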
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..92f790c7153fc91dd43d5029ffaeb99c9991a50f
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 45,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.0005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "f8f4f459-f230-4901-aaac-f7f4b0807b57",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..7a0072e771cedb4cead0be76c5fdfa4efb70c392
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1cec806f4a720680e587213d3d158ea3544c88baae15e7f97a3c4eba1bbf4b83
+size 430360
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..6b991ff096808549adcbd02940ad81bd0046afac
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3087cf48ee7b108e032dde6e15a2291120371c68101b842e1f6a81ab71c7d627
+size 411221
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..6ca0bc27d649fe64b6d94b7be593910956caca70
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d52b0484689dd3fd32ea9f5082877e82f628251d45db13748cb2b57633e1a79
+size 99986
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..1b2d6689093468ed69806ab98add5b6b8f661a1d
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a6626b31002848db31aecb1d10b8450dabc2fa6d858a5978e1c6d8ad9168825c
+size 137714
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/training_log_f8f4f459-f230-4901-aaac-f7f4b0807b57.txt b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/training_log_f8f4f459-f230-4901-aaac-f7f4b0807b57.txt
new file mode 100644
index 0000000000000000000000000000000000000000..00ec24b997241dd8fee8860bf48225e9b1ab25a1
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/training_log_f8f4f459-f230-4901-aaac-f7f4b0807b57.txt
@@ -0,0 +1,5144 @@
+[2025-07-07 18:38:36] [Rank 0] PRINT: --- Script Start: Mon Jul 7 18:38:36 2025 ---
+[2025-07-07 18:38:36] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005)
+[2025-07-07 18:38:36] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 18:38:36] [Rank 0] PRINT: Using fixed seed: 45
+[2025-07-07 18:38:36] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45
+[2025-07-07 18:38:36] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op,
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(
+                f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f" Expected : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n" +
+                f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" +
+                f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
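+    # Editor's illustration of the power-law layout this evaluation relies on
+    # (see generate_powerlaw_selection_counts above). For m = 3 the groups are:
+    #     group 0: 1 class  x 8 samples
+    #     group 1: 1 class  x 4 samples
+    #     group 2: 2 classes x 2 samples
+    #     group 3: 4 classes x 1 sample
+    # so selection_counts == {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+    # and class_to_group_map sends class_ids 0..7 to groups [0, 1, 2, 2, 3, 3, 3, 3].
+    # Each group halves the per-class sample budget of the previous one.
+    # 1. 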
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
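+            # Editor's note (an observation, not original code): this probe
+            # documents the model's return convention when target_seq is None.
+            # Elsewhere the script indexes a tuple result as [-1]
+            # (compute_first_token_accuracy) and as [0] (run_detailed_evaluation),
+            # so the convention printed here determines which of those indices
+            # actually picks out the logits.
+            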
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+            break
+
+        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+        loss_train.backward()
+        train_loss_sum += loss_train.detach() / args.train_seq_len
+        train_step_count += 1
+
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+        current_lr_val = get_lr(step)
+        for opt in optimizers:
+            for group in opt.param_groups:
+                group["lr"] = group["initial_lr"] * current_lr_val
+
+        if optimizer2 is not None:
+            for group in optimizer2.param_groups:
+                frac = min(step / 300, 1)  # momentum warmup over the first 300 steps
+                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+        for opt in optimizers:
+            opt.step()
+
+        model_compiled.zero_grad(set_to_none=True)
+
+        if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+            approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+            total_tokens_in_batch = args.train_seq_len * world_size
+            train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()  # computed for reference; not included in the print below
+            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+    print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+           f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
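+# ---------------------------------------------------------------------------
+# Editor's sketch (not from the original log): the script is written to run
+# under torchrun, which supplies the RANK / LOCAL_RANK / WORLD_SIZE environment
+# variables it reads during DDP setup. A hypothetical single-node invocation:
+#
+#   torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
+#       --optimizer_mode 0 --model_parameterization qkvo \
+#       --adam_lr 0.0001 --seed 42
+#
+# The script name "train_gpt_qa.py" is illustrative; the flags are the ones
+# defined by this script's argparse section.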
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)  # history is keyed by training step, not epoch
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_qk_group
+            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+        elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP
+            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group
+            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+        elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP
+            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_attn_matrices
+            adam_matrix_target_list = all_mlp_matrices
+        elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO)
+            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_mlp_matrices
+            adam_matrix_target_list = all_attn_matrices
+        elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam
+            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = []
+            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam
+        elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP
+            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = mlp_w2_group
+            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+        elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn
+            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + all_mlp_matrices
+            adam_matrix_target_list = attn_qk_group
+        elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + mlp_w2_group
+            adam_matrix_target_list = attn_qk_group + mlp_w1_group
+        else:
+            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
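+
+        # Recap of the mode -> optimizer assignment implemented by the chain above
+        # (summary added for readability; scalars, embeddings and the LM head
+        # always go to Adam regardless of mode):
+        #   mode 0: Muon <- all attn (QKVO) + all MLP | Adam matrices <- none
+        #   mode 1: Muon <- QK attn                   | Adam matrices <- VO attn + MLP
+        #   mode 2: Muon <- VO attn                   | Adam matrices <- QK attn + MLP
+        #   mode 3: Muon <- all attn (QKVO)           | Adam matrices <- MLP
+        #   mode 4: Muon <- all MLP                   | Adam matrices <- all attn (QKVO)
+        #   mode 5: Muon <- none                      | Adam matrices <- all attn + all MLP
+        #   mode 6: Muon <- W_2 MLP                   | Adam matrices <- attn + W_1 MLP
+        #   mode 7: Muon <- VO attn + all MLP         | Adam matrices <- QK attn
+        #   mode 8: Muon <- VO attn + W_2 MLP         | Adam matrices <- QK attn + W_1 MLP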
+
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            #dict(params=head_params, lr=0.22),
+            #dict(params=embed_params, lr=0.6),
+            #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices: # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1] # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params: # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+        # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    elif exp_args.model_parameterization == "whole":
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+        scalar_params = [p for p in model.parameters() if p.ndim < 2]
+        head_params = [model.lm_head.weight]
+
+        # init the optimizer(s)
+        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+        optimizers = [optimizer1, optimizer2]
+
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["initial_lr"] = group["lr"]
+
+    # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+    def get_lr(step: int):
+        x = step / args.num_iterations # progress in training
+        # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+        # --- MODIFICATION: Adjust assert for LR schedule ---
+        if not (0 <= x <= 1): # Allow x=1 for the last step
+            x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+            # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+        if x < 1 - args.cooldown_frac:
+            return 1.0
+        else:
+            # Ensure cooldown_frac is not zero to avoid division by zero
+            w = (1 - x) / max(args.cooldown_frac, 1e-9)
+            return w * 1.0 + (1 - w) * 0.1
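+
+    # Worked example of the schedule above (added for exposition, assuming this
+    # run's num_iterations=10000 and cooldown_frac=0.8 as in the companion
+    # config files): the multiplier stays at 1.0 for the first
+    # 0.2 * 10000 = 2000 steps, then decays linearly toward a floor of 0.1:
+    #   get_lr(0)     == 1.0
+    #   get_lr(2000)  == 1.0   (w = 1.0 at the cooldown boundary)
+    #   get_lr(6000)  == 0.55  (w = 0.5 -> 0.5 * 1.0 + 0.5 * 0.1)
+    #   get_lr(10000) == 0.1   (w = 0.0, the floor)
+    # In the training loop each group's lr is group["initial_lr"] * get_lr(step).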
+
+    # attention window size schedule (KEEP AS IS)
+    def next_multiple_of_n(v: float | int, *, n: int):
+        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+    @lru_cache(1)
+    def get_window_size_blocks_helper(window_size: int):
+        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+    def get_window_size_blocks(step: int):
+        x = step / args.num_iterations # progress in training
+        # --- MODIFICATION: Adjust assert for window size schedule ---
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0) # Clamp x
+
+        # Ensure window_size is at least 128
+        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+        return get_window_size_blocks_helper(window_size)
+
+    print0("PRINT: Compiling model with TorchInductor...", console=True)
+    # Use 'model' for compilation; 'model_compiled' is only defined below
+    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+    print0("PRINT: Model compilation complete.", console=True)
+
+    ########################################
+    #            Warmup kernels            #
+    ########################################
+    print0("PRINT: Starting warmup...", console=True)
+    warmup_steps = 10
+    initial_state = dict(
+        model=copy.deepcopy(model_compiled.state_dict()),
+        optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+    )
+
+    for i in range(warmup_steps):
+        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+        loss.backward()
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+        for opt in optimizers:
+            opt.step()
+        model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+    del initial_state
+    print0("PRINT: Warmup complete.", console=True)
+    torch.cuda.synchronize()
+
+    ########################################
+    #        Training and validation       #
+    ########################################
+    print0("PRINT: Starting training...", console=True)
+    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+    train_loss_sum = torch.zeros(1, device=device)
+    train_step_count = torch.zeros(1, device=device)
+    training_time_ms = 0
+    torch.cuda.synchronize()
+    t0 = time.perf_counter()
+    train_steps = args.num_iterations
+
+    if master_process:
+        tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+    for step in range(train_steps + 1):
+        last_step = (step == train_steps)
+
+        # --------- VALIDATION SECTION ---------
+        if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+            torch.cuda.synchronize()
+            if step > 0:
+                current_run_time = 1000 * (time.perf_counter() - t0)
+                training_time_ms += current_run_time
+
+            model_compiled.eval()
+            val_batch_size = world_size * args.val_seq_len
+            if args.val_tokens % val_batch_size != 0:
+                print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+            val_num_steps = args.val_tokens // val_batch_size
+            val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+            val_loss_sum = torch.zeros(1, device=device)
+            actual_val_steps = 0
+
+            with torch.no_grad():
+                for val_i in range(val_num_steps):
+                    try:
+                        inputs, targets = next(val_loader)
+                        loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                        val_loss_sum += loss_val
+                        actual_val_steps += 1
+                    except StopIteration:
+                        print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                        break
+
+            if actual_val_steps > 0:
+                val_loss_avg = val_loss_sum / actual_val_steps
+            else:
+                val_loss_avg = torch.tensor(float('nan'), device=device)
+                print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 18:38:36] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 18:38:36] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 18:38:36] [Rank 0] PRINT: Constructing model... +[2025-07-07 18:38:36] [Rank 0] PRINT: Constructing model... +[2025-07-07 18:38:38] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 18:38:38] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 18:38:38] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 18:38:38] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 18:38:38] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 18:38:38] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 18:38:39] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 18:38:39] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 18:38:39] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 18:38:39] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 18:38:39] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 18:38:39] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 18:38:39] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 18:38:39] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 18:38:39] [Rank 0] PRINT: Model returns: +[2025-07-07 18:38:39] [Rank 0] PRINT: Model returns: +[2025-07-07 18:38:39] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 18:38:39] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 18:38:39] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 18:38:39] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 18:38:39] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-07 18:38:39] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-07 18:38:39] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 18:38:39] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 18:38:39] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 18:38:39] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 18:38:39] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 18:38:39] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 18:38:39] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 18:38:39] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 18:38:39] [Rank 0] PRINT: Starting warmup... +[2025-07-07 18:38:39] [Rank 0] PRINT: Starting warmup... +[2025-07-07 18:40:28] [Rank 0] PRINT: Warmup complete. +[2025-07-07 18:40:28] [Rank 0] PRINT: Warmup complete. +[2025-07-07 18:40:28] [Rank 0] PRINT: Starting training... +[2025-07-07 18:40:28] [Rank 0] PRINT: Starting training... +[2025-07-07 18:40:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:40:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 18:40:35] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 18:40:35] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 18:40:37] [Rank 0] step:21/10000 train_time:1027ms step_avg:48.89ms +[2025-07-07 18:40:37] [Rank 0] step:21/10000 train_time:1027ms step_avg:48.89ms +[2025-07-07 18:40:38] [Rank 0] step:41/10000 train_time:2350ms step_avg:57.32ms +[2025-07-07 18:40:38] [Rank 0] step:41/10000 train_time:2350ms step_avg:57.32ms +[2025-07-07 18:40:40] [Rank 0] step:61/10000 train_time:3673ms step_avg:60.22ms +[2025-07-07 18:40:40] [Rank 0] step:61/10000 train_time:3673ms step_avg:60.22ms +[2025-07-07 18:40:41] [Rank 0] step:81/10000 train_time:4998ms step_avg:61.70ms +[2025-07-07 18:40:41] [Rank 0] step:81/10000 train_time:4998ms step_avg:61.70ms +[2025-07-07 18:40:42] [Rank 0] step:101/10000 train_time:6321ms step_avg:62.59ms +[2025-07-07 18:40:42] [Rank 0] step:101/10000 train_time:6321ms step_avg:62.59ms +[2025-07-07 18:40:44] [Rank 0] step:121/10000 train_time:7646ms step_avg:63.19ms +[2025-07-07 18:40:44] [Rank 0] step:121/10000 train_time:7646ms step_avg:63.19ms +[2025-07-07 18:40:45] [Rank 0] step:141/10000 train_time:8971ms step_avg:63.62ms +[2025-07-07 18:40:45] [Rank 0] step:141/10000 train_time:8971ms step_avg:63.62ms +[2025-07-07 18:40:46] [Rank 0] step:161/10000 train_time:10296ms step_avg:63.95ms +[2025-07-07 18:40:46] [Rank 0] step:161/10000 train_time:10296ms step_avg:63.95ms +[2025-07-07 18:40:48] [Rank 0] step:181/10000 train_time:11669ms step_avg:64.47ms +[2025-07-07 18:40:48] [Rank 0] step:181/10000 train_time:11669ms step_avg:64.47ms +[2025-07-07 18:40:49] [Rank 0] step:201/10000 train_time:12990ms step_avg:64.63ms +[2025-07-07 18:40:49] [Rank 0] step:201/10000 train_time:12990ms step_avg:64.63ms +[2025-07-07 18:40:50] [Rank 0] step:221/10000 train_time:14315ms step_avg:64.77ms +[2025-07-07 18:40:50] [Rank 0] step:221/10000 train_time:14315ms step_avg:64.77ms +[2025-07-07 18:40:52] [Rank 0] step:241/10000 train_time:15640ms step_avg:64.90ms +[2025-07-07 18:40:52] [Rank 0] step:241/10000 train_time:15640ms step_avg:64.90ms +[2025-07-07 18:40:53] [Rank 0] step:261/10000 train_time:16968ms step_avg:65.01ms +[2025-07-07 18:40:53] [Rank 0] step:261/10000 train_time:16968ms step_avg:65.01ms +[2025-07-07 18:40:54] [Rank 0] step:281/10000 train_time:18297ms step_avg:65.11ms +[2025-07-07 18:40:54] [Rank 0] step:281/10000 train_time:18297ms step_avg:65.11ms +[2025-07-07 18:40:56] [Rank 0] step:301/10000 train_time:19628ms step_avg:65.21ms +[2025-07-07 18:40:56] [Rank 0] step:301/10000 train_time:19628ms step_avg:65.21ms +[2025-07-07 18:40:57] [Rank 0] step:321/10000 train_time:20958ms step_avg:65.29ms +[2025-07-07 18:40:57] [Rank 0] step:321/10000 train_time:20958ms step_avg:65.29ms +[2025-07-07 18:40:58] [Rank 0] step:341/10000 train_time:22289ms step_avg:65.36ms +[2025-07-07 18:40:58] [Rank 0] step:341/10000 train_time:22289ms step_avg:65.36ms +[2025-07-07 18:41:00] [Rank 0] step:361/10000 train_time:23624ms step_avg:65.44ms +[2025-07-07 18:41:00] [Rank 0] step:361/10000 train_time:23624ms step_avg:65.44ms +[2025-07-07 18:41:01] [Rank 0] step:381/10000 train_time:25031ms step_avg:65.70ms +[2025-07-07 18:41:01] [Rank 0] step:381/10000 train_time:25031ms step_avg:65.70ms +[2025-07-07 18:41:02] [Rank 0] step:401/10000 train_time:26367ms step_avg:65.75ms +[2025-07-07 18:41:02] [Rank 0] step:401/10000 train_time:26367ms step_avg:65.75ms +[2025-07-07 18:41:04] [Rank 0] step:421/10000 train_time:27703ms step_avg:65.80ms 
+[2025-07-07 18:41:04] [Rank 0] step:421/10000 train_time:27703ms step_avg:65.80ms +[2025-07-07 18:41:05] [Rank 0] step:441/10000 train_time:29039ms step_avg:65.85ms +[2025-07-07 18:41:05] [Rank 0] step:441/10000 train_time:29039ms step_avg:65.85ms +[2025-07-07 18:41:06] [Rank 0] step:461/10000 train_time:30377ms step_avg:65.89ms +[2025-07-07 18:41:06] [Rank 0] step:461/10000 train_time:30377ms step_avg:65.89ms +[2025-07-07 18:41:08] [Rank 0] step:481/10000 train_time:31714ms step_avg:65.93ms +[2025-07-07 18:41:08] [Rank 0] step:481/10000 train_time:31714ms step_avg:65.93ms +[2025-07-07 18:41:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:41:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:41:10] [Rank 0] PRINT: step:500/10000 train_loss:7.0459 val_loss:4.8605 train_time:33659ms step_avg:67.32ms +[2025-07-07 18:41:10] [Rank 0] PRINT: step:500/10000 train_loss:7.0459 val_loss:4.8605 train_time:33659ms step_avg:67.32ms +[2025-07-07 18:41:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 18:41:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 18:41:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 18:41:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 18:41:10] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 18:41:10] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 18:46:34] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 18:46:34] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 18:46:34] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 18:46:34] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 18:46:34] [Rank 0] Total Loss: 5.9748 +[2025-07-07 18:46:34] [Rank 0] Total Loss: 5.9748 +[2025-07-07 18:46:34] [Rank 0] Total FTA: 0.0650 +[2025-07-07 18:46:34] [Rank 0] Total FTA: 0.0650 +[2025-07-07 18:46:34] [Rank 0] Group 0 Loss: 5.8690 +[2025-07-07 18:46:34] [Rank 0] Group 0 Loss: 5.8690 +[2025-07-07 18:46:34] [Rank 0] Group 1 Loss: 6.0134 +[2025-07-07 18:46:34] [Rank 0] Group 1 Loss: 6.0134 +[2025-07-07 18:46:34] [Rank 0] Group 2 Loss: 6.1327 +[2025-07-07 18:46:34] [Rank 0] Group 2 Loss: 6.1327 +[2025-07-07 18:46:34] [Rank 0] Group 3 Loss: 5.9483 +[2025-07-07 18:46:34] [Rank 0] Group 3 Loss: 5.9483 +[2025-07-07 18:46:34] [Rank 0] Group 4 Loss: 6.0099 +[2025-07-07 18:46:34] [Rank 0] Group 4 Loss: 6.0099 +[2025-07-07 18:46:34] [Rank 0] Group 5 Loss: 5.9708 +[2025-07-07 18:46:34] [Rank 0] Group 5 Loss: 5.9708 +[2025-07-07 18:46:34] [Rank 0] Group 6 Loss: 5.9891 +[2025-07-07 18:46:34] [Rank 0] Group 6 Loss: 5.9891 +[2025-07-07 18:46:34] [Rank 0] Group 7 Loss: 5.9686 +[2025-07-07 18:46:34] [Rank 0] Group 7 Loss: 5.9686 +[2025-07-07 18:46:34] [Rank 0] Group 8 Loss: 5.9443 +[2025-07-07 18:46:34] [Rank 0] Group 8 Loss: 5.9443 +[2025-07-07 18:46:34] [Rank 0] Group 9 Loss: 5.9955 +[2025-07-07 18:46:34] [Rank 0] Group 9 Loss: 5.9955 +[2025-07-07 18:46:34] [Rank 0] Group 10 Loss: 5.9883 +[2025-07-07 18:46:34] [Rank 0] Group 10 Loss: 5.9883 +[2025-07-07 18:46:34] [Rank 0] Group 11 Loss: 5.9754 +[2025-07-07 18:46:34] [Rank 0] Group 11 Loss: 5.9754 +[2025-07-07 18:46:34] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-07 18:46:34] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-07 
18:46:34] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 18:46:34] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 18:46:34] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 18:46:34] [Rank 0] Group 2 FTA: 0.0000 +[2025-07-07 18:46:34] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 18:46:34] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 18:46:34] [Rank 0] Group 4 FTA: 0.0130 +[2025-07-07 18:46:34] [Rank 0] Group 4 FTA: 0.0130 +[2025-07-07 18:46:34] [Rank 0] Group 5 FTA: 0.0469 +[2025-07-07 18:46:34] [Rank 0] Group 5 FTA: 0.0469 +[2025-07-07 18:46:34] [Rank 0] Group 6 FTA: 0.0807 +[2025-07-07 18:46:34] [Rank 0] Group 6 FTA: 0.0807 +[2025-07-07 18:46:34] [Rank 0] Group 7 FTA: 0.0729 +[2025-07-07 18:46:34] [Rank 0] Group 7 FTA: 0.0729 +[2025-07-07 18:46:34] [Rank 0] Group 8 FTA: 0.0625 +[2025-07-07 18:46:34] [Rank 0] Group 8 FTA: 0.0625 +[2025-07-07 18:46:34] [Rank 0] Group 9 FTA: 0.0586 +[2025-07-07 18:46:34] [Rank 0] Group 9 FTA: 0.0586 +[2025-07-07 18:46:34] [Rank 0] Group 10 FTA: 0.0625 +[2025-07-07 18:46:34] [Rank 0] Group 10 FTA: 0.0625 +[2025-07-07 18:46:34] [Rank 0] Group 11 FTA: 0.0547 +[2025-07-07 18:46:34] [Rank 0] Group 11 FTA: 0.0547 +[2025-07-07 18:46:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 18:46:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 18:46:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png +[2025-07-07 18:46:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png +[2025-07-07 18:46:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png +[2025-07-07 18:46:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png +[2025-07-07 18:46:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png +[2025-07-07 18:46:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png +[2025-07-07 18:46:35] [Rank 0] step:501/10000 train_time:33668ms step_avg:67.20ms +[2025-07-07 18:46:35] [Rank 0] step:501/10000 train_time:33668ms step_avg:67.20ms +[2025-07-07 18:46:37] [Rank 0] step:521/10000 train_time:34400ms step_avg:66.03ms +[2025-07-07 18:46:37] [Rank 0] step:521/10000 train_time:34400ms step_avg:66.03ms +[2025-07-07 18:46:38] [Rank 0] step:541/10000 train_time:35988ms step_avg:66.52ms +[2025-07-07 18:46:38] [Rank 0] step:541/10000 train_time:35988ms step_avg:66.52ms +[2025-07-07 18:46:39] [Rank 0] step:561/10000 train_time:37119ms step_avg:66.17ms +[2025-07-07 18:46:39] [Rank 0] step:561/10000 train_time:37119ms step_avg:66.17ms +[2025-07-07 18:46:41] [Rank 0] step:581/10000 train_time:38449ms step_avg:66.18ms +[2025-07-07 18:46:41] [Rank 0] step:581/10000 train_time:38449ms step_avg:66.18ms +[2025-07-07 18:46:42] [Rank 0] step:601/10000 train_time:39779ms step_avg:66.19ms +[2025-07-07 18:46:42] [Rank 0] step:601/10000 train_time:39779ms step_avg:66.19ms +[2025-07-07 18:46:43] [Rank 0] step:621/10000 train_time:41120ms step_avg:66.22ms +[2025-07-07 18:46:43] [Rank 0] step:621/10000 train_time:41120ms 
step_avg:66.22ms +[2025-07-07 18:46:45] [Rank 0] step:641/10000 train_time:42450ms step_avg:66.23ms +[2025-07-07 18:46:45] [Rank 0] step:641/10000 train_time:42450ms step_avg:66.23ms +[2025-07-07 18:46:46] [Rank 0] step:661/10000 train_time:43782ms step_avg:66.24ms +[2025-07-07 18:46:46] [Rank 0] step:661/10000 train_time:43782ms step_avg:66.24ms +[2025-07-07 18:46:47] [Rank 0] step:681/10000 train_time:45114ms step_avg:66.25ms +[2025-07-07 18:46:47] [Rank 0] step:681/10000 train_time:45114ms step_avg:66.25ms +[2025-07-07 18:46:49] [Rank 0] step:701/10000 train_time:46446ms step_avg:66.26ms +[2025-07-07 18:46:49] [Rank 0] step:701/10000 train_time:46446ms step_avg:66.26ms +[2025-07-07 18:46:50] [Rank 0] step:721/10000 train_time:47828ms step_avg:66.34ms +[2025-07-07 18:46:50] [Rank 0] step:721/10000 train_time:47828ms step_avg:66.34ms +[2025-07-07 18:46:51] [Rank 0] step:741/10000 train_time:49154ms step_avg:66.33ms +[2025-07-07 18:46:51] [Rank 0] step:741/10000 train_time:49154ms step_avg:66.33ms +[2025-07-07 18:46:53] [Rank 0] step:761/10000 train_time:50493ms step_avg:66.35ms +[2025-07-07 18:46:53] [Rank 0] step:761/10000 train_time:50493ms step_avg:66.35ms +[2025-07-07 18:46:54] [Rank 0] step:781/10000 train_time:51838ms step_avg:66.37ms +[2025-07-07 18:46:54] [Rank 0] step:781/10000 train_time:51838ms step_avg:66.37ms +[2025-07-07 18:46:55] [Rank 0] step:801/10000 train_time:53182ms step_avg:66.39ms +[2025-07-07 18:46:55] [Rank 0] step:801/10000 train_time:53182ms step_avg:66.39ms +[2025-07-07 18:46:57] [Rank 0] step:821/10000 train_time:54526ms step_avg:66.41ms +[2025-07-07 18:46:57] [Rank 0] step:821/10000 train_time:54526ms step_avg:66.41ms +[2025-07-07 18:46:58] [Rank 0] step:841/10000 train_time:55870ms step_avg:66.43ms +[2025-07-07 18:46:58] [Rank 0] step:841/10000 train_time:55870ms step_avg:66.43ms +[2025-07-07 18:46:59] [Rank 0] step:861/10000 train_time:57215ms step_avg:66.45ms +[2025-07-07 18:46:59] [Rank 0] step:861/10000 train_time:57215ms step_avg:66.45ms +[2025-07-07 18:47:01] [Rank 0] step:881/10000 train_time:58562ms step_avg:66.47ms +[2025-07-07 18:47:01] [Rank 0] step:881/10000 train_time:58562ms step_avg:66.47ms +[2025-07-07 18:47:02] [Rank 0] step:901/10000 train_time:60164ms step_avg:66.77ms +[2025-07-07 18:47:02] [Rank 0] step:901/10000 train_time:60164ms step_avg:66.77ms +[2025-07-07 18:47:04] [Rank 0] step:921/10000 train_time:61323ms step_avg:66.58ms +[2025-07-07 18:47:04] [Rank 0] step:921/10000 train_time:61323ms step_avg:66.58ms +[2025-07-07 18:47:05] [Rank 0] step:941/10000 train_time:62671ms step_avg:66.60ms +[2025-07-07 18:47:05] [Rank 0] step:941/10000 train_time:62671ms step_avg:66.60ms +[2025-07-07 18:47:06] [Rank 0] step:961/10000 train_time:64017ms step_avg:66.62ms +[2025-07-07 18:47:06] [Rank 0] step:961/10000 train_time:64017ms step_avg:66.62ms +[2025-07-07 18:47:08] [Rank 0] step:981/10000 train_time:65364ms step_avg:66.63ms +[2025-07-07 18:47:08] [Rank 0] step:981/10000 train_time:65364ms step_avg:66.63ms +[2025-07-07 18:47:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:47:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 18:47:10] [Rank 0] PRINT: step:1000/10000 train_loss:3.5667 val_loss:2.5658 train_time:67327ms step_avg:67.33ms +[2025-07-07 18:47:10] [Rank 0] PRINT: step:1000/10000 train_loss:3.5667 val_loss:2.5658 train_time:67327ms step_avg:67.33ms +[2025-07-07 18:47:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 18:47:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 18:47:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 18:47:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 18:47:10] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 18:47:10] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 18:52:34] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 18:52:34] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 18:52:34] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 18:52:34] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 18:52:34] [Rank 0] Total Loss: 4.5107 +[2025-07-07 18:52:34] [Rank 0] Total Loss: 4.5107 +[2025-07-07 18:52:34] [Rank 0] Total FTA: 0.0886 +[2025-07-07 18:52:34] [Rank 0] Total FTA: 0.0886 +[2025-07-07 18:52:34] [Rank 0] Group 0 Loss: 4.5396 +[2025-07-07 18:52:34] [Rank 0] Group 0 Loss: 4.5396 +[2025-07-07 18:52:34] [Rank 0] Group 1 Loss: 4.5201 +[2025-07-07 18:52:34] [Rank 0] Group 1 Loss: 4.5201 +[2025-07-07 18:52:34] [Rank 0] Group 2 Loss: 4.5296 +[2025-07-07 18:52:34] [Rank 0] Group 2 Loss: 4.5296 +[2025-07-07 18:52:34] [Rank 0] Group 3 Loss: 4.5462 +[2025-07-07 18:52:34] [Rank 0] Group 3 Loss: 4.5462 +[2025-07-07 18:52:34] [Rank 0] Group 4 Loss: 4.5140 +[2025-07-07 18:52:34] [Rank 0] Group 4 Loss: 4.5140 +[2025-07-07 18:52:34] [Rank 0] Group 5 Loss: 4.4590 +[2025-07-07 18:52:34] [Rank 0] Group 5 Loss: 4.4590 +[2025-07-07 18:52:34] [Rank 0] Group 6 Loss: 4.4665 +[2025-07-07 18:52:34] [Rank 0] Group 6 Loss: 4.4665 +[2025-07-07 18:52:34] [Rank 0] Group 7 Loss: 4.5359 +[2025-07-07 18:52:34] [Rank 0] Group 7 Loss: 4.5359 +[2025-07-07 18:52:34] [Rank 0] Group 8 Loss: 4.4553 +[2025-07-07 18:52:34] [Rank 0] Group 8 Loss: 4.4553 +[2025-07-07 18:52:34] [Rank 0] Group 9 Loss: 4.4692 +[2025-07-07 18:52:34] [Rank 0] Group 9 Loss: 4.4692 +[2025-07-07 18:52:34] [Rank 0] Group 10 Loss: 4.4986 +[2025-07-07 18:52:34] [Rank 0] Group 10 Loss: 4.4986 +[2025-07-07 18:52:34] [Rank 0] Group 11 Loss: 4.5278 +[2025-07-07 18:52:34] [Rank 0] Group 11 Loss: 4.5278 +[2025-07-07 18:52:34] [Rank 0] Group 0 FTA: 0.1599 +[2025-07-07 18:52:34] [Rank 0] Group 0 FTA: 0.1599 +[2025-07-07 18:52:34] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 18:52:34] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 18:52:34] [Rank 0] Group 2 FTA: 0.0938 +[2025-07-07 18:52:34] [Rank 0] Group 2 FTA: 0.0938 +[2025-07-07 18:52:34] [Rank 0] Group 3 FTA: 0.0964 +[2025-07-07 18:52:34] [Rank 0] Group 3 FTA: 0.0964 +[2025-07-07 18:52:34] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 18:52:34] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 18:52:34] [Rank 0] Group 5 FTA: 0.0391 +[2025-07-07 18:52:34] [Rank 0] Group 5 FTA: 0.0391 +[2025-07-07 18:52:34] [Rank 0] Group 6 FTA: 0.0703 +[2025-07-07 18:52:34] [Rank 0] Group 6 FTA: 0.0703 +[2025-07-07 18:52:34] [Rank 0] Group 7 FTA: 0.0885 +[2025-07-07 18:52:34] [Rank 0] Group 7 FTA: 0.0885 +[2025-07-07 18:52:34] [Rank 0] Group 8 FTA: 0.0755 +[2025-07-07 18:52:34] [Rank 0] Group 8 FTA: 0.0755 +[2025-07-07 18:52:34] [Rank 0] Group 9 FTA: 0.1133 
+[2025-07-07 18:52:34] [Rank 0] Group 9 FTA: 0.1133 +[2025-07-07 18:52:34] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 18:52:34] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 18:52:34] [Rank 0] Group 11 FTA: 0.1113 +[2025-07-07 18:52:34] [Rank 0] Group 11 FTA: 0.1113 +[2025-07-07 18:52:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 18:52:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png +[2025-07-07 18:52:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png +[2025-07-07 18:52:35] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png +[2025-07-07 18:52:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png +[2025-07-07 18:52:35] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png +[2025-07-07 18:52:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png +[2025-07-07 18:52:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png +[2025-07-07 18:52:36] [Rank 0] step:1001/10000 train_time:67337ms step_avg:67.27ms +[2025-07-07 18:52:36] [Rank 0] step:1001/10000 train_time:67337ms step_avg:67.27ms +[2025-07-07 18:52:37] [Rank 0] step:1021/10000 train_time:68073ms step_avg:66.67ms +[2025-07-07 18:52:37] [Rank 0] step:1021/10000 train_time:68073ms step_avg:66.67ms +[2025-07-07 18:52:38] [Rank 0] step:1041/10000 train_time:69412ms step_avg:66.68ms +[2025-07-07 18:52:38] [Rank 0] step:1041/10000 train_time:69412ms step_avg:66.68ms +[2025-07-07 18:52:40] [Rank 0] step:1061/10000 train_time:70750ms step_avg:66.68ms +[2025-07-07 18:52:40] [Rank 0] step:1061/10000 train_time:70750ms step_avg:66.68ms +[2025-07-07 18:52:41] [Rank 0] step:1081/10000 train_time:72089ms step_avg:66.69ms +[2025-07-07 18:52:41] [Rank 0] step:1081/10000 train_time:72089ms step_avg:66.69ms +[2025-07-07 18:52:42] [Rank 0] step:1101/10000 train_time:73479ms step_avg:66.74ms +[2025-07-07 18:52:42] [Rank 0] step:1101/10000 train_time:73479ms step_avg:66.74ms +[2025-07-07 18:52:44] [Rank 0] step:1121/10000 train_time:74817ms step_avg:66.74ms +[2025-07-07 18:52:44] [Rank 0] step:1121/10000 train_time:74817ms step_avg:66.74ms +[2025-07-07 18:52:45] [Rank 0] step:1141/10000 train_time:76156ms step_avg:66.74ms +[2025-07-07 18:52:45] [Rank 0] step:1141/10000 train_time:76156ms step_avg:66.74ms +[2025-07-07 18:52:46] [Rank 0] step:1161/10000 train_time:77495ms step_avg:66.75ms +[2025-07-07 18:52:46] [Rank 0] step:1161/10000 train_time:77495ms step_avg:66.75ms +[2025-07-07 18:52:48] [Rank 0] step:1181/10000 train_time:78835ms step_avg:66.75ms +[2025-07-07 18:52:48] [Rank 0] step:1181/10000 train_time:78835ms step_avg:66.75ms +[2025-07-07 18:52:49] [Rank 0] step:1201/10000 train_time:80175ms step_avg:66.76ms +[2025-07-07 18:52:49] [Rank 0] step:1201/10000 train_time:80175ms step_avg:66.76ms +[2025-07-07 18:52:50] [Rank 0] step:1221/10000 train_time:81517ms step_avg:66.76ms +[2025-07-07 18:52:50] [Rank 0] step:1221/10000 train_time:81517ms step_avg:66.76ms 
+[2025-07-07 18:52:52] [Rank 0] step:1241/10000 train_time:82858ms step_avg:66.77ms +[2025-07-07 18:52:52] [Rank 0] step:1241/10000 train_time:82858ms step_avg:66.77ms +[2025-07-07 18:52:53] [Rank 0] step:1261/10000 train_time:84199ms step_avg:66.77ms +[2025-07-07 18:52:53] [Rank 0] step:1261/10000 train_time:84199ms step_avg:66.77ms +[2025-07-07 18:52:55] [Rank 0] step:1281/10000 train_time:85609ms step_avg:66.83ms +[2025-07-07 18:52:55] [Rank 0] step:1281/10000 train_time:85609ms step_avg:66.83ms +[2025-07-07 18:52:56] [Rank 0] step:1301/10000 train_time:86951ms step_avg:66.83ms +[2025-07-07 18:52:56] [Rank 0] step:1301/10000 train_time:86951ms step_avg:66.83ms +[2025-07-07 18:52:57] [Rank 0] step:1321/10000 train_time:88295ms step_avg:66.84ms +[2025-07-07 18:52:57] [Rank 0] step:1321/10000 train_time:88295ms step_avg:66.84ms +[2025-07-07 18:52:59] [Rank 0] step:1341/10000 train_time:89643ms step_avg:66.85ms +[2025-07-07 18:52:59] [Rank 0] step:1341/10000 train_time:89643ms step_avg:66.85ms +[2025-07-07 18:53:00] [Rank 0] step:1361/10000 train_time:90990ms step_avg:66.85ms +[2025-07-07 18:53:00] [Rank 0] step:1361/10000 train_time:90990ms step_avg:66.85ms +[2025-07-07 18:53:01] [Rank 0] step:1381/10000 train_time:92335ms step_avg:66.86ms +[2025-07-07 18:53:01] [Rank 0] step:1381/10000 train_time:92335ms step_avg:66.86ms +[2025-07-07 18:53:03] [Rank 0] step:1401/10000 train_time:93680ms step_avg:66.87ms +[2025-07-07 18:53:03] [Rank 0] step:1401/10000 train_time:93680ms step_avg:66.87ms +[2025-07-07 18:53:04] [Rank 0] step:1421/10000 train_time:95029ms step_avg:66.87ms +[2025-07-07 18:53:04] [Rank 0] step:1421/10000 train_time:95029ms step_avg:66.87ms +[2025-07-07 18:53:05] [Rank 0] step:1441/10000 train_time:97045ms step_avg:67.35ms +[2025-07-07 18:53:05] [Rank 0] step:1441/10000 train_time:97045ms step_avg:67.35ms +[2025-07-07 18:53:07] [Rank 0] step:1461/10000 train_time:97771ms step_avg:66.92ms +[2025-07-07 18:53:07] [Rank 0] step:1461/10000 train_time:97771ms step_avg:66.92ms +[2025-07-07 18:53:08] [Rank 0] step:1481/10000 train_time:99117ms step_avg:66.93ms +[2025-07-07 18:53:08] [Rank 0] step:1481/10000 train_time:99117ms step_avg:66.93ms +[2025-07-07 18:53:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:53:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:53:10] [Rank 0] PRINT: step:1500/10000 train_loss:2.1331 val_loss:1.8603 train_time:101075ms step_avg:67.38ms +[2025-07-07 18:53:10] [Rank 0] PRINT: step:1500/10000 train_loss:2.1331 val_loss:1.8603 train_time:101075ms step_avg:67.38ms +[2025-07-07 18:53:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 18:53:10] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 18:53:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 18:53:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 18:53:10] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:58:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:58:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:58:36] [Rank 0] Total Loss: 4.3186
+[2025-07-07 18:58:36] [Rank 0] Total FTA: 0.0881
+[2025-07-07 18:58:36] [Rank 0] Group 0 Loss: 4.4628
+[2025-07-07 18:58:36] [Rank 0] Group 1 Loss: 4.2152
+[2025-07-07 18:58:36] [Rank 0] Group 2 Loss: 4.1899
+[2025-07-07 18:58:36] [Rank 0] Group 3 Loss: 4.2781
+[2025-07-07 18:58:36] [Rank 0] Group 4 Loss: 4.3079
+[2025-07-07 18:58:36] [Rank 0] Group 5 Loss: 4.2805
+[2025-07-07 18:58:36] [Rank 0] Group 6 Loss: 4.2301
+[2025-07-07 18:58:36] [Rank 0] Group 7 Loss: 4.3770
+[2025-07-07 18:58:36] [Rank 0] Group 8 Loss: 4.3321
+[2025-07-07 18:58:36] [Rank 0] Group 9 Loss: 4.2816
+[2025-07-07 18:58:36] [Rank 0] Group 10 Loss: 4.3434
+[2025-07-07 18:58:36] [Rank 0] Group 11 Loss: 4.3339
+[2025-07-07 18:58:36] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 18:58:36] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 18:58:36] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 18:58:36] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 18:58:36] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 18:58:36] [Rank 0] Group 5 FTA: 0.0755
+[2025-07-07 18:58:36] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 18:58:36] [Rank 0] Group 7 FTA: 0.1198
+[2025-07-07 18:58:36] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-07 18:58:36] [Rank 0] Group 9 FTA: 0.1055
+[2025-07-07 18:58:36] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-07 18:58:36] [Rank 0] Group 11 FTA: 0.0830
+[2025-07-07 18:58:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 18:58:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 18:58:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 18:58:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 18:58:38] [Rank 0] step:1501/10000 train_time:101084ms step_avg:67.34ms
+[2025-07-07 18:58:39] [Rank 0] step:1521/10000 train_time:101844ms step_avg:66.96ms
+[2025-07-07 18:58:41] [Rank 0] step:1541/10000 train_time:103181ms step_avg:66.96ms
+[2025-07-07 18:58:42] [Rank 0] step:1561/10000 train_time:104519ms step_avg:66.96ms
+[2025-07-07 18:58:43] [Rank 0] step:1581/10000 train_time:105857ms step_avg:66.96ms
+[2025-07-07 18:58:45] [Rank 0] step:1601/10000 train_time:107197ms step_avg:66.96ms
+[2025-07-07 18:58:46] [Rank 0] step:1621/10000 train_time:108586ms step_avg:66.99ms
+[2025-07-07 18:58:47] [Rank 0] step:1641/10000 train_time:109950ms step_avg:67.00ms
+[2025-07-07 18:58:49] [Rank 0] step:1661/10000 train_time:111290ms step_avg:67.00ms
+[2025-07-07 18:58:50] [Rank 0] step:1681/10000 train_time:112631ms step_avg:67.00ms
+[2025-07-07 18:58:51] [Rank 0] step:1701/10000 train_time:113972ms step_avg:67.00ms
+[2025-07-07 18:58:53] [Rank 0] step:1721/10000 train_time:115314ms step_avg:67.00ms
+[2025-07-07 18:58:54] [Rank 0] step:1741/10000 train_time:116655ms step_avg:67.00ms
+[2025-07-07 18:58:55] [Rank 0] step:1761/10000 train_time:117998ms step_avg:67.01ms
+[2025-07-07 18:58:57] [Rank 0] step:1781/10000 train_time:119341ms step_avg:67.01ms
+[2025-07-07 18:58:58] [Rank 0] step:1801/10000 train_time:120685ms step_avg:67.01ms
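The warning above about val_tokens is pure arithmetic. Assuming the validation loop drops the final partial batch, which the phrase "Some tokens might be missed" suggests, exactly half a batch goes unused on every validation pass:

    val_tokens, val_batch_size = 1966080, 262144
    full_batches = val_tokens // val_batch_size           # 7, since 1966080 / 262144 = 7.5
    missed = val_tokens - full_batches * val_batch_size   # 131072 tokens skipped per pass

The warning is benign but recurs before every 500-step validation in this log, since the two numbers never change.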
+[2025-07-07 18:59:00] [Rank 0] step:1821/10000 train_time:122076ms step_avg:67.04ms
+[2025-07-07 18:59:01] [Rank 0] step:1841/10000 train_time:123421ms step_avg:67.04ms
+[2025-07-07 18:59:02] [Rank 0] step:1861/10000 train_time:124767ms step_avg:67.04ms
+[2025-07-07 18:59:04] [Rank 0] step:1881/10000 train_time:126114ms step_avg:67.05ms
+[2025-07-07 18:59:05] [Rank 0] step:1901/10000 train_time:127459ms step_avg:67.05ms
+[2025-07-07 18:59:06] [Rank 0] step:1921/10000 train_time:128806ms step_avg:67.05ms
+[2025-07-07 18:59:08] [Rank 0] step:1941/10000 train_time:130152ms step_avg:67.05ms
+[2025-07-07 18:59:09] [Rank 0] step:1961/10000 train_time:131500ms step_avg:67.06ms
+[2025-07-07 18:59:10] [Rank 0] step:1981/10000 train_time:133099ms step_avg:67.19ms
+[2025-07-07 18:59:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:59:13] [Rank 0] PRINT: step:2000/10000 train_loss:1.7478 val_loss:1.6738 train_time:134850ms step_avg:67.42ms
+[2025-07-07 18:59:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:59:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:59:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:04:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:04:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:04:38] [Rank 0] Total Loss: 4.7714
+[2025-07-07 19:04:38] [Rank 0] Total FTA: 0.0895
+[2025-07-07 19:04:38] [Rank 0] Group 0 Loss: 4.9722
+[2025-07-07 19:04:38] [Rank 0] Group 1 Loss: 4.7674
+[2025-07-07 19:04:38] [Rank 0] Group 2 Loss: 4.4408
+[2025-07-07 19:04:38] [Rank 0] Group 3 Loss: 4.7833
+[2025-07-07 19:04:38] [Rank 0] Group 4 Loss: 4.7444
+[2025-07-07 19:04:38] [Rank 0] Group 5 Loss: 4.7192
+[2025-07-07 19:04:38] [Rank 0] Group 6 Loss: 4.7198
+[2025-07-07 19:04:38] [Rank 0] Group 7 Loss: 4.8054
+[2025-07-07 19:04:38] [Rank 0] Group 8 Loss: 4.7780
+[2025-07-07 19:04:38] [Rank 0] Group 9 Loss: 4.8019
+[2025-07-07 19:04:38] [Rank 0] Group 10 Loss: 4.7938
+[2025-07-07 19:04:38] [Rank 0] Group 11 Loss: 4.7567
+[2025-07-07 19:04:38] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 19:04:38] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 19:04:38] [Rank 0] Group 2 FTA: 0.0885
+[2025-07-07 19:04:38] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 19:04:38] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 19:04:38] [Rank 0] Group 5 FTA: 0.0781
+[2025-07-07 19:04:38] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-07 19:04:38] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 19:04:38] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-07 19:04:38] [Rank 0] Group 9 FTA: 0.1172
+[2025-07-07 19:04:38] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-07 19:04:38] [Rank 0] Group 11 FTA: 0.1064
+[2025-07-07 19:04:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:04:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:04:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:04:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:04:39] [Rank 0] step:2001/10000 train_time:134859ms step_avg:67.40ms
+[2025-07-07 19:04:41] [Rank 0] step:2021/10000 train_time:135622ms step_avg:67.11ms
+[2025-07-07 19:04:42] [Rank 0] step:2041/10000 train_time:136959ms step_avg:67.10ms
+[2025-07-07 19:04:44] [Rank 0] step:2061/10000 train_time:138297ms step_avg:67.10ms
+[2025-07-07 19:04:45] [Rank 0] step:2081/10000 train_time:139637ms step_avg:67.10ms
+[2025-07-07 19:04:46] [Rank 0] step:2101/10000 train_time:140976ms step_avg:67.10ms
+[2025-07-07 19:04:48] [Rank 0] step:2121/10000 train_time:142316ms step_avg:67.10ms
+[2025-07-07 19:04:49] [Rank 0] step:2141/10000 train_time:143654ms step_avg:67.10ms
+[2025-07-07 19:04:50] [Rank 0] step:2161/10000 train_time:144994ms step_avg:67.10ms
+[2025-07-07 19:04:52] [Rank 0] step:2181/10000 train_time:146399ms step_avg:67.12ms
+[2025-07-07 19:04:53] [Rank 0] step:2201/10000 train_time:147742ms step_avg:67.12ms
+[2025-07-07 19:04:54] [Rank 0] step:2221/10000 train_time:149083ms step_avg:67.12ms
+[2025-07-07 19:04:56] [Rank 0] step:2241/10000 train_time:150435ms step_avg:67.13ms
+[2025-07-07 19:04:57] [Rank 0] step:2261/10000 train_time:151801ms step_avg:67.14ms
+[2025-07-07 19:04:58] [Rank 0] step:2281/10000 train_time:153169ms step_avg:67.15ms
+[2025-07-07 19:05:00] [Rank 0] step:2301/10000 train_time:154536ms step_avg:67.16ms
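The detailed evaluations above ask the sampler for ~5000 examples but always report a set size of 5633. The quota logic is not shown in the log; a plausible scheme, sketched below purely as an assumption, is a per-class quota rounded up, which naturally overshoots the target:

    import math
    import random
    from collections import defaultdict

    def stratified_sample(examples, target=5000):
        # examples: iterable of (class_id, example) pairs; hypothetical sketch,
        # not the training script's actual sampler
        by_class = defaultdict(list)
        for cls, ex in examples:
            by_class[cls].append(ex)
        quota = math.ceil(target / len(by_class))  # round up per class
        picked = []
        for pool in by_class.values():
            picked.extend(random.sample(pool, min(quota, len(pool))))
        return picked  # can exceed target, consistent with 5633 > 5000

Whatever the exact rule, the set size is identical (5633) at every evaluation in this run, so the per-group losses and FTA numbers are comparable across steps.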
+[2025-07-07 19:05:01] [Rank 0] step:2321/10000 train_time:155905ms step_avg:67.17ms
+[2025-07-07 19:05:03] [Rank 0] step:2341/10000 train_time:157320ms step_avg:67.20ms
+[2025-07-07 19:05:04] [Rank 0] step:2361/10000 train_time:158642ms step_avg:67.19ms
+[2025-07-07 19:05:05] [Rank 0] step:2381/10000 train_time:160011ms step_avg:67.20ms
+[2025-07-07 19:05:07] [Rank 0] step:2401/10000 train_time:161381ms step_avg:67.21ms
+[2025-07-07 19:05:08] [Rank 0] step:2421/10000 train_time:162751ms step_avg:67.22ms
+[2025-07-07 19:05:09] [Rank 0] step:2441/10000 train_time:164121ms step_avg:67.24ms
+[2025-07-07 19:05:11] [Rank 0] step:2461/10000 train_time:165491ms step_avg:67.25ms
+[2025-07-07 19:05:12] [Rank 0] step:2481/10000 train_time:166861ms step_avg:67.26ms
+[2025-07-07 19:05:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:05:14] [Rank 0] PRINT: step:2500/10000 train_loss:1.6144 val_loss:1.5488 train_time:168855ms step_avg:67.54ms
+[2025-07-07 19:05:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:05:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:05:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:10:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:10:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:10:38] [Rank 0] Total Loss: 4.7829
+[2025-07-07 19:10:38] [Rank 0] Total FTA: 0.0843
+[2025-07-07 19:10:38] [Rank 0] Group 0 Loss: 4.8687
+[2025-07-07 19:10:38] [Rank 0] Group 1 Loss: 4.6564
+[2025-07-07 19:10:38] [Rank 0] Group 2 Loss: 4.5605
+[2025-07-07 19:10:38] [Rank 0] Group 3 Loss: 4.7584
+[2025-07-07 19:10:38] [Rank 0] Group 4 Loss: 4.8259
+[2025-07-07 19:10:38] [Rank 0] Group 5 Loss: 4.6794
+[2025-07-07 19:10:38] [Rank 0] Group 6 Loss: 4.7165
+[2025-07-07 19:10:38] [Rank 0] Group 7 Loss: 4.8579
+[2025-07-07 19:10:38] [Rank 0] Group 8 Loss: 4.7740
+[2025-07-07 19:10:38] [Rank 0] Group 9 Loss: 4.7756
+[2025-07-07 19:10:38] [Rank 0] Group 10 Loss: 4.8739
+[2025-07-07 19:10:38] [Rank 0] Group 11 Loss: 4.8377
+[2025-07-07 19:10:38] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-07 19:10:38] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 19:10:38] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-07 19:10:38] [Rank 0] Group 3 FTA: 0.0703
+[2025-07-07 19:10:38] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 19:10:38] [Rank 0] Group 5 FTA: 0.0521
+[2025-07-07 19:10:39] [Rank 0] Group 6 FTA: 0.0599
+[2025-07-07 19:10:39] [Rank 0] Group 7 FTA: 0.1016
+[2025-07-07 19:10:39] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-07 19:10:39] [Rank 0] Group 9 FTA: 0.1094
+[2025-07-07 19:10:39] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 19:10:39] [Rank 0] Group 11 FTA: 0.0830
+[2025-07-07 19:10:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:10:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:10:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:10:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:10:40] [Rank 0] step:2501/10000 train_time:168864ms step_avg:67.52ms
+[2025-07-07 19:10:41] [Rank 0] step:2521/10000 train_time:169623ms step_avg:67.28ms
+[2025-07-07 19:10:43] [Rank 0] step:2541/10000 train_time:171019ms step_avg:67.30ms
+[2025-07-07 19:10:44] [Rank 0] step:2561/10000 train_time:172382ms step_avg:67.31ms
+[2025-07-07 19:10:45] [Rank 0] step:2581/10000 train_time:173745ms step_avg:67.32ms
+[2025-07-07 19:10:47] [Rank 0] step:2601/10000 train_time:175109ms step_avg:67.32ms
+[2025-07-07 19:10:48] [Rank 0] step:2621/10000 train_time:176474ms step_avg:67.33ms
+[2025-07-07 19:10:50] [Rank 0] step:2641/10000 train_time:177837ms step_avg:67.34ms
+[2025-07-07 19:10:51] [Rank 0] step:2661/10000 train_time:179201ms step_avg:67.34ms
+[2025-07-07 19:10:52] [Rank 0] step:2681/10000 train_time:180573ms step_avg:67.35ms
+[2025-07-07 19:10:54] [Rank 0] step:2701/10000 train_time:181929ms step_avg:67.36ms
+[2025-07-07 19:10:55] [Rank 0] step:2721/10000 train_time:183295ms step_avg:67.36ms
+[2025-07-07 19:10:56] [Rank 0] step:2741/10000 train_time:184661ms step_avg:67.37ms
+[2025-07-07 19:10:58] [Rank 0] step:2761/10000 train_time:186029ms step_avg:67.38ms
+[2025-07-07 19:10:59] [Rank 0] step:2781/10000 train_time:187396ms step_avg:67.38ms
+[2025-07-07 19:11:00] [Rank 0] step:2801/10000 train_time:188764ms step_avg:67.39ms
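Note that train_time excludes evaluation. Step 2500 above is logged at 19:05:14 with train_time 168855 ms, and step 2501 at 19:10:40 with 168864 ms: about five and a half minutes of wall clock for 9 ms of train_time, the gap being the detailed evaluation (19:05:14 to 19:10:38). A quick check:

    from datetime import datetime

    fmt = "%Y-%m-%d %H:%M:%S"
    wall = (datetime.strptime("2025-07-07 19:10:40", fmt)
            - datetime.strptime("2025-07-07 19:05:14", fmt)).total_seconds()
    print(wall)              # 326.0 seconds of wall clock
    print(168864 - 168855)   # 9 ms of train_time over the same span

So step_avg measures pure training throughput, and each detailed evaluation adds roughly 5.5 minutes of untracked wall-clock time.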
+[2025-07-07 19:11:02] [Rank 0] step:2821/10000 train_time:190131ms step_avg:67.40ms
+[2025-07-07 19:11:03] [Rank 0] step:2841/10000 train_time:191498ms step_avg:67.41ms
+[2025-07-07 19:11:05] [Rank 0] step:2861/10000 train_time:192866ms step_avg:67.41ms
+[2025-07-07 19:11:06] [Rank 0] step:2881/10000 train_time:194235ms step_avg:67.42ms
+[2025-07-07 19:11:07] [Rank 0] step:2901/10000 train_time:195631ms step_avg:67.44ms
+[2025-07-07 19:11:09] [Rank 0] step:2921/10000 train_time:197000ms step_avg:67.44ms
+[2025-07-07 19:11:10] [Rank 0] step:2941/10000 train_time:198370ms step_avg:67.45ms
+[2025-07-07 19:11:11] [Rank 0] step:2961/10000 train_time:199741ms step_avg:67.46ms
+[2025-07-07 19:11:13] [Rank 0] step:2981/10000 train_time:201112ms step_avg:67.46ms
+[2025-07-07 19:11:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:11:15] [Rank 0] PRINT: step:3000/10000 train_loss:1.5004 val_loss:1.4548 train_time:203105ms step_avg:67.70ms
+[2025-07-07 19:11:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:11:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:11:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:16:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:16:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:16:39] [Rank 0] Total Loss: 4.7118
+[2025-07-07 19:16:39] [Rank 0] Total FTA: 0.0960
+[2025-07-07 19:16:39] [Rank 0] Group 0 Loss: 4.9802
+[2025-07-07 19:16:39] [Rank 0] Group 1 Loss: 4.5728
+[2025-07-07 19:16:39] [Rank 0] Group 2 Loss: 4.4729
+[2025-07-07 19:16:39] [Rank 0] Group 3 Loss: 4.7563
+[2025-07-07 19:16:39] [Rank 0] Group 4 Loss: 4.6726
+[2025-07-07 19:16:39] [Rank 0] Group 5 Loss: 4.6266
+[2025-07-07 19:16:39] [Rank 0] Group 6 Loss: 4.6140
+[2025-07-07 19:16:39] [Rank 0] Group 7 Loss: 4.7567
+[2025-07-07 19:16:39] [Rank 0] Group 8 Loss: 4.6994
+[2025-07-07 19:16:39] [Rank 0] Group 9 Loss: 4.6697
+[2025-07-07 19:16:39] [Rank 0] Group 10 Loss: 4.7209
+[2025-07-07 19:16:39] [Rank 0] Group 11 Loss: 4.7121
+[2025-07-07 19:16:39] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 19:16:39] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 19:16:39] [Rank 0] Group 2 FTA: 0.1771
+[2025-07-07 19:16:39] [Rank 0] Group 3 FTA: 0.0859
+[2025-07-07 19:16:39] [Rank 0] Group 4 FTA: 0.0182
+[2025-07-07 19:16:39] [Rank 0] Group 5 FTA: 0.0703
+[2025-07-07 19:16:39] [Rank 0] Group 6 FTA: 0.0755
+[2025-07-07 19:16:39] [Rank 0] Group 7 FTA: 0.1198
+[2025-07-07 19:16:39] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-07 19:16:39] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 19:16:39] [Rank 0] Group 10 FTA: 0.1055
+[2025-07-07 19:16:39] [Rank 0] Group 11 FTA: 0.0928
+[2025-07-07 19:16:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:16:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:16:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:16:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:16:41] [Rank 0] step:3001/10000 train_time:203114ms step_avg:67.68ms
+[2025-07-07 19:16:42] [Rank 0] step:3021/10000 train_time:203872ms step_avg:67.48ms
+[2025-07-07 19:16:43] [Rank 0] step:3041/10000 train_time:205233ms step_avg:67.49ms
+[2025-07-07 19:16:45] [Rank 0] step:3061/10000 train_time:206595ms step_avg:67.49ms
+[2025-07-07 19:16:46] [Rank 0] step:3081/10000 train_time:207993ms step_avg:67.51ms
+[2025-07-07 19:16:48] [Rank 0] step:3101/10000 train_time:209355ms step_avg:67.51ms
+[2025-07-07 19:16:49] [Rank 0] step:3121/10000 train_time:210718ms step_avg:67.52ms
+[2025-07-07 19:16:50] [Rank 0] step:3141/10000 train_time:212081ms step_avg:67.52ms
+[2025-07-07 19:16:52] [Rank 0] step:3161/10000 train_time:213446ms step_avg:67.52ms
+[2025-07-07 19:16:53] [Rank 0] step:3181/10000 train_time:214811ms step_avg:67.53ms
+[2025-07-07 19:16:54] [Rank 0] step:3201/10000 train_time:216178ms step_avg:67.53ms
+[2025-07-07 19:16:56] [Rank 0] step:3221/10000 train_time:217545ms step_avg:67.54ms
+[2025-07-07 19:16:57] [Rank 0] step:3241/10000 train_time:218918ms step_avg:67.55ms
+[2025-07-07 19:16:59] [Rank 0] step:3261/10000 train_time:220314ms step_avg:67.56ms
+[2025-07-07 19:17:00] [Rank 0] step:3281/10000 train_time:221680ms step_avg:67.56ms
+[2025-07-07 19:17:01] [Rank 0] step:3301/10000 train_time:223046ms step_avg:67.57ms
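The log never expands "FTA". Given the QA setting, a plausible reading is first-token accuracy: whether the model's top-1 prediction at the answer position equals the first token of the reference answer. A sketch under that assumption (PyTorch; all names illustrative):

    import torch

    def first_token_accuracy(logits: torch.Tensor, first_answer_ids: torch.Tensor) -> float:
        # logits: (batch, vocab_size) taken at each sample's first answer position
        # first_answer_ids: (batch,) gold first tokens
        preds = logits.argmax(dim=-1)
        return (preds == first_answer_ids).float().mean().item()

This reading would also explain why Group 1 FTA can sit at exactly 0.0000 across the evaluations above while its loss still moves: top-1 accuracy is a hard threshold on the same distribution the loss measures softly.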
+[2025-07-07 19:17:03] [Rank 0] step:3321/10000 train_time:224413ms step_avg:67.57ms
+[2025-07-07 19:17:04] [Rank 0] step:3341/10000 train_time:225782ms step_avg:67.58ms
+[2025-07-07 19:17:05] [Rank 0] step:3361/10000 train_time:227149ms step_avg:67.58ms
+[2025-07-07 19:17:07] [Rank 0] step:3381/10000 train_time:228517ms step_avg:67.59ms
+[2025-07-07 19:17:08] [Rank 0] step:3401/10000 train_time:229887ms step_avg:67.59ms
+[2025-07-07 19:17:10] [Rank 0] step:3421/10000 train_time:231257ms step_avg:67.60ms
+[2025-07-07 19:17:11] [Rank 0] step:3441/10000 train_time:232668ms step_avg:67.62ms
+[2025-07-07 19:17:12] [Rank 0] step:3461/10000 train_time:234039ms step_avg:67.62ms
+[2025-07-07 19:17:14] [Rank 0] step:3481/10000 train_time:235410ms step_avg:67.63ms
+[2025-07-07 19:17:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:17:16] [Rank 0] PRINT: step:3500/10000 train_loss:1.4132 val_loss:1.3743 train_time:237405ms step_avg:67.83ms
+[2025-07-07 19:17:16] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:17:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:17:16] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:22:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:22:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:22:40] [Rank 0] Total Loss: 4.7823
+[2025-07-07 19:22:40] [Rank 0] Total FTA: 0.0943
+[2025-07-07 19:22:40] [Rank 0] Group 0 Loss: 4.9595
+[2025-07-07 19:22:40] [Rank 0] Group 1 Loss: 4.5312
+[2025-07-07 19:22:40] [Rank 0] Group 2 Loss: 4.4791
+[2025-07-07 19:22:40] [Rank 0] Group 3 Loss: 4.9248
+[2025-07-07 19:22:40] [Rank 0] Group 4 Loss: 4.8229
+[2025-07-07 19:22:40] [Rank 0] Group 5 Loss: 4.7132
+[2025-07-07 19:22:40] [Rank 0] Group 6 Loss: 4.7296
+[2025-07-07 19:22:40] [Rank 0] Group 7 Loss: 4.8281
+[2025-07-07 19:22:40] [Rank 0] Group 8 Loss: 4.7706
+[2025-07-07 19:22:40] [Rank 0] Group 9 Loss: 4.7883
+[2025-07-07 19:22:40] [Rank 0] Group 10 Loss: 4.8013
+[2025-07-07 19:22:40] [Rank 0] Group 11 Loss: 4.8106
+[2025-07-07 19:22:40] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 19:22:40] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 19:22:40] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-07 19:22:40] [Rank 0] Group 3 FTA: 0.0443
+[2025-07-07 19:22:40] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 19:22:40] [Rank 0] Group 5 FTA: 0.1198
+[2025-07-07 19:22:40] [Rank 0] Group 6 FTA: 0.0599
+[2025-07-07 19:22:40] [Rank 0] Group 7 FTA: 0.1276
+[2025-07-07 19:22:40] [Rank 0] Group 8 FTA: 0.1328
+[2025-07-07 19:22:40] [Rank 0] Group 9 FTA: 0.1094
+[2025-07-07 19:22:40] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 19:22:40] [Rank 0] Group 11 FTA: 0.0977
+[2025-07-07 19:22:40] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:22:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:22:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:22:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:22:41] [Rank 0] step:3501/10000 train_time:237414ms step_avg:67.81ms
+[2025-07-07 19:22:43] [Rank 0] step:3521/10000 train_time:238190ms step_avg:67.65ms
+[2025-07-07 19:22:44] [Rank 0] step:3541/10000 train_time:239551ms step_avg:67.65ms
+[2025-07-07 19:22:45] [Rank 0] step:3561/10000 train_time:240913ms step_avg:67.65ms
+[2025-07-07 19:22:47] [Rank 0] step:3581/10000 train_time:242275ms step_avg:67.66ms
+[2025-07-07 19:22:48] [Rank 0] step:3601/10000 train_time:243888ms step_avg:67.73ms
+[2025-07-07 19:22:49] [Rank 0] step:3621/10000 train_time:245036ms step_avg:67.67ms
+[2025-07-07 19:22:51] [Rank 0] step:3641/10000 train_time:246398ms step_avg:67.67ms
+[2025-07-07 19:22:52] [Rank 0] step:3661/10000 train_time:247761ms step_avg:67.68ms
+[2025-07-07 19:22:54] [Rank 0] step:3681/10000 train_time:249125ms step_avg:67.68ms
+[2025-07-07 19:22:55] [Rank 0] step:3701/10000 train_time:250491ms step_avg:67.68ms
+[2025-07-07 19:22:56] [Rank 0] step:3721/10000 train_time:251857ms step_avg:67.69ms
+[2025-07-07 19:22:58] [Rank 0] step:3741/10000 train_time:253223ms step_avg:67.69ms
+[2025-07-07 19:22:59] [Rank 0] step:3761/10000 train_time:254591ms step_avg:67.69ms
+[2025-07-07 19:23:00] [Rank 0] step:3781/10000 train_time:256620ms step_avg:67.87ms
+[2025-07-07 19:23:02] [Rank 0] step:3801/10000 train_time:257357ms step_avg:67.71ms
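Each detailed evaluation ends with the same four "[✓] ... curve updated and saved" lines, re-rendering the full metric history into fixed PNG paths so each file always shows the run up to the latest step. A minimal sketch of that pattern (the output path is from the log; the matplotlib usage is assumed, not taken from the script):

    import matplotlib
    matplotlib.use("Agg")  # headless backend for a batch training job
    import matplotlib.pyplot as plt

    def save_total_loss_curve(steps, total_losses,
                              out="logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png"):
        fig, ax = plt.subplots()
        ax.plot(steps, total_losses, marker="o")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval total loss")
        fig.savefig(out)  # overwrites the same file on every evaluation
        plt.close(fig)

Overwriting one file per curve keeps the set of artifacts stable instead of accumulating a new PNG per evaluation.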
+[2025-07-07 19:23:03] [Rank 0] step:3821/10000 train_time:258727ms step_avg:67.71ms
+[2025-07-07 19:23:05] [Rank 0] step:3841/10000 train_time:260095ms step_avg:67.72ms
+[2025-07-07 19:23:06] [Rank 0] step:3861/10000 train_time:261464ms step_avg:67.72ms
+[2025-07-07 19:23:07] [Rank 0] step:3881/10000 train_time:262834ms step_avg:67.72ms
+[2025-07-07 19:23:09] [Rank 0] step:3901/10000 train_time:264205ms step_avg:67.73ms
+[2025-07-07 19:23:10] [Rank 0] step:3921/10000 train_time:265575ms step_avg:67.73ms
+[2025-07-07 19:23:11] [Rank 0] step:3941/10000 train_time:266949ms step_avg:67.74ms
+[2025-07-07 19:23:13] [Rank 0] step:3961/10000 train_time:269005ms step_avg:67.91ms
+[2025-07-07 19:23:14] [Rank 0] step:3981/10000 train_time:269743ms step_avg:67.76ms
+[2025-07-07 19:23:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:23:16] [Rank 0] PRINT: step:4000/10000 train_loss:1.3395 val_loss:1.3131 train_time:271735ms step_avg:67.93ms
+[2025-07-07 19:23:16] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:23:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:23:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:28:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:28:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:28:41] [Rank 0] Total Loss: 5.0213
+[2025-07-07 19:28:41] [Rank 0] Total FTA: 0.1422
+[2025-07-07 19:28:41] [Rank 0] Group 0 Loss: 5.3283
+[2025-07-07 19:28:41] [Rank 0] Group 1 Loss: 4.5270
+[2025-07-07 19:28:41] [Rank 0] Group 2 Loss: 4.7350
+[2025-07-07 19:28:41] [Rank 0] Group 3 Loss: 5.1886
+[2025-07-07 19:28:41] [Rank 0] Group 4 Loss: 5.0085
+[2025-07-07 19:28:41] [Rank 0] Group 5 Loss: 4.9198
+[2025-07-07 19:28:41] [Rank 0] Group 6 Loss: 4.9101
+[2025-07-07 19:28:41] [Rank 0] Group 7 Loss: 5.0492
+[2025-07-07 19:28:41] [Rank 0] Group 8 Loss: 5.0589
+[2025-07-07 19:28:41] [Rank 0] Group 9 Loss: 5.0190
+[2025-07-07 19:28:41] [Rank 0] Group 10 Loss: 5.0706
+[2025-07-07 19:28:41] [Rank 0] Group 11 Loss: 5.0569
+[2025-07-07 19:28:41] [Rank 0] Group 0 FTA: 0.1899
+[2025-07-07 19:28:41] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 19:28:41] [Rank 0] Group 2 FTA: 0.1901
+[2025-07-07 19:28:41] [Rank 0] Group 3 FTA: 0.1615
+[2025-07-07 19:28:41] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-07 19:28:41] [Rank 0] Group 5 FTA: 0.1432
+[2025-07-07 19:28:41] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 19:28:41] [Rank 0] Group 7 FTA: 0.1589
+[2025-07-07 19:28:41] [Rank 0] Group 8 FTA: 0.1224
+[2025-07-07 19:28:41] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-07 19:28:41] [Rank 0] Group 10 FTA: 0.1543
+[2025-07-07 19:28:41] [Rank 0] Group 11 FTA: 0.1377
+[2025-07-07 19:28:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:28:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:28:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:28:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:28:42] [Rank 0] step:4001/10000 train_time:271744ms step_avg:67.92ms
+[2025-07-07 19:28:44] [Rank 0] step:4021/10000 train_time:272507ms step_avg:67.77ms
+[2025-07-07 19:28:45] [Rank 0] step:4041/10000 train_time:273869ms step_avg:67.77ms
+[2025-07-07 19:28:46] [Rank 0] step:4061/10000 train_time:275230ms step_avg:67.77ms
+[2025-07-07 19:28:48] [Rank 0] step:4081/10000 train_time:276592ms step_avg:67.78ms
+[2025-07-07 19:28:49] [Rank 0] step:4101/10000 train_time:277955ms step_avg:67.78ms
+[2025-07-07 19:28:50] [Rank 0] step:4121/10000 train_time:279319ms step_avg:67.78ms
+[2025-07-07 19:28:52] [Rank 0] step:4141/10000 train_time:280683ms step_avg:67.78ms
+[2025-07-07 19:28:53] [Rank 0] step:4161/10000 train_time:282091ms step_avg:67.79ms
+[2025-07-07 19:28:55] [Rank 0] step:4181/10000 train_time:283457ms step_avg:67.80ms
+[2025-07-07 19:28:56] [Rank 0] step:4201/10000 train_time:284823ms step_avg:67.80ms
+[2025-07-07 19:28:57] [Rank 0] step:4221/10000 train_time:286190ms step_avg:67.80ms
+[2025-07-07 19:28:59] [Rank 0] step:4241/10000 train_time:287556ms step_avg:67.80ms
+[2025-07-07 19:29:00] [Rank 0] step:4261/10000 train_time:288923ms step_avg:67.81ms
+[2025-07-07 19:29:01] [Rank 0] step:4281/10000 train_time:290289ms step_avg:67.81ms
+[2025-07-07 19:29:03] [Rank 0] step:4301/10000 train_time:291658ms step_avg:67.81ms
+[2025-07-07 19:29:04] [Rank 0] step:4321/10000 train_time:293073ms step_avg:67.83ms
+[2025-07-07 19:29:06] [Rank 0] step:4341/10000 train_time:294444ms step_avg:67.83ms
+[2025-07-07 19:29:07] [Rank 0] step:4361/10000 train_time:295812ms step_avg:67.83ms
+[2025-07-07 19:29:08] [Rank 0] step:4381/10000 train_time:297182ms step_avg:67.83ms
+[2025-07-07 19:29:10] [Rank 0] step:4401/10000 train_time:298550ms step_avg:67.84ms
+[2025-07-07 19:29:11] [Rank 0] step:4421/10000 train_time:299920ms step_avg:67.84ms
+[2025-07-07 19:29:12] [Rank 0] step:4441/10000 train_time:301290ms step_avg:67.84ms
+[2025-07-07 19:29:14] [Rank 0] step:4461/10000 train_time:302661ms step_avg:67.85ms
+[2025-07-07 19:29:15] [Rank 0] step:4481/10000 train_time:304035ms step_avg:67.85ms
+[2025-07-07 19:29:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:29:17] [Rank 0] PRINT: step:4500/10000 train_loss:1.2841 val_loss:1.2680 train_time:306031ms step_avg:68.01ms
+[2025-07-07 19:29:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:29:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:29:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:34:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:34:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:34:41] [Rank 0] Total Loss: 5.0479
+[2025-07-07 19:34:41] [Rank 0] Total FTA: 0.2088
+[2025-07-07 19:34:41] [Rank 0] Group 0 Loss: 5.2397
+[2025-07-07 19:34:41] [Rank 0] Group 1 Loss: 4.6455
+[2025-07-07 19:34:41] [Rank 0] Group 2 Loss: 4.8042
+[2025-07-07 19:34:41] [Rank 0] Group 3 Loss: 5.0710
+[2025-07-07 19:34:41] [Rank 0] Group 4 Loss: 5.1164
+[2025-07-07 19:34:41] [Rank 0] Group 5 Loss: 5.0264
+[2025-07-07 19:34:41] [Rank 0] Group 6 Loss: 5.0264
+[2025-07-07 19:34:41] [Rank 0] Group 7 Loss: 5.1165
+[2025-07-07 19:34:41] [Rank 0] Group 8 Loss: 5.0715
+[2025-07-07 19:34:41] [Rank 0] Group 9 Loss: 5.0227
+[2025-07-07 19:34:41] [Rank 0] Group 10 Loss: 5.0808
+[2025-07-07 19:34:41] [Rank 0] Group 11 Loss: 5.0832
+[2025-07-07 19:34:41] [Rank 0] Group 0 FTA: 0.3433
+[2025-07-07 19:34:41] [Rank 0] Group 1 FTA: 0.1641
+[2025-07-07 19:34:41] [Rank 0] Group 2 FTA: 0.3203
+[2025-07-07 19:34:41] [Rank 0] Group 3 FTA: 0.1719
+[2025-07-07 19:34:41] [Rank 0] Group 4 FTA: 0.0990
+[2025-07-07 19:34:41] [Rank 0] Group 5 FTA: 0.1354
+[2025-07-07 19:34:41] [Rank 0] Group 6 FTA: 0.1693
+[2025-07-07 19:34:41] [Rank 0] Group 7 FTA: 0.2031
+[2025-07-07 19:34:41] [Rank 0] Group 8 FTA: 0.2005
+[2025-07-07 19:34:41] [Rank 0] Group 9 FTA: 0.1680
+[2025-07-07 19:34:41] [Rank 0] Group 10 FTA: 0.2070
+[2025-07-07 19:34:41] [Rank 0] Group 11 FTA: 0.1963
+[2025-07-07 19:34:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:34:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:34:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:34:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:34:43] [Rank 0] step:4501/10000 train_time:306048ms step_avg:68.00ms
+[2025-07-07 19:34:45] [Rank 0] step:4521/10000 train_time:307498ms step_avg:68.02ms
+[2025-07-07 19:34:46] [Rank 0] step:4541/10000 train_time:308860ms step_avg:68.02ms
+[2025-07-07 19:34:47] [Rank 0] step:4561/10000 train_time:310224ms step_avg:68.02ms
+[2025-07-07 19:34:49] [Rank 0] step:4581/10000 train_time:311587ms step_avg:68.02ms
+[2025-07-07 19:34:50] [Rank 0] step:4601/10000 train_time:312950ms step_avg:68.02ms
+[2025-07-07 19:34:52] [Rank 0] step:4621/10000 train_time:314315ms step_avg:68.02ms
+[2025-07-07 19:34:53] [Rank 0] step:4641/10000 train_time:315679ms step_avg:68.02ms
+[2025-07-07 19:34:54] [Rank 0] step:4661/10000 train_time:317045ms step_avg:68.02ms
+[2025-07-07 19:34:56] [Rank 0] step:4681/10000 train_time:319084ms step_avg:68.17ms
+[2025-07-07 19:34:57] [Rank 0] step:4701/10000 train_time:319820ms step_avg:68.03ms
+[2025-07-07 19:34:58] [Rank 0] step:4721/10000 train_time:321186ms step_avg:68.03ms
+[2025-07-07 19:35:00] [Rank 0] step:4741/10000 train_time:322555ms step_avg:68.04ms
+[2025-07-07 19:35:01] [Rank 0] step:4761/10000 train_time:323924ms step_avg:68.04ms
+[2025-07-07 19:35:03] [Rank 0] step:4781/10000 train_time:325294ms step_avg:68.04ms
+[2025-07-07 19:35:04] [Rank 0] step:4801/10000 train_time:326663ms step_avg:68.04ms
+[2025-07-07 19:35:05] [Rank 0] step:4821/10000 train_time:328033ms step_avg:68.04ms
+[2025-07-07 19:35:07] [Rank 0] step:4841/10000 train_time:329401ms step_avg:68.04ms
+[2025-07-07 19:35:08] [Rank 0] step:4861/10000 train_time:331436ms step_avg:68.18ms
+[2025-07-07 19:35:09] [Rank 0] step:4881/10000 train_time:332173ms step_avg:68.05ms
+[2025-07-07 19:35:11] [Rank 0] step:4901/10000 train_time:333543ms step_avg:68.06ms
+[2025-07-07 19:35:12] [Rank 0] step:4921/10000 train_time:334915ms step_avg:68.06ms
+[2025-07-07 19:35:14] [Rank 0] step:4941/10000 train_time:336286ms step_avg:68.06ms
+[2025-07-07 19:35:15] [Rank 0] step:4961/10000 train_time:337658ms step_avg:68.06ms
+[2025-07-07 19:35:16] [Rank 0] step:4981/10000 train_time:339031ms step_avg:68.06ms
+[2025-07-07 19:35:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:35:19] [Rank 0] PRINT: step:5000/10000 train_loss:1.2443 val_loss:1.2405 train_time:341027ms step_avg:68.21ms
+[2025-07-07 19:35:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:35:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
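The divisibility warning repeated before every validation pass can be quantified from the logged numbers alone: 1966080 / 262144 = 7.5, so only seven full batches fit and the remaining half batch of tokens is presumably skipped each pass. A quick check:

```python
val_tokens = 1966080      # from the warning above
val_batch_size = 262144   # from the warning above

full_batches = val_tokens // val_batch_size         # 7
missed = val_tokens - full_batches * val_batch_size
print(full_batches, missed)                         # 7 131072 -> half a batch never scored
```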
+[2025-07-07 19:35:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:40:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:40:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:40:43] [Rank 0] Total Loss: 5.1321
+[2025-07-07 19:40:43] [Rank 0] Total FTA: 0.2643
+[2025-07-07 19:40:43] [Rank 0] Group 0 Loss: 5.4148
+[2025-07-07 19:40:43] [Rank 0] Group 1 Loss: 4.7103
+[2025-07-07 19:40:43] [Rank 0] Group 2 Loss: 4.9013
+[2025-07-07 19:40:43] [Rank 0] Group 3 Loss: 5.0900
+[2025-07-07 19:40:43] [Rank 0] Group 4 Loss: 5.1708
+[2025-07-07 19:40:43] [Rank 0] Group 5 Loss: 5.1526
+[2025-07-07 19:40:43] [Rank 0] Group 6 Loss: 5.0955
+[2025-07-07 19:40:43] [Rank 0] Group 7 Loss: 5.1296
+[2025-07-07 19:40:43] [Rank 0] Group 8 Loss: 5.1447
+[2025-07-07 19:40:43] [Rank 0] Group 9 Loss: 5.1747
+[2025-07-07 19:40:43] [Rank 0] Group 10 Loss: 5.1585
+[2025-07-07 19:40:43] [Rank 0] Group 11 Loss: 5.1442
+[2025-07-07 19:40:43] [Rank 0] Group 0 FTA: 0.4967
+[2025-07-07 19:40:43] [Rank 0] Group 1 FTA: 0.4766
+[2025-07-07 19:40:43] [Rank 0] Group 2 FTA: 0.2370
+[2025-07-07 19:40:43] [Rank 0] Group 3 FTA: 0.1667
+[2025-07-07 19:40:43] [Rank 0] Group 4 FTA: 0.1875
+[2025-07-07 19:40:43] [Rank 0] Group 5 FTA: 0.2083
+[2025-07-07 19:40:43] [Rank 0] Group 6 FTA: 0.1745
+[2025-07-07 19:40:43] [Rank 0] Group 7 FTA: 0.2500
+[2025-07-07 19:40:43] [Rank 0] Group 8 FTA: 0.1667
+[2025-07-07 19:40:43] [Rank 0] Group 9 FTA: 0.1602
+[2025-07-07 19:40:43] [Rank 0] Group 10 FTA: 0.2500
+[2025-07-07 19:40:43] [Rank 0] Group 11 FTA: 0.2158
+[2025-07-07 19:40:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:40:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:40:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:40:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:40:44] [Rank 0] step:5001/10000 train_time:341035ms step_avg:68.19ms
+[2025-07-07 19:40:45] [Rank 0] step:5021/10000 train_time:341794ms step_avg:68.07ms
+[2025-07-07 19:40:47] [Rank 0] step:5041/10000 train_time:343207ms step_avg:68.08ms
+[2025-07-07 19:40:48] [Rank 0] step:5061/10000 train_time:344581ms step_avg:68.09ms
+[2025-07-07 19:40:50] [Rank 0] step:5081/10000 train_time:345946ms step_avg:68.09ms
+[2025-07-07 19:40:51] [Rank 0] step:5101/10000 train_time:347311ms step_avg:68.09ms
+[2025-07-07 19:40:52] [Rank 0] step:5121/10000 train_time:348677ms step_avg:68.09ms
+[2025-07-07 19:40:54] [Rank 0] step:5141/10000 train_time:350041ms step_avg:68.09ms
+[2025-07-07 19:40:55] [Rank 0] step:5161/10000 train_time:351408ms step_avg:68.09ms
+[2025-07-07 19:40:56] [Rank 0] step:5181/10000 train_time:352775ms step_avg:68.09ms
+[2025-07-07 19:40:58] [Rank 0] step:5201/10000 train_time:354142ms step_avg:68.09ms
+[2025-07-07 19:40:59] [Rank 0] step:5221/10000 train_time:355767ms step_avg:68.14ms
+[2025-07-07 19:41:01] [Rank 0] step:5241/10000 train_time:356933ms step_avg:68.10ms
+[2025-07-07 19:41:02] [Rank 0] step:5261/10000 train_time:358301ms step_avg:68.11ms
+[2025-07-07 19:41:03] [Rank 0] step:5281/10000 train_time:359670ms step_avg:68.11ms
+[2025-07-07 19:41:05] [Rank 0] step:5301/10000 train_time:361041ms step_avg:68.11ms
+[2025-07-07 19:41:06] [Rank 0] step:5321/10000 train_time:362410ms step_avg:68.11ms
+[2025-07-07 19:41:07] [Rank 0] step:5341/10000 train_time:363779ms step_avg:68.11ms
+[2025-07-07 19:41:09] [Rank 0] step:5361/10000 train_time:365149ms step_avg:68.11ms
+[2025-07-07 19:41:10] [Rank 0] step:5381/10000 train_time:366519ms step_avg:68.11ms
+[2025-07-07 19:41:12] [Rank 0] step:5401/10000 train_time:368139ms step_avg:68.16ms
+[2025-07-07 19:41:13] [Rank 0] step:5421/10000 train_time:369289ms step_avg:68.12ms
+[2025-07-07 19:41:14] [Rank 0] step:5441/10000 train_time:370661ms step_avg:68.12ms
+[2025-07-07 19:41:16] [Rank 0] step:5461/10000 train_time:372033ms step_avg:68.13ms
+[2025-07-07 19:41:17] [Rank 0] step:5481/10000 train_time:373405ms step_avg:68.13ms
+[2025-07-07 19:41:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:41:19] [Rank 0] PRINT: step:5500/10000 train_loss:1.2138 val_loss:1.2059 train_time:375400ms step_avg:68.25ms
+[2025-07-07 19:41:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:41:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
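The step_avg field is consistent with cumulative train_time divided by the number of steps completed, e.g. 375400 ms / 5500 ≈ 68.25 ms for the step:5500 entry above. A sanity check against two logged entries (the helper name is ours, not the script's):

```python
def step_avg_ms(train_time_ms: float, step: int) -> float:
    # Cumulative wall-clock training time divided by steps completed.
    return train_time_ms / step

assert round(step_avg_ms(375400, 5500), 2) == 68.25  # matches step:5500 above
assert round(step_avg_ms(341035, 5001), 2) == 68.19  # matches step:5001 above
```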
+[2025-07-07 19:41:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:46:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:46:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:46:43] [Rank 0] Total Loss: 5.0916
+[2025-07-07 19:46:43] [Rank 0] Total FTA: 0.2920
+[2025-07-07 19:46:43] [Rank 0] Group 0 Loss: 5.2816
+[2025-07-07 19:46:43] [Rank 0] Group 1 Loss: 4.8280
+[2025-07-07 19:46:43] [Rank 0] Group 2 Loss: 4.9507
+[2025-07-07 19:46:43] [Rank 0] Group 3 Loss: 5.0318
+[2025-07-07 19:46:43] [Rank 0] Group 4 Loss: 5.1200
+[2025-07-07 19:46:43] [Rank 0] Group 5 Loss: 5.0626
+[2025-07-07 19:46:43] [Rank 0] Group 6 Loss: 5.0102
+[2025-07-07 19:46:43] [Rank 0] Group 7 Loss: 5.1346
+[2025-07-07 19:46:43] [Rank 0] Group 8 Loss: 5.1605
+[2025-07-07 19:46:43] [Rank 0] Group 9 Loss: 5.0460
+[2025-07-07 19:46:43] [Rank 0] Group 10 Loss: 5.1005
+[2025-07-07 19:46:43] [Rank 0] Group 11 Loss: 5.1187
+[2025-07-07 19:46:43] [Rank 0] Group 0 FTA: 0.3225
+[2025-07-07 19:46:43] [Rank 0] Group 1 FTA: 0.3047
+[2025-07-07 19:46:43] [Rank 0] Group 2 FTA: 0.3880
+[2025-07-07 19:46:43] [Rank 0] Group 3 FTA: 0.2943
+[2025-07-07 19:46:43] [Rank 0] Group 4 FTA: 0.2344
+[2025-07-07 19:46:43] [Rank 0] Group 5 FTA: 0.2682
+[2025-07-07 19:46:43] [Rank 0] Group 6 FTA: 0.2500
+[2025-07-07 19:46:43] [Rank 0] Group 7 FTA: 0.2839
+[2025-07-07 19:46:43] [Rank 0] Group 8 FTA: 0.3516
+[2025-07-07 19:46:43] [Rank 0] Group 9 FTA: 0.2891
+[2025-07-07 19:46:43] [Rank 0] Group 10 FTA: 0.2676
+[2025-07-07 19:46:43] [Rank 0] Group 11 FTA: 0.2676
+[2025-07-07 19:46:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:46:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:46:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:46:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:46:45] [Rank 0] step:5501/10000 train_time:375409ms step_avg:68.24ms
+[2025-07-07 19:46:46] [Rank 0] step:5521/10000 train_time:376182ms step_avg:68.14ms
+[2025-07-07 19:46:47] [Rank 0] step:5541/10000 train_time:377543ms step_avg:68.14ms
+[2025-07-07 19:46:49] [Rank 0] step:5561/10000 train_time:378907ms step_avg:68.14ms
+[2025-07-07 19:46:50] [Rank 0] step:5581/10000 train_time:380272ms step_avg:68.14ms
+[2025-07-07 19:46:51] [Rank 0] step:5601/10000 train_time:381690ms step_avg:68.15ms
+[2025-07-07 19:46:53] [Rank 0] step:5621/10000 train_time:383056ms step_avg:68.15ms
+[2025-07-07 19:46:54] [Rank 0] step:5641/10000 train_time:384422ms step_avg:68.15ms
+[2025-07-07 19:46:56] [Rank 0] step:5661/10000 train_time:385789ms step_avg:68.15ms
+[2025-07-07 19:46:57] [Rank 0] step:5681/10000 train_time:387157ms step_avg:68.15ms
+[2025-07-07 19:46:58] [Rank 0] step:5701/10000 train_time:388524ms step_avg:68.15ms
+[2025-07-07 19:47:00] [Rank 0] step:5721/10000 train_time:389893ms step_avg:68.15ms
+[2025-07-07 19:47:01] [Rank 0] step:5741/10000 train_time:391260ms step_avg:68.15ms
+[2025-07-07 19:47:02] [Rank 0] step:5761/10000 train_time:392630ms step_avg:68.15ms
+[2025-07-07 19:47:04] [Rank 0] step:5781/10000 train_time:393999ms step_avg:68.15ms
+[2025-07-07 19:47:05] [Rank 0] step:5801/10000 train_time:395370ms step_avg:68.16ms
+[2025-07-07 19:47:06] [Rank 0] step:5821/10000 train_time:396741ms step_avg:68.16ms
+[2025-07-07 19:47:08] [Rank 0] step:5841/10000 train_time:398111ms step_avg:68.16ms
+[2025-07-07 19:47:09] [Rank 0] step:5861/10000 train_time:399481ms step_avg:68.16ms
+[2025-07-07 19:47:11] [Rank 0] step:5881/10000 train_time:400851ms step_avg:68.16ms
+[2025-07-07 19:47:12] [Rank 0] step:5901/10000 train_time:402222ms step_avg:68.16ms
+[2025-07-07 19:47:13] [Rank 0] step:5921/10000 train_time:403593ms step_avg:68.16ms
+[2025-07-07 19:47:15] [Rank 0] step:5941/10000 train_time:404963ms step_avg:68.16ms
+[2025-07-07 19:47:16] [Rank 0] step:5961/10000 train_time:406367ms step_avg:68.17ms
+[2025-07-07 19:47:17] [Rank 0] step:5981/10000 train_time:407737ms step_avg:68.17ms
+[2025-07-07 19:47:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:47:20] [Rank 0] PRINT: step:6000/10000 train_loss:1.1847 val_loss:1.1895 train_time:409732ms step_avg:68.29ms
+[2025-07-07 19:47:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:47:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
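The log never expands "FTA". For a QA evaluation like this one it most plausibly stands for first-token accuracy: whether the model's top-1 prediction at the first answer position matches the reference token. Under that assumption (and it is an assumption), a minimal PyTorch sketch:

```python
import torch

def first_token_accuracy(logits: torch.Tensor, targets: torch.Tensor) -> float:
    """Assumed metric: logits of shape (num_samples, vocab_size), taken at
    each sample's first answer position; targets of shape (num_samples,)
    holding the reference first answer token ids."""
    preds = logits.argmax(dim=-1)                    # top-1 prediction per sample
    return (preds == targets).float().mean().item()  # fraction of exact matches
```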
+[2025-07-07 19:47:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:52:47] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:52:47] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:52:47] [Rank 0] Total Loss: 5.2106
+[2025-07-07 19:52:47] [Rank 0] Total FTA: 0.3206
+[2025-07-07 19:52:47] [Rank 0] Group 0 Loss: 5.6014
+[2025-07-07 19:52:47] [Rank 0] Group 1 Loss: 5.0075
+[2025-07-07 19:52:47] [Rank 0] Group 2 Loss: 4.9974
+[2025-07-07 19:52:47] [Rank 0] Group 3 Loss: 5.1749
+[2025-07-07 19:52:47] [Rank 0] Group 4 Loss: 5.1242
+[2025-07-07 19:52:47] [Rank 0] Group 5 Loss: 5.1078
+[2025-07-07 19:52:47] [Rank 0] Group 6 Loss: 5.0885
+[2025-07-07 19:52:47] [Rank 0] Group 7 Loss: 5.2348
+[2025-07-07 19:52:47] [Rank 0] Group 8 Loss: 5.1264
+[2025-07-07 19:52:47] [Rank 0] Group 9 Loss: 5.1970
+[2025-07-07 19:52:47] [Rank 0] Group 10 Loss: 5.2168
+[2025-07-07 19:52:47] [Rank 0] Group 11 Loss: 5.2262
+[2025-07-07 19:52:47] [Rank 0] Group 0 FTA: 0.3459
+[2025-07-07 19:52:47] [Rank 0] Group 1 FTA: 0.5417
+[2025-07-07 19:52:47] [Rank 0] Group 2 FTA: 0.3229
+[2025-07-07 19:52:47] [Rank 0] Group 3 FTA: 0.1771
+[2025-07-07 19:52:47] [Rank 0] Group 4 FTA: 0.2891
+[2025-07-07 19:52:47] [Rank 0] Group 5 FTA: 0.2656
+[2025-07-07 19:52:47] [Rank 0] Group 6 FTA: 0.3021
+[2025-07-07 19:52:47] [Rank 0] Group 7 FTA: 0.3724
+[2025-07-07 19:52:47] [Rank 0] Group 8 FTA: 0.2917
+[2025-07-07 19:52:47] [Rank 0] Group 9 FTA: 0.3125
+[2025-07-07 19:52:47] [Rank 0] Group 10 FTA: 0.3125
+[2025-07-07 19:52:47] [Rank 0] Group 11 FTA: 0.3086
+[2025-07-07 19:52:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:52:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:52:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:52:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:52:49] [Rank 0] step:6001/10000 train_time:409741ms step_avg:68.28ms
+[2025-07-07 19:52:51] [Rank 0] step:6021/10000 train_time:410492ms step_avg:68.18ms
+[2025-07-07 19:52:52] [Rank 0] step:6041/10000 train_time:411854ms step_avg:68.18ms
+[2025-07-07 19:52:53] [Rank 0] step:6061/10000 train_time:413218ms step_avg:68.18ms
+[2025-07-07 19:52:55] [Rank 0] step:6081/10000 train_time:414582ms step_avg:68.18ms
+[2025-07-07 19:52:56] [Rank 0] step:6101/10000 train_time:415946ms step_avg:68.18ms
+[2025-07-07 19:52:58] [Rank 0] step:6121/10000 train_time:417991ms step_avg:68.29ms
+[2025-07-07 19:52:59] [Rank 0] step:6141/10000 train_time:418729ms step_avg:68.19ms
+[2025-07-07 19:53:00] [Rank 0] step:6161/10000 train_time:420096ms step_avg:68.19ms
+[2025-07-07 19:53:02] [Rank 0] step:6181/10000 train_time:421463ms step_avg:68.19ms
+[2025-07-07 19:53:03] [Rank 0] step:6201/10000 train_time:422829ms step_avg:68.19ms
+[2025-07-07 19:53:04] [Rank 0] step:6221/10000 train_time:424264ms step_avg:68.20ms
+[2025-07-07 19:53:06] [Rank 0] step:6241/10000 train_time:425632ms step_avg:68.20ms
+[2025-07-07 19:53:07] [Rank 0] step:6261/10000 train_time:427001ms step_avg:68.20ms
+[2025-07-07 19:53:09] [Rank 0] step:6281/10000 train_time:428370ms step_avg:68.20ms
+[2025-07-07 19:53:10] [Rank 0] step:6301/10000 train_time:429990ms step_avg:68.24ms
+[2025-07-07 19:53:11] [Rank 0] step:6321/10000 train_time:431145ms step_avg:68.21ms
+[2025-07-07 19:53:13] [Rank 0] step:6341/10000 train_time:432514ms step_avg:68.21ms
+[2025-07-07 19:53:14] [Rank 0] step:6361/10000 train_time:433883ms step_avg:68.21ms
+[2025-07-07 19:53:15] [Rank 0] step:6381/10000 train_time:435311ms step_avg:68.22ms
+[2025-07-07 19:53:17] [Rank 0] step:6401/10000 train_time:436681ms step_avg:68.22ms
+[2025-07-07 19:53:18] [Rank 0] step:6421/10000 train_time:438049ms step_avg:68.22ms
+[2025-07-07 19:53:20] [Rank 0] step:6441/10000 train_time:439420ms step_avg:68.22ms
+[2025-07-07 19:53:21] [Rank 0] step:6461/10000 train_time:440791ms step_avg:68.22ms
+[2025-07-07 19:53:22] [Rank 0] step:6481/10000 train_time:442161ms step_avg:68.22ms
+[2025-07-07 19:53:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:53:25] [Rank 0] PRINT: step:6500/10000 train_loss:1.1608 val_loss:1.1668 train_time:444185ms step_avg:68.34ms
+[2025-07-07 19:53:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:53:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
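The twelve Group lines printed at every evaluation suggest the per-sample losses and FTA hits are averaged within 12 fixed sample groups in addition to the overall mean. A sketch of that bookkeeping, with hypothetical names:

```python
from collections import defaultdict

def per_group_mean(values, group_ids):
    # Average a per-sample metric (a loss, or a 0/1 FTA hit) within each group id.
    sums, counts = defaultdict(float), defaultdict(int)
    for v, g in zip(values, group_ids):
        sums[g] += v
        counts[g] += 1
    return {g: sums[g] / counts[g] for g in sorted(sums)}

# per_group_mean(sample_losses, sample_groups)[0] would correspond to "Group 0 Loss".
```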
+[2025-07-07 19:53:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:58:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:58:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:58:51] [Rank 0] Total Loss: 5.1887
+[2025-07-07 19:58:51] [Rank 0] Total FTA: 0.3190
+[2025-07-07 19:58:51] [Rank 0] Group 0 Loss: 5.4048
+[2025-07-07 19:58:51] [Rank 0] Group 1 Loss: 4.8982
+[2025-07-07 19:58:51] [Rank 0] Group 2 Loss: 4.9497
+[2025-07-07 19:58:51] [Rank 0] Group 3 Loss: 5.1923
+[2025-07-07 19:58:51] [Rank 0] Group 4 Loss: 5.2576
+[2025-07-07 19:58:51] [Rank 0] Group 5 Loss: 5.1429
+[2025-07-07 19:58:51] [Rank 0] Group 6 Loss: 5.0750
+[2025-07-07 19:58:51] [Rank 0] Group 7 Loss: 5.2659
+[2025-07-07 19:58:51] [Rank 0] Group 8 Loss: 5.1616
+[2025-07-07 19:58:51] [Rank 0] Group 9 Loss: 5.1785
+[2025-07-07 19:58:51] [Rank 0] Group 10 Loss: 5.2366
+[2025-07-07 19:58:51] [Rank 0] Group 11 Loss: 5.2174
+[2025-07-07 19:58:51] [Rank 0] Group 0 FTA: 0.3446
+[2025-07-07 19:58:51] [Rank 0] Group 1 FTA: 0.4427
+[2025-07-07 19:58:51] [Rank 0] Group 2 FTA: 0.2500
+[2025-07-07 19:58:51] [Rank 0] Group 3 FTA: 0.1615
+[2025-07-07 19:58:51] [Rank 0] Group 4 FTA: 0.2578
+[2025-07-07 19:58:51] [Rank 0] Group 5 FTA: 0.3359
+[2025-07-07 19:58:51] [Rank 0] Group 6 FTA: 0.2969
+[2025-07-07 19:58:51] [Rank 0] Group 7 FTA: 0.3646
+[2025-07-07 19:58:51] [Rank 0] Group 8 FTA: 0.3359
+[2025-07-07 19:58:51] [Rank 0] Group 9 FTA: 0.3477
+[2025-07-07 19:58:51] [Rank 0] Group 10 FTA: 0.3223
+[2025-07-07 19:58:51] [Rank 0] Group 11 FTA: 0.3311
+[2025-07-07 19:58:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 19:58:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 19:58:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 19:58:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 19:58:53] [Rank 0] step:6501/10000 train_time:444193ms step_avg:68.33ms
+[2025-07-07 19:58:54] [Rank 0] step:6521/10000 train_time:444965ms step_avg:68.24ms
+[2025-07-07 19:58:55] [Rank 0] step:6541/10000 train_time:446327ms step_avg:68.24ms
+[2025-07-07 19:58:57] [Rank 0] step:6561/10000 train_time:447692ms step_avg:68.24ms
+[2025-07-07 19:58:58] [Rank 0] step:6581/10000 train_time:449056ms step_avg:68.24ms
+[2025-07-07 19:59:00] [Rank 0] step:6601/10000 train_time:450421ms step_avg:68.24ms
+[2025-07-07 19:59:01] [Rank 0] step:6621/10000 train_time:451787ms step_avg:68.24ms
+[2025-07-07 19:59:02] [Rank 0] step:6641/10000 train_time:453153ms step_avg:68.24ms
+[2025-07-07 19:59:04] [Rank 0] step:6661/10000 train_time:454518ms step_avg:68.24ms
+[2025-07-07 19:59:05] [Rank 0] step:6681/10000 train_time:455928ms step_avg:68.24ms
+[2025-07-07 19:59:06] [Rank 0] step:6701/10000 train_time:457296ms step_avg:68.24ms
+[2025-07-07 19:59:08] [Rank 0] step:6721/10000 train_time:458665ms step_avg:68.24ms
+[2025-07-07 19:59:09] [Rank 0] step:6741/10000 train_time:460033ms step_avg:68.24ms
+[2025-07-07 19:59:11] [Rank 0] step:6761/10000 train_time:461403ms step_avg:68.24ms
+[2025-07-07 19:59:12] [Rank 0] step:6781/10000 train_time:462774ms step_avg:68.25ms
+[2025-07-07 19:59:13] [Rank 0] step:6801/10000 train_time:464144ms step_avg:68.25ms
+[2025-07-07 19:59:15] [Rank 0] step:6821/10000 train_time:465515ms step_avg:68.25ms
+[2025-07-07 19:59:16] [Rank 0] step:6841/10000 train_time:467543ms step_avg:68.34ms
+[2025-07-07 19:59:17] [Rank 0] step:6861/10000 train_time:468280ms step_avg:68.25ms
+[2025-07-07 19:59:19] [Rank 0] step:6881/10000 train_time:469652ms step_avg:68.25ms
+[2025-07-07 19:59:20] [Rank 0] step:6901/10000 train_time:471023ms step_avg:68.25ms
+[2025-07-07 19:59:22] [Rank 0] step:6921/10000 train_time:472395ms step_avg:68.26ms
+[2025-07-07 19:59:23] [Rank 0] step:6941/10000 train_time:473770ms step_avg:68.26ms
+[2025-07-07 19:59:24] [Rank 0] step:6961/10000 train_time:475142ms step_avg:68.26ms
+[2025-07-07 19:59:26] [Rank 0] step:6981/10000 train_time:476515ms step_avg:68.26ms
+[2025-07-07 19:59:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:59:28] [Rank 0] PRINT: step:7000/10000 train_loss:1.1410 val_loss:1.1613 train_time:478514ms step_avg:68.36ms
+[2025-07-07 19:59:28] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:59:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
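Each evaluation rewrites the same four PNGs in place, so the files named above always contain the run's full history to date rather than a single snapshot. A hedged matplotlib sketch of that overwrite-in-place pattern (function and argument names are ours, not the script's):

```python
import matplotlib
matplotlib.use("Agg")  # render without a display, as on a headless training node
import matplotlib.pyplot as plt

def update_curve(steps, values, ylabel, out_path):
    # Redraw the entire history and overwrite the PNG so the newest file
    # always reflects every evaluation recorded so far.
    fig, ax = plt.subplots()
    ax.plot(steps, values, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    fig.savefig(out_path, dpi=120)
    plt.close(fig)
```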
+[2025-07-07 19:59:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:04:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:04:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:04:57] [Rank 0] Total Loss: 5.2422
+[2025-07-07 20:04:57] [Rank 0] Total FTA: 0.3643
+[2025-07-07 20:04:57] [Rank 0] Group 0 Loss: 5.5387
+[2025-07-07 20:04:57] [Rank 0] Group 1 Loss: 4.9133
+[2025-07-07 20:04:57] [Rank 0] Group 2 Loss: 4.9224
+[2025-07-07 20:04:57] [Rank 0] Group 3 Loss: 5.1723
+[2025-07-07 20:04:57] [Rank 0] Group 4 Loss: 5.1719
+[2025-07-07 20:04:57] [Rank 0] Group 5 Loss: 5.2067
+[2025-07-07 20:04:57] [Rank 0] Group 6 Loss: 5.2189
+[2025-07-07 20:04:57] [Rank 0] Group 7 Loss: 5.2970
+[2025-07-07 20:04:57] [Rank 0] Group 8 Loss: 5.2474
+[2025-07-07 20:04:57] [Rank 0] Group 9 Loss: 5.2459
+[2025-07-07 20:04:57] [Rank 0] Group 10 Loss: 5.2984
+[2025-07-07 20:04:57] [Rank 0] Group 11 Loss: 5.2857
+[2025-07-07 20:04:57] [Rank 0] Group 0 FTA: 0.1443
+[2025-07-07 20:04:57] [Rank 0] Group 1 FTA: 0.4583
+[2025-07-07 20:04:57] [Rank 0] Group 2 FTA: 0.4922
+[2025-07-07 20:04:57] [Rank 0] Group 3 FTA: 0.2969
+[2025-07-07 20:04:57] [Rank 0] Group 4 FTA: 0.3255
+[2025-07-07 20:04:57] [Rank 0] Group 5 FTA: 0.4297
+[2025-07-07 20:04:57] [Rank 0] Group 6 FTA: 0.4089
+[2025-07-07 20:04:57] [Rank 0] Group 7 FTA: 0.4193
+[2025-07-07 20:04:57] [Rank 0] Group 8 FTA: 0.4167
+[2025-07-07 20:04:57] [Rank 0] Group 9 FTA: 0.3984
+[2025-07-07 20:04:57] [Rank 0] Group 10 FTA: 0.3555
+[2025-07-07 20:04:57] [Rank 0] Group 11 FTA: 0.4004
+[2025-07-07 20:04:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 20:04:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 20:04:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 20:04:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 20:04:58] [Rank 0] step:7001/10000 train_time:478523ms step_avg:68.35ms
+[2025-07-07 20:05:00] [Rank 0] step:7021/10000 train_time:479280ms step_avg:68.26ms
+[2025-07-07 20:05:01] [Rank 0] step:7041/10000 train_time:480672ms step_avg:68.27ms
+[2025-07-07 20:05:03] [Rank 0] step:7061/10000 train_time:482043ms step_avg:68.27ms
+[2025-07-07 20:05:04] [Rank 0] step:7081/10000 train_time:483407ms step_avg:68.27ms
+[2025-07-07 20:05:05] [Rank 0] step:7101/10000 train_time:484773ms step_avg:68.27ms
+[2025-07-07 20:05:07] [Rank 0] step:7121/10000 train_time:486343ms step_avg:68.30ms
+[2025-07-07 20:05:08] [Rank 0] step:7141/10000 train_time:487687ms step_avg:68.29ms
+[2025-07-07 20:05:10] [Rank 0] step:7161/10000 train_time:489054ms step_avg:68.29ms
+[2025-07-07 20:05:11] [Rank 0] step:7181/10000 train_time:490420ms step_avg:68.29ms
+[2025-07-07 20:05:12] [Rank 0] step:7201/10000 train_time:491786ms step_avg:68.29ms
+[2025-07-07 20:05:14] [Rank 0] step:7221/10000 train_time:493187ms step_avg:68.30ms
+[2025-07-07 20:05:15] [Rank 0] step:7241/10000 train_time:494557ms step_avg:68.30ms
+[2025-07-07 20:05:16] [Rank 0] step:7261/10000 train_time:495926ms step_avg:68.30ms
+[2025-07-07 20:05:18] [Rank 0] step:7281/10000 train_time:497295ms step_avg:68.30ms
+[2025-07-07 20:05:19] [Rank 0] step:7301/10000 train_time:498836ms step_avg:68.32ms
+[2025-07-07 20:05:21] [Rank 0] step:7321/10000 train_time:500203ms step_avg:68.32ms
+[2025-07-07 20:05:22] [Rank 0] step:7341/10000 train_time:501570ms step_avg:68.32ms
+[2025-07-07 20:05:24] [Rank 0] step:7361/10000 train_time:502939ms step_avg:68.32ms
+[2025-07-07 20:05:25] [Rank 0] step:7381/10000 train_time:504561ms step_avg:68.36ms
+[2025-07-07 20:05:26] [Rank 0] step:7401/10000 train_time:505732ms step_avg:68.33ms
+[2025-07-07 20:05:28] [Rank 0] step:7421/10000 train_time:507102ms step_avg:68.33ms
+[2025-07-07 20:05:29] [Rank 0] step:7441/10000 train_time:508473ms step_avg:68.33ms
+[2025-07-07 20:05:30] [Rank 0] step:7461/10000 train_time:509845ms step_avg:68.33ms
+[2025-07-07 20:05:32] [Rank 0] step:7481/10000 train_time:511216ms step_avg:68.34ms
+[2025-07-07 20:05:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:05:34] [Rank 0] PRINT: step:7500/10000 train_loss:1.1235 val_loss:1.1456 train_time:513211ms step_avg:68.43ms
+[2025-07-07 20:05:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:05:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
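From the timestamps, each detailed evaluation costs about 5.5 minutes of wall clock (19:59:28 to 20:04:57 in the previous cycle), while the 500 training steps between evaluations account for only ~34 s of train_time, so these evaluations dominate the run's wall clock. The arithmetic:

```python
# Wall-clock cost of one detailed evaluation, from the timestamps above.
eval_start = 19 * 3600 + 59 * 60 + 28  # 19:59:28, "Starting Detailed Evaluation"
eval_end = 20 * 3600 + 4 * 60 + 57     # 20:04:57, "Detailed Evaluation Complete"
print(eval_end - eval_start)           # 329 s, roughly 5.5 minutes

# Training cost of the 500 steps between evaluations at ~68.3 ms/step.
print(500 * 0.0683)                    # ~34 s of train_time
```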
+[2025-07-07 20:05:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:11:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:11:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:11:00] [Rank 0] Total Loss: 5.1611
+[2025-07-07 20:11:00] [Rank 0] Total FTA: 0.3749
+[2025-07-07 20:11:00] [Rank 0] Group 0 Loss: 5.3943
+[2025-07-07 20:11:00] [Rank 0] Group 1 Loss: 4.8743
+[2025-07-07 20:11:00] [Rank 0] Group 2 Loss: 4.9094
+[2025-07-07 20:11:00] [Rank 0] Group 3 Loss: 5.1666
+[2025-07-07 20:11:00] [Rank 0] Group 4 Loss: 5.0898
+[2025-07-07 20:11:00] [Rank 0] Group 5 Loss: 5.1563
+[2025-07-07 20:11:00] [Rank 0] Group 6 Loss: 5.1355
+[2025-07-07 20:11:00] [Rank 0] Group 7 Loss: 5.2053
+[2025-07-07 20:11:00] [Rank 0] Group 8 Loss: 5.1736
+[2025-07-07 20:11:00] [Rank 0] Group 9 Loss: 5.1708
+[2025-07-07 20:11:00] [Rank 0] Group 10 Loss: 5.1656
+[2025-07-07 20:11:00] [Rank 0] Group 11 Loss: 5.1981
+[2025-07-07 20:11:00] [Rank 0] Group 0 FTA: 0.3420
+[2025-07-07 20:11:00] [Rank 0] Group 1 FTA: 0.4141
+[2025-07-07 20:11:00] [Rank 0] Group 2 FTA: 0.4531
+[2025-07-07 20:11:00] [Rank 0] Group 3 FTA: 0.2370
+[2025-07-07 20:11:00] [Rank 0] Group 4 FTA: 0.3229
+[2025-07-07 20:11:00] [Rank 0] Group 5 FTA: 0.4844
+[2025-07-07 20:11:00] [Rank 0] Group 6 FTA: 0.3385
+[2025-07-07 20:11:00] [Rank 0] Group 7 FTA: 0.3672
+[2025-07-07 20:11:00] [Rank 0] Group 8 FTA: 0.4062
+[2025-07-07 20:11:00] [Rank 0] Group 9 FTA: 0.3945
+[2025-07-07 20:11:00] [Rank 0] Group 10 FTA: 0.3633
+[2025-07-07 20:11:00] [Rank 0] Group 11 FTA: 0.3916
+[2025-07-07 20:11:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 20:11:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 20:11:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 20:11:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 20:11:02] [Rank 0] step:7501/10000 train_time:513219ms step_avg:68.42ms
+[2025-07-07 20:11:03] [Rank 0] step:7521/10000 train_time:513976ms step_avg:68.34ms
+[2025-07-07 20:11:05] [Rank 0] step:7541/10000 train_time:515338ms step_avg:68.34ms
+[2025-07-07 20:11:06] [Rank 0] step:7561/10000 train_time:516702ms step_avg:68.34ms
+[2025-07-07 20:11:07] [Rank 0] step:7581/10000 train_time:518098ms step_avg:68.34ms
+[2025-07-07 20:11:09] [Rank 0] step:7601/10000 train_time:519461ms step_avg:68.34ms
+[2025-07-07 20:11:10] [Rank 0] step:7621/10000 train_time:520827ms step_avg:68.34ms
+[2025-07-07 20:11:11] [Rank 0] step:7641/10000 train_time:522192ms step_avg:68.34ms
+[2025-07-07 20:11:13] [Rank 0] step:7661/10000 train_time:523749ms step_avg:68.37ms
+[2025-07-07 20:11:14] [Rank 0] step:7681/10000 train_time:525112ms step_avg:68.37ms
+[2025-07-07 20:11:16] [Rank 0] step:7701/10000 train_time:526477ms step_avg:68.36ms
+[2025-07-07 20:11:17] [Rank 0] step:7721/10000 train_time:527845ms step_avg:68.36ms
+[2025-07-07 20:11:18] [Rank 0] step:7741/10000 train_time:529213ms step_avg:68.36ms
+[2025-07-07 20:11:20] [Rank 0] step:7761/10000 train_time:530612ms step_avg:68.37ms
+[2025-07-07 20:11:21] [Rank 0] step:7781/10000 train_time:531980ms step_avg:68.37ms
+[2025-07-07 20:11:23] [Rank 0] step:7801/10000 train_time:533349ms step_avg:68.37ms
+[2025-07-07 20:11:24] [Rank 0] step:7821/10000 train_time:534718ms step_avg:68.37ms
+[2025-07-07 20:11:25] [Rank 0] step:7841/10000 train_time:536087ms step_avg:68.37ms
+[2025-07-07 20:11:27] [Rank 0] step:7861/10000 train_time:537455ms step_avg:68.37ms
+[2025-07-07 20:11:28] [Rank 0] step:7881/10000 train_time:538825ms step_avg:68.37ms
+[2025-07-07 20:11:29] [Rank 0] step:7901/10000 train_time:540195ms step_avg:68.37ms
+[2025-07-07 20:11:31] [Rank 0] step:7921/10000 train_time:541566ms step_avg:68.37ms
+[2025-07-07 20:11:32] [Rank 0] step:7941/10000 train_time:542965ms step_avg:68.37ms
+[2025-07-07 20:11:34] [Rank 0] step:7961/10000 train_time:544334ms step_avg:68.38ms
+[2025-07-07 20:11:35] [Rank 0] step:7981/10000 train_time:545708ms step_avg:68.38ms
+[2025-07-07 20:11:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:11:37] [Rank 0] PRINT: step:8000/10000 train_loss:1.1070 val_loss:1.1439 train_time:547702ms step_avg:68.46ms
+[2025-07-07 20:11:37] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:11:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
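The per-group numbers in the block above read naturally as per-class means over the stratified evaluation set. A minimal sketch of that bookkeeping, assuming FTA is a first-token-accuracy-style 0/1 score per sample (the log never expands the acronym, so both the name and the record layout below are assumptions):

from collections import defaultdict

def summarize(records):
    # records: iterable of (group_id, loss, fta) tuples, one per evaluated sample
    by_group = defaultdict(lambda: [0.0, 0.0, 0])
    for gid, loss, fta in records:
        agg = by_group[gid]
        agg[0] += loss; agg[1] += fta; agg[2] += 1
    n = sum(a[2] for a in by_group.values())
    print(f"Total Loss: {sum(a[0] for a in by_group.values()) / n:.4f}")
    print(f"Total FTA: {sum(a[1] for a in by_group.values()) / n:.4f}")
    for gid in sorted(by_group):
        loss_sum, fta_sum, count = by_group[gid]
        print(f"Group {gid} Loss: {loss_sum / count:.4f}  FTA: {fta_sum / count:.4f}")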
+[2025-07-07 20:11:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:17:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:17:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:17:03] [Rank 0] Total Loss: 5.3870
+[2025-07-07 20:17:03] [Rank 0] Total FTA: 0.4193
+[2025-07-07 20:17:03] [Rank 0] Group 0 Loss: 5.7345
+[2025-07-07 20:17:03] [Rank 0] Group 1 Loss: 5.0120
+[2025-07-07 20:17:03] [Rank 0] Group 2 Loss: 5.0364
+[2025-07-07 20:17:03] [Rank 0] Group 3 Loss: 5.4049
+[2025-07-07 20:17:03] [Rank 0] Group 4 Loss: 5.4220
+[2025-07-07 20:17:03] [Rank 0] Group 5 Loss: 5.3665
+[2025-07-07 20:17:03] [Rank 0] Group 6 Loss: 5.3578
+[2025-07-07 20:17:03] [Rank 0] Group 7 Loss: 5.4496
+[2025-07-07 20:17:03] [Rank 0] Group 8 Loss: 5.4161
+[2025-07-07 20:17:03] [Rank 0] Group 9 Loss: 5.4012
+[2025-07-07 20:17:03] [Rank 0] Group 10 Loss: 5.3805
+[2025-07-07 20:17:03] [Rank 0] Group 11 Loss: 5.3625
+[2025-07-07 20:17:03] [Rank 0] Group 0 FTA: 0.3238
+[2025-07-07 20:17:03] [Rank 0] Group 1 FTA: 0.3724
+[2025-07-07 20:17:03] [Rank 0] Group 2 FTA: 0.5911
+[2025-07-07 20:17:03] [Rank 0] Group 3 FTA: 0.3073
+[2025-07-07 20:17:03] [Rank 0] Group 4 FTA: 0.3542
+[2025-07-07 20:17:03] [Rank 0] Group 5 FTA: 0.4896
+[2025-07-07 20:17:03] [Rank 0] Group 6 FTA: 0.4115
+[2025-07-07 20:17:03] [Rank 0] Group 7 FTA: 0.4818
+[2025-07-07 20:17:03] [Rank 0] Group 8 FTA: 0.4375
+[2025-07-07 20:17:03] [Rank 0] Group 9 FTA: 0.4688
+[2025-07-07 20:17:03] [Rank 0] Group 10 FTA: 0.4512
+[2025-07-07 20:17:03] [Rank 0] Group 11 FTA: 0.4287
+[2025-07-07 20:17:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 20:17:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 20:17:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 20:17:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 20:17:05] [Rank 0] step:8001/10000 train_time:547710ms step_avg:68.46ms
+[2025-07-07 20:17:06] [Rank 0] step:8021/10000 train_time:548476ms step_avg:68.38ms
+[2025-07-07 20:17:08] [Rank 0] step:8041/10000 train_time:549838ms step_avg:68.38ms
+[2025-07-07 20:17:09] [Rank 0] step:8061/10000 train_time:551200ms step_avg:68.38ms
+[2025-07-07 20:17:10] [Rank 0] step:8081/10000 train_time:552563ms step_avg:68.38ms
+[2025-07-07 20:17:12] [Rank 0] step:8101/10000 train_time:553975ms step_avg:68.38ms
+[2025-07-07 20:17:13] [Rank 0] step:8121/10000 train_time:555325ms step_avg:68.38ms
+[2025-07-07 20:17:15] [Rank 0] step:8141/10000 train_time:556690ms step_avg:68.38ms
+[2025-07-07 20:17:16] [Rank 0] step:8161/10000 train_time:558056ms step_avg:68.38ms
+[2025-07-07 20:17:17] [Rank 0] step:8181/10000 train_time:559422ms step_avg:68.38ms
+[2025-07-07 20:17:19] [Rank 0] step:8201/10000 train_time:560789ms step_avg:68.38ms
+[2025-07-07 20:17:20] [Rank 0] step:8221/10000 train_time:562155ms step_avg:68.38ms
+[2025-07-07 20:17:21] [Rank 0] step:8241/10000 train_time:563523ms step_avg:68.38ms
+[2025-07-07 20:17:23] [Rank 0] step:8261/10000 train_time:564891ms step_avg:68.38ms
+[2025-07-07 20:17:24] [Rank 0] step:8281/10000 train_time:566941ms step_avg:68.46ms
+[2025-07-07 20:17:26] [Rank 0] step:8301/10000 train_time:567679ms step_avg:68.39ms
+[2025-07-07 20:17:27] [Rank 0] step:8321/10000 train_time:569055ms step_avg:68.39ms
+[2025-07-07 20:17:28] [Rank 0] step:8341/10000 train_time:570426ms step_avg:68.39ms
+[2025-07-07 20:17:30] [Rank 0] step:8361/10000 train_time:571797ms step_avg:68.39ms
+[2025-07-07 20:17:31] [Rank 0] step:8381/10000 train_time:573166ms step_avg:68.39ms
+[2025-07-07 20:17:32] [Rank 0] step:8401/10000 train_time:574539ms step_avg:68.39ms
+[2025-07-07 20:17:34] [Rank 0] step:8421/10000 train_time:575911ms step_avg:68.39ms
+[2025-07-07 20:17:35] [Rank 0] step:8441/10000 train_time:577284ms step_avg:68.39ms
+[2025-07-07 20:17:37] [Rank 0] step:8461/10000 train_time:578703ms step_avg:68.40ms
+[2025-07-07 20:17:38] [Rank 0] step:8481/10000 train_time:580065ms step_avg:68.40ms
+[2025-07-07 20:17:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:17:40] [Rank 0] PRINT: step:8500/10000 train_loss:1.0901 val_loss:1.1122 train_time:582064ms step_avg:68.48ms
+[2025-07-07 20:17:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:17:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
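The "~5000" target turning into a fixed 5633 every time is the usual stratified-sampling rounding effect: quotas are computed per class and rounded, so the realized set overshoots the target by a deterministic amount. The exact quota rule is not visible in this excerpt; the sketch below is one plausible reconstruction (all names hypothetical):

import math, random

def stratified_sample(samples_by_class, target=5000, min_per_class=1):
    total = sum(len(s) for s in samples_by_class.values())
    picked = []
    for cls, samples in samples_by_class.items():
        # proportional quota, rounded up so small classes are never empty
        k = max(min_per_class, math.ceil(target * len(samples) / total))
        picked.extend(random.sample(samples, min(k, len(samples))))
    return picked  # len(picked) >= target in general, e.g. 5633 for a ~5000 target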
+[2025-07-07 20:17:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:23:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:23:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:23:09] [Rank 0] Total Loss: 5.2033
+[2025-07-07 20:23:09] [Rank 0] Total FTA: 0.4937
+[2025-07-07 20:23:09] [Rank 0] Group 0 Loss: 5.5882
+[2025-07-07 20:23:09] [Rank 0] Group 1 Loss: 4.6892
+[2025-07-07 20:23:09] [Rank 0] Group 2 Loss: 4.9677
+[2025-07-07 20:23:09] [Rank 0] Group 3 Loss: 5.1947
+[2025-07-07 20:23:09] [Rank 0] Group 4 Loss: 5.1766
+[2025-07-07 20:23:09] [Rank 0] Group 5 Loss: 5.1513
+[2025-07-07 20:23:09] [Rank 0] Group 6 Loss: 5.1567
+[2025-07-07 20:23:09] [Rank 0] Group 7 Loss: 5.2477
+[2025-07-07 20:23:09] [Rank 0] Group 8 Loss: 5.1865
+[2025-07-07 20:23:09] [Rank 0] Group 9 Loss: 5.1609
+[2025-07-07 20:23:09] [Rank 0] Group 10 Loss: 5.1936
+[2025-07-07 20:23:09] [Rank 0] Group 11 Loss: 5.2508
+[2025-07-07 20:23:09] [Rank 0] Group 0 FTA: 0.4980
+[2025-07-07 20:23:09] [Rank 0] Group 1 FTA: 0.5182
+[2025-07-07 20:23:09] [Rank 0] Group 2 FTA: 0.5911
+[2025-07-07 20:23:09] [Rank 0] Group 3 FTA: 0.3698
+[2025-07-07 20:23:09] [Rank 0] Group 4 FTA: 0.4245
+[2025-07-07 20:23:09] [Rank 0] Group 5 FTA: 0.6354
+[2025-07-07 20:23:09] [Rank 0] Group 6 FTA: 0.4245
+[2025-07-07 20:23:09] [Rank 0] Group 7 FTA: 0.5052
+[2025-07-07 20:23:09] [Rank 0] Group 8 FTA: 0.5312
+[2025-07-07 20:23:09] [Rank 0] Group 9 FTA: 0.4648
+[2025-07-07 20:23:09] [Rank 0] Group 10 FTA: 0.4922
+[2025-07-07 20:23:09] [Rank 0] Group 11 FTA: 0.4795
+[2025-07-07 20:23:10] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 20:23:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 20:23:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 20:23:11] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 20:23:11] [Rank 0] step:8501/10000 train_time:582072ms step_avg:68.47ms
+[2025-07-07 20:23:12] [Rank 0] step:8521/10000 train_time:582832ms step_avg:68.40ms
+[2025-07-07 20:23:14] [Rank 0] step:8541/10000 train_time:584194ms step_avg:68.40ms
+[2025-07-07 20:23:15] [Rank 0] step:8561/10000 train_time:585557ms step_avg:68.40ms
+[2025-07-07 20:23:16] [Rank 0] step:8581/10000 train_time:586922ms step_avg:68.40ms
+[2025-07-07 20:23:18] [Rank 0] step:8601/10000 train_time:588286ms step_avg:68.40ms
+[2025-07-07 20:23:19] [Rank 0] step:8621/10000 train_time:589653ms step_avg:68.40ms
+[2025-07-07 20:23:21] [Rank 0] step:8641/10000 train_time:591017ms step_avg:68.40ms
+[2025-07-07 20:23:22] [Rank 0] step:8661/10000 train_time:592435ms step_avg:68.40ms
+[2025-07-07 20:23:23] [Rank 0] step:8681/10000 train_time:593801ms step_avg:68.40ms
+[2025-07-07 20:23:25] [Rank 0] step:8701/10000 train_time:595168ms step_avg:68.40ms
+[2025-07-07 20:23:26] [Rank 0] step:8721/10000 train_time:596534ms step_avg:68.40ms
+[2025-07-07 20:23:27] [Rank 0] step:8741/10000 train_time:597902ms step_avg:68.40ms
+[2025-07-07 20:23:29] [Rank 0] step:8761/10000 train_time:599271ms step_avg:68.40ms
+[2025-07-07 20:23:30] [Rank 0] step:8781/10000 train_time:600641ms step_avg:68.40ms
+[2025-07-07 20:23:31] [Rank 0] step:8801/10000 train_time:602009ms step_avg:68.40ms
+[2025-07-07 20:23:33] [Rank 0] step:8821/10000 train_time:603628ms step_avg:68.43ms
+[2025-07-07 20:23:34] [Rank 0] step:8841/10000 train_time:604788ms step_avg:68.41ms
+[2025-07-07 20:23:36] [Rank 0] step:8861/10000 train_time:606158ms step_avg:68.41ms
+[2025-07-07 20:23:37] [Rank 0] step:8881/10000 train_time:607528ms step_avg:68.41ms
+[2025-07-07 20:23:38] [Rank 0] step:8901/10000 train_time:608899ms step_avg:68.41ms
+[2025-07-07 20:23:40] [Rank 0] step:8921/10000 train_time:610267ms step_avg:68.41ms
+[2025-07-07 20:23:41] [Rank 0] step:8941/10000 train_time:611639ms step_avg:68.41ms
+[2025-07-07 20:23:42] [Rank 0] step:8961/10000 train_time:613010ms step_avg:68.41ms
+[2025-07-07 20:23:44] [Rank 0] step:8981/10000 train_time:614384ms step_avg:68.41ms
+[2025-07-07 20:23:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:23:46] [Rank 0] PRINT: step:9000/10000 train_loss:1.0770 val_loss:1.1064 train_time:616381ms step_avg:68.49ms
+[2025-07-07 20:23:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:23:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
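The step_avg column is consistent with cumulative train_time divided by the step index, e.g. for the step:8981 line above:

train_time_ms = 614384          # cumulative wall-clock time at step 8981
step = 8981
print(f"step_avg:{train_time_ms / step:.2f}ms")   # -> step_avg:68.41ms, matching the log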
+[2025-07-07 20:23:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:29:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:29:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:29:15] [Rank 0] Total Loss: 5.3493
+[2025-07-07 20:29:15] [Rank 0] Total FTA: 0.5090
+[2025-07-07 20:29:15] [Rank 0] Group 0 Loss: 5.6544
+[2025-07-07 20:29:15] [Rank 0] Group 1 Loss: 4.9093
+[2025-07-07 20:29:15] [Rank 0] Group 2 Loss: 5.0397
+[2025-07-07 20:29:15] [Rank 0] Group 3 Loss: 5.2788
+[2025-07-07 20:29:15] [Rank 0] Group 4 Loss: 5.3514
+[2025-07-07 20:29:15] [Rank 0] Group 5 Loss: 5.3925
+[2025-07-07 20:29:15] [Rank 0] Group 6 Loss: 5.3849
+[2025-07-07 20:29:15] [Rank 0] Group 7 Loss: 5.4552
+[2025-07-07 20:29:15] [Rank 0] Group 8 Loss: 5.3452
+[2025-07-07 20:29:15] [Rank 0] Group 9 Loss: 5.3445
+[2025-07-07 20:29:15] [Rank 0] Group 10 Loss: 5.3516
+[2025-07-07 20:29:15] [Rank 0] Group 11 Loss: 5.3592
+[2025-07-07 20:29:15] [Rank 0] Group 0 FTA: 0.5202
+[2025-07-07 20:29:15] [Rank 0] Group 1 FTA: 0.3568
+[2025-07-07 20:29:15] [Rank 0] Group 2 FTA: 0.6380
+[2025-07-07 20:29:15] [Rank 0] Group 3 FTA: 0.3568
+[2025-07-07 20:29:15] [Rank 0] Group 4 FTA: 0.4974
+[2025-07-07 20:29:15] [Rank 0] Group 5 FTA: 0.5807
+[2025-07-07 20:29:15] [Rank 0] Group 6 FTA: 0.5026
+[2025-07-07 20:29:15] [Rank 0] Group 7 FTA: 0.5625
+[2025-07-07 20:29:15] [Rank 0] Group 8 FTA: 0.5312
+[2025-07-07 20:29:15] [Rank 0] Group 9 FTA: 0.5000
+[2025-07-07 20:29:15] [Rank 0] Group 10 FTA: 0.5410
+[2025-07-07 20:29:16] [Rank 0] Group 11 FTA: 0.5039
+[2025-07-07 20:29:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 20:29:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 20:29:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 20:29:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 20:29:17] [Rank 0] step:9001/10000 train_time:616395ms step_avg:68.48ms
+[2025-07-07 20:29:19] [Rank 0] step:9021/10000 train_time:617435ms step_avg:68.44ms
+[2025-07-07 20:29:20] [Rank 0] step:9041/10000 train_time:618799ms step_avg:68.44ms
+[2025-07-07 20:29:21] [Rank 0] step:9061/10000 train_time:620164ms step_avg:68.44ms
+[2025-07-07 20:29:23] [Rank 0] step:9081/10000 train_time:621529ms step_avg:68.44ms
+[2025-07-07 20:29:24] [Rank 0] step:9101/10000 train_time:622893ms step_avg:68.44ms
+[2025-07-07 20:29:25] [Rank 0] step:9121/10000 train_time:624258ms step_avg:68.44ms
+[2025-07-07 20:29:27] [Rank 0] step:9141/10000 train_time:625623ms step_avg:68.44ms
+[2025-07-07 20:29:28] [Rank 0] step:9161/10000 train_time:626989ms step_avg:68.44ms
+[2025-07-07 20:29:30] [Rank 0] step:9181/10000 train_time:628356ms step_avg:68.44ms
+[2025-07-07 20:29:31] [Rank 0] step:9201/10000 train_time:629765ms step_avg:68.45ms
+[2025-07-07 20:29:32] [Rank 0] step:9221/10000 train_time:631133ms step_avg:68.45ms
+[2025-07-07 20:29:34] [Rank 0] step:9241/10000 train_time:632501ms step_avg:68.45ms
+[2025-07-07 20:29:35] [Rank 0] step:9261/10000 train_time:633870ms step_avg:68.45ms
+[2025-07-07 20:29:36] [Rank 0] step:9281/10000 train_time:635238ms step_avg:68.45ms
+[2025-07-07 20:29:38] [Rank 0] step:9301/10000 train_time:636607ms step_avg:68.45ms
+[2025-07-07 20:29:39] [Rank 0] step:9321/10000 train_time:637976ms step_avg:68.44ms
+[2025-07-07 20:29:40] [Rank 0] step:9341/10000 train_time:639344ms step_avg:68.44ms
+[2025-07-07 20:29:42] [Rank 0] step:9361/10000 train_time:640963ms step_avg:68.47ms
+[2025-07-07 20:29:43] [Rank 0] step:9381/10000 train_time:642117ms step_avg:68.45ms
+[2025-07-07 20:29:45] [Rank 0] step:9401/10000 train_time:643486ms step_avg:68.45ms
+[2025-07-07 20:29:46] [Rank 0] step:9421/10000 train_time:644857ms step_avg:68.45ms
+[2025-07-07 20:29:47] [Rank 0] step:9441/10000 train_time:646227ms step_avg:68.45ms
+[2025-07-07 20:29:49] [Rank 0] step:9461/10000 train_time:647600ms step_avg:68.45ms
+[2025-07-07 20:29:50] [Rank 0] step:9481/10000 train_time:648972ms step_avg:68.45ms
+[2025-07-07 20:29:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:29:52] [Rank 0] PRINT: step:9500/10000 train_loss:1.0674 val_loss:1.0841 train_time:650968ms step_avg:68.52ms
+[2025-07-07 20:29:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:29:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
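The raw log files print every entry twice, so extracting clean curves from them needs a dedup pass. A small sketch for pulling the per-step timing series (the filename is a placeholder; the real logs are named training_log_<uuid>.txt):

import re

STEP = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")
points, seen = [], set()
with open("training_log_<uuid>.txt") as f:   # placeholder path
    for line in f:
        m = STEP.search(line)
        if m and m.group(1) not in seen:     # skip the duplicated second copy
            seen.add(m.group(1))
            points.append((int(m.group(1)), int(m.group(2)), float(m.group(3))))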
+[2025-07-07 20:29:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:35:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:35:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:35:18] [Rank 0] Total Loss: 5.2531
+[2025-07-07 20:35:18] [Rank 0] Total FTA: 0.5432
+[2025-07-07 20:35:18] [Rank 0] Group 0 Loss: 5.5853
+[2025-07-07 20:35:18] [Rank 0] Group 1 Loss: 4.7186
+[2025-07-07 20:35:18] [Rank 0] Group 2 Loss: 5.0536
+[2025-07-07 20:35:18] [Rank 0] Group 3 Loss: 5.3486
+[2025-07-07 20:35:18] [Rank 0] Group 4 Loss: 5.2097
+[2025-07-07 20:35:18] [Rank 0] Group 5 Loss: 5.2846
+[2025-07-07 20:35:18] [Rank 0] Group 6 Loss: 5.1742
+[2025-07-07 20:35:18] [Rank 0] Group 7 Loss: 5.3077
+[2025-07-07 20:35:18] [Rank 0] Group 8 Loss: 5.1863
+[2025-07-07 20:35:18] [Rank 0] Group 9 Loss: 5.3140
+[2025-07-07 20:35:18] [Rank 0] Group 10 Loss: 5.2996
+[2025-07-07 20:35:19] [Rank 0] Group 11 Loss: 5.2434
+[2025-07-07 20:35:19] [Rank 0] Group 0 FTA: 0.4993
+[2025-07-07 20:35:19] [Rank 0] Group 1 FTA: 0.5208
+[2025-07-07 20:35:19] [Rank 0] Group 2 FTA: 0.5234
+[2025-07-07 20:35:19] [Rank 0] Group 3 FTA: 0.4219
+[2025-07-07 20:35:19] [Rank 0] Group 4 FTA: 0.5677
+[2025-07-07 20:35:19] [Rank 0] Group 5 FTA: 0.6302
+[2025-07-07 20:35:19] [Rank 0] Group 6 FTA: 0.5495
+[2025-07-07 20:35:19] [Rank 0] Group 7 FTA: 0.5729
+[2025-07-07 20:35:19] [Rank 0] Group 8 FTA: 0.5469
+[2025-07-07 20:35:19] [Rank 0] Group 9 FTA: 0.5156
+[2025-07-07 20:35:19] [Rank 0] Group 10 FTA: 0.5859
+[2025-07-07 20:35:19] [Rank 0] Group 11 FTA: 0.5664
+[2025-07-07 20:35:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 20:35:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 20:35:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 20:35:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 20:35:20] [Rank 0] step:9501/10000 train_time:650977ms step_avg:68.52ms
+[2025-07-07 20:35:21] [Rank 0] step:9521/10000 train_time:651746ms step_avg:68.45ms
+[2025-07-07 20:35:23] [Rank 0] step:9541/10000 train_time:653774ms step_avg:68.52ms
+[2025-07-07 20:35:24] [Rank 0] step:9561/10000 train_time:654509ms step_avg:68.46ms
+[2025-07-07 20:35:26] [Rank 0] step:9581/10000 train_time:655874ms step_avg:68.46ms
+[2025-07-07 20:35:27] [Rank 0] step:9601/10000 train_time:657239ms step_avg:68.46ms
+[2025-07-07 20:35:28] [Rank 0] step:9621/10000 train_time:658604ms step_avg:68.45ms
+[2025-07-07 20:35:30] [Rank 0] step:9641/10000 train_time:659970ms step_avg:68.45ms
+[2025-07-07 20:35:31] [Rank 0] step:9661/10000 train_time:661336ms step_avg:68.45ms
+[2025-07-07 20:35:32] [Rank 0] step:9681/10000 train_time:662702ms step_avg:68.45ms
+[2025-07-07 20:35:34] [Rank 0] step:9701/10000 train_time:664069ms step_avg:68.45ms
+[2025-07-07 20:35:35] [Rank 0] step:9721/10000 train_time:665685ms step_avg:68.48ms
+[2025-07-07 20:35:36] [Rank 0] step:9741/10000 train_time:666831ms step_avg:68.46ms
+[2025-07-07 20:35:38] [Rank 0] step:9761/10000 train_time:668200ms step_avg:68.46ms
+[2025-07-07 20:35:39] [Rank 0] step:9781/10000 train_time:669569ms step_avg:68.46ms
+[2025-07-07 20:35:41] [Rank 0] step:9801/10000 train_time:670937ms step_avg:68.46ms
+[2025-07-07 20:35:42] [Rank 0] step:9821/10000 train_time:672308ms step_avg:68.46ms
+[2025-07-07 20:35:43] [Rank 0] step:9841/10000 train_time:673676ms step_avg:68.46ms
+[2025-07-07 20:35:45] [Rank 0] step:9861/10000 train_time:675046ms step_avg:68.46ms
+[2025-07-07 20:35:46] [Rank 0] step:9881/10000 train_time:676415ms step_avg:68.46ms
+[2025-07-07 20:35:47] [Rank 0] step:9901/10000 train_time:678039ms step_avg:68.48ms
+[2025-07-07 20:35:49] [Rank 0] step:9921/10000 train_time:679185ms step_avg:68.46ms
+[2025-07-07 20:35:50] [Rank 0] step:9941/10000 train_time:680555ms step_avg:68.46ms
+[2025-07-07 20:35:52] [Rank 0] step:9961/10000 train_time:681926ms step_avg:68.46ms
+[2025-07-07 20:35:53] [Rank 0] step:9981/10000 train_time:683297ms step_avg:68.46ms
+[2025-07-07 20:35:54] [Rank 0] step:10000/10000 train_time:684601ms step_avg:68.46ms
+[2025-07-07 20:35:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:35:55] [Rank 0] PRINT: step:10000/10000 train_loss:1.0576 val_loss:1.0869 train_time:685300ms step_avg:68.53ms
+[2025-07-07 20:35:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:35:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:35:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:41:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:41:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:41:20] [Rank 0] Total Loss: 5.3840
+[2025-07-07 20:41:20] [Rank 0] Total FTA: 0.5793
+[2025-07-07 20:41:20] [Rank 0] Group 0 Loss: 5.4438
+[2025-07-07 20:41:20] [Rank 0] Group 1 Loss: 4.9482
+[2025-07-07 20:41:20] [Rank 0] Group 2 Loss: 5.1506
+[2025-07-07 20:41:20] [Rank 0] Group 3 Loss: 5.3450
+[2025-07-07 20:41:20] [Rank 0] Group 4 Loss: 5.3408
+[2025-07-07 20:41:20] [Rank 0] Group 5 Loss: 5.3791
+[2025-07-07 20:41:20] [Rank 0] Group 6 Loss: 5.4094
+[2025-07-07 20:41:20] [Rank 0] Group 7 Loss: 5.5691
+[2025-07-07 20:41:20] [Rank 0] Group 8 Loss: 5.3661
+[2025-07-07 20:41:20] [Rank 0] Group 9 Loss: 5.4247
+[2025-07-07 20:41:20] [Rank 0] Group 10 Loss: 5.4954
+[2025-07-07 20:41:20] [Rank 0] Group 11 Loss: 5.4845
+[2025-07-07 20:41:20] [Rank 0] Group 0 FTA: 0.6580
+[2025-07-07 20:41:20] [Rank 0] Group 1 FTA: 0.3464
+[2025-07-07 20:41:20] [Rank 0] Group 2 FTA: 0.5651
+[2025-07-07 20:41:20] [Rank 0] Group 3 FTA: 0.5911
+[2025-07-07 20:41:20] [Rank 0] Group 4 FTA: 0.5391
+[2025-07-07 20:41:20] [Rank 0] Group 5 FTA: 0.7135
+[2025-07-07 20:41:20] [Rank 0] Group 6 FTA: 0.5573
+[2025-07-07 20:41:20] [Rank 0] Group 7 FTA: 0.6172
+[2025-07-07 20:41:20] [Rank 0] Group 8 FTA: 0.5781
+[2025-07-07 20:41:20] [Rank 0] Group 9 FTA: 0.5156
+[2025-07-07 20:41:20] [Rank 0] Group 10 FTA: 0.5664
+[2025-07-07 20:41:20] [Rank 0] Group 11 FTA: 0.5898
+[2025-07-07 20:41:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_loss_curves.png
+[2025-07-07 20:41:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/per_class_acc_curves.png
+[2025-07-07 20:41:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_loss_curve.png
+[2025-07-07 20:41:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_45/total_acc_curve.png
+[2025-07-07 20:41:21] [Rank 0] step:10001/10000 train_time:685309ms step_avg:68.52ms
+[2025-07-07 20:41:21] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 20:41:21 2025 ---
+[2025-07-07 20:41:21] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10316 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/config.json b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..38a077c166ae01d8f9eb952442be3d8dce95686b
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/config.json
@@ -0,0 +1,23 @@
+{
+  "cli_args": {
+    "unet": false,
+    "seed": 48,
+    "optimizer_mode": 5,
+    "model_parameterization": "qkvo",
+    "adam_lr": 0.0005
+  },
+  "hyperparameters": {
+    "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+    "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+    "val_tokens": 1966080,
+    "train_seq_len": 12288,
+    "val_seq_len": 65536,
+    "num_iterations": 10000,
+    "cooldown_frac": 0.8,
+    "vocab_size": 50257,
+    "val_loss_every": 500,
+    "save_checkpoint": false
+  },
+  "run_uuid_for_log": "c8ac566d-d5fa-4018-b699-fe3a866fecd6",
+  "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..675422d4da68095522e3b904905a5dd611492469
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f6941cc6d25238564d22a264b90bbca9e5b2f145b98e265dd71d6ddd8743c8a
+size 407043
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..c7578af6a1e64ea5a4af353cc6461c80a6798ecd
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:79786116bd682e77da3a3abbe7c5a55e995b980135762285ae4f75b5d4e15116
+size 394948
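The *.png entries in this diff are Git LFS pointer files (version line, sha256 oid, byte size), not the plots themselves; in an LFS-enabled clone, something like git lfs pull --include="qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/*.png" should materialize the actual images. Note also, from the config just above, that this run uses optimizer_mode 5 with adam_lr 0.0005, which per the argparse help logged below is the all-Adam baseline (no Muon).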
b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..6654eda96dc42141c7a23398464bffd70894875a --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46d77f929a8e55d13d183e5ea522825f0f2446bb80c443f92aeba74967805ff9 +size 104917 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..4d32131cc5f98a6c6d443379b8301d1694743320 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32fefd444b02171e8c155ab744dc355e23821a6d25872e6e36a52c45d873a0e3 +size 129374 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/training_log_c8ac566d-d5fa-4018-b699-fe3a866fecd6.txt b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/training_log_c8ac566d-d5fa-4018-b699-fe3a866fecd6.txt new file mode 100644 index 0000000000000000000000000000000000000000..16c39d7b05d4aec4383a508a50ac7bcfa5b7b044 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/training_log_c8ac566d-d5fa-4018-b699-fe3a866fecd6.txt @@ -0,0 +1,5144 @@ +[2025-07-07 18:39:01] [Rank 0] PRINT: --- Script Start: Mon Jul 7 18:39:01 2025 --- +[2025-07-07 18:39:01] [Rank 0] PRINT: --- Script Start: Mon Jul 7 18:39:01 2025 --- +[2025-07-07 18:39:01] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-07 18:39:01] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.0005) +[2025-07-07 18:39:01] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 18:39:01] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 18:39:01] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-07 18:39:01] [Rank 0] PRINT: Using fixed seed: 48 +[2025-07-07 18:39:01] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48 +[2025-07-07 18:39:01] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48 +[2025-07-07 18:39:01] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, 
backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
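+# Illustration (values taken from this run's logged CLI args, nothing new):
+# with optimizer_mode=5, model_parameterization="qkvo", adam_lr=0.0005 and
+# seed=48, the run_folder_name f-string below resolves to
+#     mode_5_param_qkvo_lr_0.0005_seed_48
+# which matches the "Run directory:" line printed at startup and the folder
+# holding this config.json and training log.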
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append to the logfile exactly once; the guard also covers re-runs
+        # where run_flag is False and logfile stays None, in which case an
+        # unguarded write would raise (and a second write would duplicate
+        # every line in the log)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 18:39:01] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
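+# Illustration (values taken from this run's logged CLI args, nothing new):
+# with optimizer_mode=5, model_parameterization="qkvo", adam_lr=0.0005 and
+# seed=48, the run_folder_name f-string below resolves to
+#     mode_5_param_qkvo_lr_0.0005_seed_48
+# which matches the "Run directory:" line printed at startup and the folder
+# holding this config.json and training log.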
+base_log_dir = Path("logs_bios/qa_0704")  # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code)  # Log the code
+    # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function for per-class (dict-of-dicts) or total (flat dict) histories"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            step_data = history[group_id_str]
+            steps = sorted([int(e) for e in step_data.keys()])
+            values = [step_data[str(e)] for e in steps]
+            ax.plot(steps, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        steps = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in steps]
+        ax.plot(steps, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
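+            # Assumed return convention (inferred from this test, not from the
+            # model source): with target_seq=None the model yields logits, or a
+            # (loss, logits) tuple in some variants; with targets, a scalar loss.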
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 18:39:02] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 18:39:02] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 18:39:02] [Rank 0] PRINT: Constructing model... +[2025-07-07 18:39:02] [Rank 0] PRINT: Constructing model... +[2025-07-07 18:39:04] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 18:39:04] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 18:39:04] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 18:39:04] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 18:39:04] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 18:39:04] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 18:39:05] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 18:39:05] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 18:39:05] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 18:39:05] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 18:39:05] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 18:39:05] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 18:39:05] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 18:39:05] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 18:39:05] [Rank 0] PRINT: Model returns: +[2025-07-07 18:39:05] [Rank 0] PRINT: Model returns: +[2025-07-07 18:39:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 18:39:05] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 18:39:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 18:39:05] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 18:39:05] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-07 18:39:05] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.0005). +[2025-07-07 18:39:05] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 18:39:05] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 18:39:05] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 18:39:05] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 18:39:05] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 18:39:05] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 18:39:05] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 18:39:05] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 18:39:05] [Rank 0] PRINT: Starting warmup... +[2025-07-07 18:39:05] [Rank 0] PRINT: Starting warmup... +[2025-07-07 18:40:25] [Rank 0] PRINT: Warmup complete. +[2025-07-07 18:40:25] [Rank 0] PRINT: Warmup complete. +[2025-07-07 18:40:25] [Rank 0] PRINT: Starting training... +[2025-07-07 18:40:25] [Rank 0] PRINT: Starting training... +[2025-07-07 18:40:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 18:40:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 18:40:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 18:40:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 18:40:34] [Rank 0] step:21/10000 train_time:817ms step_avg:38.92ms +[2025-07-07 18:40:34] [Rank 0] step:21/10000 train_time:817ms step_avg:38.92ms +[2025-07-07 18:40:35] [Rank 0] step:41/10000 train_time:2142ms step_avg:52.24ms +[2025-07-07 18:40:35] [Rank 0] step:41/10000 train_time:2142ms step_avg:52.24ms +[2025-07-07 18:40:37] [Rank 0] step:61/10000 train_time:3467ms step_avg:56.83ms +[2025-07-07 18:40:37] [Rank 0] step:61/10000 train_time:3467ms step_avg:56.83ms +[2025-07-07 18:40:38] [Rank 0] step:81/10000 train_time:4791ms step_avg:59.15ms +[2025-07-07 18:40:38] [Rank 0] step:81/10000 train_time:4791ms step_avg:59.15ms +[2025-07-07 18:40:39] [Rank 0] step:101/10000 train_time:6115ms step_avg:60.54ms +[2025-07-07 18:40:39] [Rank 0] step:101/10000 train_time:6115ms step_avg:60.54ms +[2025-07-07 18:40:41] [Rank 0] step:121/10000 train_time:7440ms step_avg:61.49ms +[2025-07-07 18:40:41] [Rank 0] step:121/10000 train_time:7440ms step_avg:61.49ms +[2025-07-07 18:40:42] [Rank 0] step:141/10000 train_time:8765ms step_avg:62.16ms +[2025-07-07 18:40:42] [Rank 0] step:141/10000 train_time:8765ms step_avg:62.16ms +[2025-07-07 18:40:43] [Rank 0] step:161/10000 train_time:10092ms step_avg:62.68ms +[2025-07-07 18:40:43] [Rank 0] step:161/10000 train_time:10092ms step_avg:62.68ms +[2025-07-07 18:40:45] [Rank 0] step:181/10000 train_time:11419ms step_avg:63.09ms +[2025-07-07 18:40:45] [Rank 0] step:181/10000 train_time:11419ms step_avg:63.09ms +[2025-07-07 18:40:46] [Rank 0] step:201/10000 train_time:12745ms step_avg:63.41ms +[2025-07-07 18:40:46] [Rank 0] step:201/10000 train_time:12745ms step_avg:63.41ms +[2025-07-07 18:40:47] [Rank 0] step:221/10000 train_time:14073ms step_avg:63.68ms +[2025-07-07 18:40:47] [Rank 0] step:221/10000 train_time:14073ms step_avg:63.68ms +[2025-07-07 18:40:49] [Rank 0] step:241/10000 train_time:15401ms step_avg:63.90ms +[2025-07-07 18:40:49] [Rank 0] step:241/10000 train_time:15401ms step_avg:63.90ms +[2025-07-07 18:40:50] [Rank 0] step:261/10000 train_time:16730ms step_avg:64.10ms +[2025-07-07 18:40:50] [Rank 0] step:261/10000 train_time:16730ms step_avg:64.10ms +[2025-07-07 18:40:51] [Rank 0] step:281/10000 train_time:18059ms step_avg:64.27ms +[2025-07-07 18:40:51] [Rank 0] step:281/10000 train_time:18059ms step_avg:64.27ms +[2025-07-07 18:40:53] [Rank 0] step:301/10000 train_time:19389ms step_avg:64.41ms +[2025-07-07 18:40:53] [Rank 0] step:301/10000 train_time:19389ms step_avg:64.41ms +[2025-07-07 18:40:54] [Rank 0] step:321/10000 train_time:20721ms step_avg:64.55ms +[2025-07-07 18:40:54] [Rank 0] step:321/10000 train_time:20721ms step_avg:64.55ms +[2025-07-07 18:40:55] [Rank 0] step:341/10000 train_time:22055ms step_avg:64.68ms +[2025-07-07 18:40:55] [Rank 0] step:341/10000 train_time:22055ms step_avg:64.68ms +[2025-07-07 18:40:57] [Rank 0] step:361/10000 train_time:24058ms step_avg:66.64ms +[2025-07-07 18:40:57] [Rank 0] step:361/10000 train_time:24058ms step_avg:66.64ms +[2025-07-07 18:40:58] [Rank 0] step:381/10000 train_time:24778ms step_avg:65.03ms +[2025-07-07 18:40:58] [Rank 0] step:381/10000 train_time:24778ms step_avg:65.03ms +[2025-07-07 18:40:59] [Rank 0] step:401/10000 train_time:26116ms step_avg:65.13ms +[2025-07-07 18:40:59] [Rank 0] step:401/10000 train_time:26116ms step_avg:65.13ms +[2025-07-07 18:41:01] [Rank 0] step:421/10000 train_time:27453ms step_avg:65.21ms 
+[2025-07-07 18:41:01] [Rank 0] step:421/10000 train_time:27453ms step_avg:65.21ms
+[2025-07-07 18:41:02] [Rank 0] step:441/10000 train_time:28790ms step_avg:65.28ms
+[2025-07-07 18:41:03] [Rank 0] step:461/10000 train_time:30129ms step_avg:65.36ms
+[2025-07-07 18:41:05] [Rank 0] step:481/10000 train_time:31467ms step_avg:65.42ms
+[2025-07-07 18:41:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:41:07] [Rank 0] PRINT: step:500/10000 train_loss:7.0490 val_loss:4.9085 train_time:33414ms step_avg:66.83ms
+[2025-07-07 18:41:07] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:41:07] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:41:07] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:46:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:46:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:46:23] [Rank 0] Total Loss: 6.0152
+[2025-07-07 18:46:23] [Rank 0] Total FTA: 0.0581
+[2025-07-07 18:46:23] [Rank 0] Group 0 Loss: 5.9772
+[2025-07-07 18:46:23] [Rank 0] Group 1 Loss: 6.0286
+[2025-07-07 18:46:23] [Rank 0] Group 2 Loss: 6.1664
+[2025-07-07 18:46:23] [Rank 0] Group 3 Loss: 5.9723
+[2025-07-07 18:46:23] [Rank 0] Group 4 Loss: 6.0426
+[2025-07-07 18:46:23] [Rank 0] Group 5 Loss: 6.0054
+[2025-07-07 18:46:23] [Rank 0] Group 6 Loss: 6.0373
+[2025-07-07 18:46:23] [Rank 0] Group 7 Loss: 6.0015
+[2025-07-07 18:46:23] [Rank 0] Group 8 Loss: 5.9926
+[2025-07-07 18:46:23] [Rank 0] Group 9 Loss: 6.0483
+[2025-07-07 18:46:23] [Rank 0] Group 10 Loss: 5.9864
+[2025-07-07 18:46:23] [Rank 0] Group 11 Loss: 6.0031
+[2025-07-07 18:46:23] [Rank 0] Group 0 FTA: 0.1404
+[2025-07-07 18:46:23] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 18:46:23] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 18:46:23] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-07 18:46:23] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 18:46:23] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-07 18:46:23] [Rank 0] Group 6 FTA: 0.0547
+[2025-07-07 18:46:23] [Rank 0] Group 7 FTA: 0.0599
+[2025-07-07 18:46:23] [Rank 0] Group 8 FTA: 0.0833
+[2025-07-07 18:46:23] [Rank 0] Group 9 FTA: 0.0664
+[2025-07-07 18:46:23] [Rank 0] Group 10 FTA: 0.0547
+[2025-07-07 18:46:23] [Rank 0] Group 11 FTA: 0.0537
+[2025-07-07 18:46:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 18:46:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 18:46:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 18:46:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 18:46:24] [Rank 0] step:501/10000 train_time:33423ms step_avg:66.71ms
+[2025-07-07 18:46:25] [Rank 0] step:521/10000 train_time:34179ms step_avg:65.60ms
+[2025-07-07 18:46:27] [Rank 0] step:541/10000 train_time:35512ms step_avg:65.64ms
+[2025-07-07 18:46:28] [Rank 0] step:561/10000 train_time:36920ms step_avg:65.81ms
+[2025-07-07 18:46:30] [Rank 0] step:581/10000 train_time:38253ms step_avg:65.84ms
+[2025-07-07 18:46:31] [Rank 0] step:601/10000 train_time:39587ms step_avg:65.87ms
+[2025-07-07 18:46:32] [Rank 0] step:621/10000 train_time:40922ms step_avg:65.90ms
+[2025-07-07 18:46:34] [Rank 0] step:641/10000 train_time:42257ms step_avg:65.92ms
+[2025-07-07 18:46:35] [Rank 0] step:661/10000 train_time:43592ms step_avg:65.95ms
+[2025-07-07 18:46:36] [Rank 0] step:681/10000 train_time:44929ms step_avg:65.98ms
+[2025-07-07 18:46:38] [Rank 0] step:701/10000 train_time:46265ms step_avg:66.00ms
+[2025-07-07 18:46:39] [Rank 0] step:721/10000 train_time:47602ms step_avg:66.02ms
+[2025-07-07 18:46:40] [Rank 0] step:741/10000 train_time:48987ms step_avg:66.11ms
+[2025-07-07 18:46:42] [Rank 0] step:761/10000 train_time:50329ms step_avg:66.14ms
+[2025-07-07 18:46:43] [Rank 0] step:781/10000 train_time:51676ms step_avg:66.17ms
+[2025-07-07 18:46:44] [Rank 0] step:801/10000 train_time:53024ms step_avg:66.20ms
+[2025-07-07 18:46:46] [Rank 0] step:821/10000 train_time:54370ms step_avg:66.22ms
+[2025-07-07 18:46:47] [Rank 0] step:841/10000 train_time:55716ms step_avg:66.25ms
+[2025-07-07 18:46:48] [Rank 0] step:861/10000 train_time:57064ms step_avg:66.28ms
+[2025-07-07 18:46:50] [Rank 0] step:881/10000 train_time:58410ms step_avg:66.30ms
+[2025-07-07 18:46:51] [Rank 0] step:901/10000 train_time:59756ms step_avg:66.32ms
+[2025-07-07 18:46:52] [Rank 0] step:921/10000 train_time:61151ms step_avg:66.40ms
+[2025-07-07 18:46:54] [Rank 0] step:941/10000 train_time:62498ms step_avg:66.42ms
+[2025-07-07 18:46:55] [Rank 0] step:961/10000 train_time:63845ms step_avg:66.44ms
+[2025-07-07 18:46:56] [Rank 0] step:981/10000 train_time:65194ms step_avg:66.46ms
+[2025-07-07 18:46:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:46:59] [Rank 0] PRINT: step:1000/10000 train_loss:3.6944 val_loss:2.7231 train_time:67155ms step_avg:67.16ms
+[2025-07-07 18:46:59] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:46:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:46:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:52:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:52:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:52:18] [Rank 0] Total Loss: 4.6140
+[2025-07-07 18:52:18] [Rank 0] Total FTA: 0.0968
+[2025-07-07 18:52:18] [Rank 0] Group 0 Loss: 4.6322
+[2025-07-07 18:52:18] [Rank 0] Group 1 Loss: 4.8046
+[2025-07-07 18:52:18] [Rank 0] Group 2 Loss: 4.6679
+[2025-07-07 18:52:18] [Rank 0] Group 3 Loss: 4.5212
+[2025-07-07 18:52:18] [Rank 0] Group 4 Loss: 4.6760
+[2025-07-07 18:52:18] [Rank 0] Group 5 Loss: 4.5531
+[2025-07-07 18:52:18] [Rank 0] Group 6 Loss: 4.6085
+[2025-07-07 18:52:18] [Rank 0] Group 7 Loss: 4.5953
+[2025-07-07 18:52:18] [Rank 0] Group 8 Loss: 4.5351
+[2025-07-07 18:52:18] [Rank 0] Group 9 Loss: 4.6121
+[2025-07-07 18:52:18] [Rank 0] Group 10 Loss: 4.5864
+[2025-07-07 18:52:18] [Rank 0] Group 11 Loss: 4.5957
+[2025-07-07 18:52:18] [Rank 0] Group 0 FTA: 0.1534
+[2025-07-07 18:52:18] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 18:52:18] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 18:52:18] [Rank 0] Group 3 FTA: 0.1042
+[2025-07-07 18:52:18] [Rank 0] Group 4 FTA: 0.0495
+[2025-07-07 18:52:18] [Rank 0] Group 5 FTA: 0.0469
+[2025-07-07 18:52:18] [Rank 0] Group 6 FTA: 0.1016
+[2025-07-07 18:52:18] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-07 18:52:18] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 18:52:18] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 18:52:18] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-07 18:52:18] [Rank 0] Group 11 FTA: 0.1035
+[2025-07-07 18:52:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 18:52:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 18:52:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 18:52:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 18:52:19] [Rank 0] step:1001/10000 train_time:67166ms step_avg:67.10ms
+[2025-07-07 18:52:21] [Rank 0] step:1021/10000 train_time:67915ms step_avg:66.52ms
+[2025-07-07 18:52:22] [Rank 0] step:1041/10000 train_time:69256ms step_avg:66.53ms
+[2025-07-07 18:52:24] [Rank 0] step:1061/10000 train_time:70597ms step_avg:66.54ms
+[2025-07-07 18:52:25] [Rank 0] step:1081/10000 train_time:72611ms step_avg:67.17ms
+[2025-07-07 18:52:26] [Rank 0] step:1101/10000 train_time:73333ms step_avg:66.61ms
+[2025-07-07 18:52:28] [Rank 0] step:1121/10000 train_time:74674ms step_avg:66.61ms
+[2025-07-07 18:52:29] [Rank 0] step:1141/10000 train_time:76016ms step_avg:66.62ms
+[2025-07-07 18:52:30] [Rank 0] step:1161/10000 train_time:77360ms step_avg:66.63ms
+[2025-07-07 18:52:32] [Rank 0] step:1181/10000 train_time:78721ms step_avg:66.66ms
+[2025-07-07 18:52:33] [Rank 0] step:1201/10000 train_time:80066ms step_avg:66.67ms
+[2025-07-07 18:52:34] [Rank 0] step:1221/10000 train_time:81410ms step_avg:66.68ms
+[2025-07-07 18:52:36] [Rank 0] step:1241/10000 train_time:82753ms step_avg:66.68ms
+[2025-07-07 18:52:37] [Rank 0] step:1261/10000 train_time:84098ms step_avg:66.69ms
+[2025-07-07 18:52:38] [Rank 0] step:1281/10000 train_time:85495ms step_avg:66.74ms
+[2025-07-07 18:52:40] [Rank 0] step:1301/10000 train_time:86842ms step_avg:66.75ms
+[2025-07-07 18:52:41] [Rank 0] step:1321/10000 train_time:88188ms step_avg:66.76ms
+[2025-07-07 18:52:42] [Rank 0] step:1341/10000 train_time:89534ms step_avg:66.77ms
+[2025-07-07 18:52:44] [Rank 0] step:1361/10000 train_time:90879ms step_avg:66.77ms
+[2025-07-07 18:52:45] [Rank 0] step:1381/10000 train_time:92225ms step_avg:66.78ms
+[2025-07-07 18:52:46] [Rank 0] step:1401/10000 train_time:93571ms step_avg:66.79ms
+[2025-07-07 18:52:48] [Rank 0] step:1421/10000 train_time:94916ms step_avg:66.80ms
+[2025-07-07 18:52:49] [Rank 0] step:1441/10000 train_time:96263ms step_avg:66.80ms
+[2025-07-07 18:52:51] [Rank 0] step:1461/10000 train_time:97659ms step_avg:66.84ms
+[2025-07-07 18:52:52] [Rank 0] step:1481/10000 train_time:99004ms step_avg:66.85ms
+[2025-07-07 18:52:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:52:54] [Rank 0] PRINT: step:1500/10000 train_loss:2.2801 val_loss:1.9724 train_time:100963ms step_avg:67.31ms
+[2025-07-07 18:52:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:52:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:52:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:58:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:58:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:58:11] [Rank 0] Total Loss: 4.5150
+[2025-07-07 18:58:11] [Rank 0] Total FTA: 0.0904
+[2025-07-07 18:58:11] [Rank 0] Group 0 Loss: 4.7853
+[2025-07-07 18:58:11] [Rank 0] Group 1 Loss: 4.4613
+[2025-07-07 18:58:11] [Rank 0] Group 2 Loss: 4.4415
+[2025-07-07 18:58:11] [Rank 0] Group 3 Loss: 4.4525
+[2025-07-07 18:58:11] [Rank 0] Group 4 Loss: 4.5168
+[2025-07-07 18:58:11] [Rank 0] Group 5 Loss: 4.4643
+[2025-07-07 18:58:11] [Rank 0] Group 6 Loss: 4.4473
+[2025-07-07 18:58:11] [Rank 0] Group 7 Loss: 4.5260
+[2025-07-07 18:58:11] [Rank 0] Group 8 Loss: 4.4690
+[2025-07-07 18:58:11] [Rank 0] Group 9 Loss: 4.4833
+[2025-07-07 18:58:11] [Rank 0] Group 10 Loss: 4.4633
+[2025-07-07 18:58:11] [Rank 0] Group 11 Loss: 4.4740
+[2025-07-07 18:58:11] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 18:58:11] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 18:58:11] [Rank 0] Group 2 FTA: 0.1458
+[2025-07-07 18:58:11] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 18:58:11] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 18:58:11] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 18:58:11] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 18:58:11] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-07 18:58:11] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-07 18:58:11] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 18:58:11] [Rank 0] Group 10 FTA: 0.0820
+[2025-07-07 18:58:11] [Rank 0] Group 11 FTA: 0.0889
+[2025-07-07 18:58:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 18:58:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 18:58:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 18:58:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 18:58:13] [Rank 0] step:1501/10000 train_time:100972ms step_avg:67.27ms
+[2025-07-07 18:58:14] [Rank 0] step:1521/10000 train_time:101717ms step_avg:66.88ms
+[2025-07-07 18:58:16] [Rank 0] step:1541/10000 train_time:103056ms step_avg:66.88ms
+[2025-07-07 18:58:17] [Rank 0] step:1561/10000 train_time:104396ms step_avg:66.88ms
+[2025-07-07 18:58:18] [Rank 0] step:1581/10000 train_time:105737ms step_avg:66.88ms
+[2025-07-07 18:58:20] [Rank 0] step:1601/10000 train_time:107076ms step_avg:66.88ms
+[2025-07-07 18:58:21] [Rank 0] step:1621/10000 train_time:108464ms step_avg:66.91ms
+[2025-07-07 18:58:22] [Rank 0] step:1641/10000 train_time:109756ms step_avg:66.88ms
+[2025-07-07 18:58:24] [Rank 0] step:1661/10000 train_time:111100ms step_avg:66.89ms
+[2025-07-07 18:58:25] [Rank 0] step:1681/10000 train_time:112442ms step_avg:66.89ms
+[2025-07-07 18:58:26] [Rank 0] step:1701/10000 train_time:113785ms step_avg:66.89ms
+[2025-07-07 18:58:28] [Rank 0] step:1721/10000 train_time:115130ms step_avg:66.90ms
+[2025-07-07 18:58:29] [Rank 0] step:1741/10000 train_time:116474ms step_avg:66.90ms
+[2025-07-07 18:58:30] [Rank 0] step:1761/10000 train_time:117817ms step_avg:66.90ms
+[2025-07-07 18:58:32] [Rank 0] step:1781/10000 train_time:119163ms step_avg:66.91ms
+[2025-07-07 18:58:33] [Rank 0] step:1801/10000 train_time:120759ms step_avg:67.05ms
+[2025-07-07 18:58:34] [Rank 0] step:1821/10000 train_time:121909ms step_avg:66.95ms
+[2025-07-07 18:58:36] [Rank 0] step:1841/10000 train_time:123254ms step_avg:66.95ms
+[2025-07-07 18:58:37] [Rank 0] step:1861/10000 train_time:124600ms step_avg:66.95ms
+[2025-07-07 18:58:38] [Rank 0] step:1881/10000 train_time:125947ms step_avg:66.96ms
+[2025-07-07 18:58:40] [Rank 0] step:1901/10000 train_time:127292ms step_avg:66.96ms
+[2025-07-07 18:58:41] [Rank 0] step:1921/10000 train_time:128638ms step_avg:66.96ms
+[2025-07-07 18:58:42] [Rank 0] step:1941/10000 train_time:129984ms step_avg:66.97ms
+[2025-07-07 18:58:44] [Rank 0] step:1961/10000 train_time:131330ms step_avg:66.97ms
+[2025-07-07 18:58:45] [Rank 0] step:1981/10000 train_time:132928ms step_avg:67.10ms
+[2025-07-07 18:58:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:58:47] [Rank 0] PRINT: step:2000/10000 train_loss:1.8167 val_loss:1.7187 train_time:134684ms step_avg:67.34ms
+[2025-07-07 18:58:47] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:58:48] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:58:48] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:04:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:04:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:04:08] [Rank 0] Total Loss: 4.6610
+[2025-07-07 19:04:08] [Rank 0] Total FTA: 0.0911
+[2025-07-07 19:04:08] [Rank 0] Group 0 Loss: 4.8936
+[2025-07-07 19:04:08] [Rank 0] Group 1 Loss: 4.7459
+[2025-07-07 19:04:08] [Rank 0] Group 2 Loss: 4.5023
+[2025-07-07 19:04:08] [Rank 0] Group 3 Loss: 4.6945
+[2025-07-07 19:04:08] [Rank 0] Group 4 Loss: 4.5310
+[2025-07-07 19:04:08] [Rank 0] Group 5 Loss: 4.5790
+[2025-07-07 19:04:08] [Rank 0] Group 6 Loss: 4.6331
+[2025-07-07 19:04:08] [Rank 0] Group 7 Loss: 4.6738
+[2025-07-07 19:04:08] [Rank 0] Group 8 Loss: 4.6243
+[2025-07-07 19:04:08] [Rank 0] Group 9 Loss: 4.5683
+[2025-07-07 19:04:08] [Rank 0] Group 10 Loss: 4.6169
+[2025-07-07 19:04:08] [Rank 0] Group 11 Loss: 4.6453
+[2025-07-07 19:04:08] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 19:04:08] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 19:04:08] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 19:04:08] [Rank 0] Group 3 FTA: 0.0625
+[2025-07-07 19:04:08] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 19:04:08] [Rank 0] Group 5 FTA: 0.0781
+[2025-07-07 19:04:08] [Rank 0] Group 6 FTA: 0.1146
+[2025-07-07 19:04:08] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 19:04:08] [Rank 0] Group 8 FTA: 0.1068
+[2025-07-07 19:04:08] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 19:04:08] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-07 19:04:08] [Rank 0] Group 11 FTA: 0.0869
+[2025-07-07 19:04:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 19:04:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:04:09] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:04:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:04:09] [Rank 0] step:2001/10000 train_time:134693ms step_avg:67.31ms
+[2025-07-07 19:04:11] [Rank 0] step:2021/10000 train_time:135436ms step_avg:67.01ms
+[2025-07-07 19:04:12] [Rank 0] step:2041/10000 train_time:136775ms step_avg:67.01ms
+[2025-07-07 19:04:13] [Rank 0] step:2061/10000 train_time:138115ms step_avg:67.01ms
+[2025-07-07 19:04:15] [Rank 0] step:2081/10000 train_time:139455ms step_avg:67.01ms
+[2025-07-07 19:04:16] [Rank 0] step:2101/10000 train_time:140795ms step_avg:67.01ms
+[2025-07-07 19:04:17] [Rank 0] step:2121/10000 train_time:142135ms step_avg:67.01ms
+[2025-07-07 19:04:19] [Rank 0] step:2141/10000 train_time:143476ms step_avg:67.01ms
+[2025-07-07 19:04:20] [Rank 0] step:2161/10000 train_time:144816ms step_avg:67.01ms
+[2025-07-07 19:04:21] [Rank 0] step:2181/10000 train_time:146228ms step_avg:67.05ms
+[2025-07-07 19:04:23] [Rank 0] step:2201/10000 train_time:147568ms step_avg:67.05ms
+[2025-07-07 19:04:24] [Rank 0] step:2221/10000 train_time:148911ms step_avg:67.05ms
+[2025-07-07 19:04:25] [Rank 0] step:2241/10000 train_time:150264ms step_avg:67.05ms
+[2025-07-07 19:04:27] [Rank 0] step:2261/10000 train_time:151630ms step_avg:67.06ms
+[2025-07-07 19:04:28] [Rank 0] step:2281/10000 train_time:152998ms step_avg:67.07ms
+[2025-07-07 19:04:29] [Rank 0] step:2301/10000 train_time:154365ms step_avg:67.09ms
+[2025-07-07 19:04:31] [Rank 0] step:2321/10000 train_time:155733ms step_avg:67.10ms
+[2025-07-07 19:04:32] [Rank 0] step:2341/10000 train_time:157101ms step_avg:67.11ms
+[2025-07-07 19:04:34] [Rank 0] step:2361/10000 train_time:158498ms step_avg:67.13ms
+[2025-07-07 19:04:35] [Rank 0] step:2381/10000 train_time:159867ms step_avg:67.14ms
+[2025-07-07 19:04:36] [Rank 0] step:2401/10000 train_time:161237ms step_avg:67.15ms
+[2025-07-07 19:04:38] [Rank 0] step:2421/10000 train_time:162604ms step_avg:67.16ms
+[2025-07-07 19:04:39] [Rank 0] step:2441/10000 train_time:163971ms step_avg:67.17ms
+[2025-07-07 19:04:40] [Rank 0] step:2461/10000 train_time:165341ms step_avg:67.18ms
+[2025-07-07 19:04:42] [Rank 0] step:2481/10000 train_time:166710ms step_avg:67.19ms
+[2025-07-07 19:04:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:04:44] [Rank 0] PRINT: step:2500/10000 train_loss:1.6649 val_loss:1.6191 train_time:168699ms step_avg:67.48ms
+[2025-07-07 19:04:44] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:04:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:04:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:10:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:10:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:10:04] [Rank 0] Total Loss: 4.7821
+[2025-07-07 19:10:04] [Rank 0] Total FTA: 0.0925
+[2025-07-07 19:10:04] [Rank 0] Group 0 Loss: 5.0488
+[2025-07-07 19:10:04] [Rank 0] Group 1 Loss: 4.8847
+[2025-07-07 19:10:04] [Rank 0] Group 2 Loss: 4.5068
+[2025-07-07 19:10:04] [Rank 0] Group 3 Loss: 4.7722
+[2025-07-07 19:10:04] [Rank 0] Group 4 Loss: 4.7520
+[2025-07-07 19:10:04] [Rank 0] Group 5 Loss: 4.7104
+[2025-07-07 19:10:04] [Rank 0] Group 6 Loss: 4.6671
+[2025-07-07 19:10:04] [Rank 0] Group 7 Loss: 4.7437
+[2025-07-07 19:10:04] [Rank 0] Group 8 Loss: 4.7336
+[2025-07-07 19:10:04] [Rank 0] Group 9 Loss: 4.7075
+[2025-07-07 19:10:04] [Rank 0] Group 10 Loss: 4.7764
+[2025-07-07 19:10:04] [Rank 0] Group 11 Loss: 4.7857
+[2025-07-07 19:10:04] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 19:10:04] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 19:10:04] [Rank 0] Group 2 FTA: 0.1536
+[2025-07-07 19:10:04] [Rank 0] Group 3 FTA: 0.0964
+[2025-07-07 19:10:04] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 19:10:04] [Rank 0] Group 5 FTA: 0.0885
+[2025-07-07 19:10:04] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 19:10:04] [Rank 0] Group 7 FTA: 0.1198
+[2025-07-07 19:10:04] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-07 19:10:04] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-07 19:10:04] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-07 19:10:04] [Rank 0] Group 11 FTA: 0.0938
+[2025-07-07 19:10:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 19:10:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:10:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:10:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:10:05] [Rank 0] step:2501/10000 train_time:168709ms step_avg:67.46ms
+[2025-07-07 19:10:07] [Rank 0] step:2521/10000 train_time:169545ms step_avg:67.25ms
+[2025-07-07 19:10:08] [Rank 0] step:2541/10000 train_time:170896ms step_avg:67.26ms
+[2025-07-07 19:10:10] [Rank 0] step:2561/10000 train_time:172260ms step_avg:67.26ms
+[2025-07-07 19:10:11] [Rank 0] step:2581/10000 train_time:173624ms step_avg:67.27ms
+[2025-07-07 19:10:12] [Rank 0] step:2601/10000 train_time:174989ms step_avg:67.28ms
+[2025-07-07 19:10:14] [Rank 0] step:2621/10000 train_time:176354ms step_avg:67.28ms
+[2025-07-07 19:10:15] [Rank 0] step:2641/10000 train_time:177718ms step_avg:67.29ms
+[2025-07-07 19:10:16] [Rank 0] step:2661/10000 train_time:179083ms step_avg:67.30ms
+[2025-07-07 19:10:18] [Rank 0] step:2681/10000 train_time:180448ms step_avg:67.31ms
+[2025-07-07 19:10:19] [Rank 0] step:2701/10000 train_time:181815ms step_avg:67.31ms
+[2025-07-07 19:10:21] [Rank 0] step:2721/10000 train_time:183215ms step_avg:67.33ms
+[2025-07-07 19:10:22] [Rank 0] step:2741/10000 train_time:184581ms step_avg:67.34ms
+[2025-07-07 19:10:23] [Rank 0] step:2761/10000 train_time:185949ms step_avg:67.35ms
+[2025-07-07 19:10:25] [Rank 0] step:2781/10000 train_time:187315ms step_avg:67.36ms
+[2025-07-07 19:10:26] [Rank 0] step:2801/10000 train_time:188681ms step_avg:67.36ms
+[2025-07-07 19:10:27] [Rank 0] step:2821/10000 train_time:190049ms step_avg:67.37ms
+[2025-07-07 19:10:29] [Rank 0] step:2841/10000 train_time:191454ms step_avg:67.39ms
+[2025-07-07 19:10:30] [Rank 0] step:2861/10000 train_time:192823ms step_avg:67.40ms
+[2025-07-07 19:10:32] [Rank 0] step:2881/10000 train_time:194236ms step_avg:67.42ms
+[2025-07-07 19:10:33] [Rank 0] step:2901/10000 train_time:195601ms step_avg:67.43ms
+[2025-07-07 19:10:34] [Rank 0] step:2921/10000 train_time:196970ms step_avg:67.43ms
+[2025-07-07 19:10:36] [Rank 0] step:2941/10000 train_time:198338ms step_avg:67.44ms
+[2025-07-07 19:10:37] [Rank 0] step:2961/10000 train_time:199706ms step_avg:67.45ms
+[2025-07-07 19:10:38] [Rank 0] step:2981/10000 train_time:201073ms step_avg:67.45ms
+[2025-07-07 19:10:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:10:41] [Rank 0] PRINT: step:3000/10000 train_loss:1.5901 val_loss:1.5565 train_time:203061ms step_avg:67.69ms
+[2025-07-07 19:10:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:10:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:10:41] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:15:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:15:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:15:59] [Rank 0] Total Loss: 4.8214
+[2025-07-07 19:15:59] [Rank 0] Total FTA: 0.0900
+[2025-07-07 19:15:59] [Rank 0] Group 0 Loss: 5.1225
+[2025-07-07 19:15:59] [Rank 0] Group 1 Loss: 4.7526
+[2025-07-07 19:15:59] [Rank 0] Group 2 Loss: 4.5746
+[2025-07-07 19:15:59] [Rank 0] Group 3 Loss: 4.8464
+[2025-07-07 19:15:59] [Rank 0] Group 4 Loss: 4.7797
+[2025-07-07 19:15:59] [Rank 0] Group 5 Loss: 4.7912
+[2025-07-07 19:15:59] [Rank 0] Group 6 Loss: 4.7122
+[2025-07-07 19:15:59] [Rank 0] Group 7 Loss: 4.8396
+[2025-07-07 19:15:59] [Rank 0] Group 8 Loss: 4.7858
+[2025-07-07 19:15:59] [Rank 0] Group 9 Loss: 4.7732
+[2025-07-07 19:16:00] [Rank 0] Group 10 Loss: 4.7855
+[2025-07-07 19:16:00] [Rank 0] Group 11 Loss: 4.8089
+[2025-07-07 19:16:00] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 19:16:00] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 19:16:00] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-07 19:16:00] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-07 19:16:00] [Rank 0] Group 4 FTA: 0.0938
+[2025-07-07 19:16:00] [Rank 0] Group 5 FTA: 0.0625
+[2025-07-07 19:16:00] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-07 19:16:00] [Rank 0] Group 7 FTA: 0.1198
+[2025-07-07 19:16:00] [Rank 0] Group 8 FTA: 0.0938
+[2025-07-07 19:16:00] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-07 19:16:00] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 19:16:00] [Rank 0] Group 11 FTA: 0.0928
+[2025-07-07 19:16:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 19:16:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:16:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:16:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:16:02] [Rank 0] step:3001/10000 train_time:203071ms step_avg:67.67ms
+[2025-07-07 19:16:03] [Rank 0] step:3021/10000 train_time:203833ms step_avg:67.47ms
+[2025-07-07 19:16:05] [Rank 0] step:3041/10000 train_time:205195ms step_avg:67.48ms
+[2025-07-07 19:16:06] [Rank 0] step:3061/10000 train_time:206558ms step_avg:67.48ms
+[2025-07-07 19:16:07] [Rank 0] step:3081/10000 train_time:207962ms step_avg:67.50ms
+[2025-07-07 19:16:09] [Rank 0] step:3101/10000 train_time:209326ms step_avg:67.50ms
+[2025-07-07 19:16:10] [Rank 0] step:3121/10000 train_time:210692ms step_avg:67.51ms
+[2025-07-07 19:16:11] [Rank 0] step:3141/10000 train_time:212058ms step_avg:67.51ms
+[2025-07-07 19:16:13] [Rank 0] step:3161/10000 train_time:213423ms step_avg:67.52ms
+[2025-07-07 19:16:14] [Rank 0] step:3181/10000 train_time:214788ms step_avg:67.52ms
+[2025-07-07 19:16:16] [Rank 0] step:3201/10000 train_time:216155ms step_avg:67.53ms
+[2025-07-07 19:16:17] [Rank 0] step:3221/10000 train_time:217521ms step_avg:67.53ms
+[2025-07-07 19:16:18] [Rank 0] step:3241/10000 train_time:219574ms step_avg:67.75ms
+[2025-07-07 19:16:20] [Rank 0] step:3261/10000 train_time:220311ms step_avg:67.56ms
+[2025-07-07 19:16:21] [Rank 0] step:3281/10000 train_time:221677ms step_avg:67.56ms
+[2025-07-07 19:16:22] [Rank 0] step:3301/10000 train_time:223043ms step_avg:67.57ms
+[2025-07-07 19:16:24] [Rank 0] step:3321/10000 train_time:224412ms step_avg:67.57ms
+[2025-07-07 19:16:25] [Rank 0] step:3341/10000 train_time:225780ms step_avg:67.58ms
+[2025-07-07 19:16:26] [Rank 0] step:3361/10000 train_time:227149ms step_avg:67.58ms
+[2025-07-07 19:16:28] [Rank 0] step:3381/10000 train_time:228517ms step_avg:67.59ms
+[2025-07-07 19:16:29] [Rank 0] step:3401/10000 train_time:229885ms step_avg:67.59ms
+[2025-07-07 19:16:31] [Rank 0] step:3421/10000 train_time:231300ms step_avg:67.61ms
+[2025-07-07 19:16:32] [Rank 0] step:3441/10000 train_time:232662ms step_avg:67.61ms
+[2025-07-07 19:16:33] [Rank 0] step:3461/10000 train_time:234030ms step_avg:67.62ms
+[2025-07-07 19:16:35] [Rank 0] step:3481/10000 train_time:235398ms step_avg:67.62ms
+[2025-07-07 19:16:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:16:37] [Rank 0] PRINT: step:3500/10000 train_loss:1.5265 val_loss:1.4937 train_time:237387ms step_avg:67.82ms
+[2025-07-07 19:16:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:16:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:16:37] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 19:16:37] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 19:21:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 19:21:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 19:21:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 19:21:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 19:21:55] [Rank 0] Total Loss: 4.9106 +[2025-07-07 19:21:55] [Rank 0] Total Loss: 4.9106 +[2025-07-07 19:21:55] [Rank 0] Total FTA: 0.0746 +[2025-07-07 19:21:55] [Rank 0] Total FTA: 0.0746 +[2025-07-07 19:21:55] [Rank 0] Group 0 Loss: 5.1527 +[2025-07-07 19:21:55] [Rank 0] Group 0 Loss: 5.1527 +[2025-07-07 19:21:55] [Rank 0] Group 1 Loss: 4.8807 +[2025-07-07 19:21:55] [Rank 0] Group 1 Loss: 4.8807 +[2025-07-07 19:21:55] [Rank 0] Group 2 Loss: 4.7203 +[2025-07-07 19:21:55] [Rank 0] Group 2 Loss: 4.7203 +[2025-07-07 19:21:55] [Rank 0] Group 3 Loss: 4.7693 +[2025-07-07 19:21:55] [Rank 0] Group 3 Loss: 4.7693 +[2025-07-07 19:21:55] [Rank 0] Group 4 Loss: 4.8489 +[2025-07-07 19:21:55] [Rank 0] Group 4 Loss: 4.8489 +[2025-07-07 19:21:55] [Rank 0] Group 5 Loss: 4.9130 +[2025-07-07 19:21:55] [Rank 0] Group 5 Loss: 4.9130 +[2025-07-07 19:21:55] [Rank 0] Group 6 Loss: 4.8279 +[2025-07-07 19:21:55] [Rank 0] Group 6 Loss: 4.8279 +[2025-07-07 19:21:55] [Rank 0] Group 7 Loss: 4.9547 +[2025-07-07 19:21:55] [Rank 0] Group 7 Loss: 4.9547 +[2025-07-07 19:21:55] [Rank 0] Group 8 Loss: 4.9571 +[2025-07-07 19:21:55] [Rank 0] Group 8 Loss: 4.9571 +[2025-07-07 19:21:55] [Rank 0] Group 9 Loss: 4.8660 +[2025-07-07 19:21:55] [Rank 0] Group 9 Loss: 4.8660 +[2025-07-07 19:21:55] [Rank 0] Group 10 Loss: 4.9196 +[2025-07-07 19:21:55] [Rank 0] Group 10 Loss: 4.9196 +[2025-07-07 19:21:55] [Rank 0] Group 11 Loss: 4.8902 +[2025-07-07 19:21:55] [Rank 0] Group 11 Loss: 4.8902 +[2025-07-07 19:21:55] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 19:21:55] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 19:21:55] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 19:21:55] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 19:21:55] [Rank 0] Group 2 FTA: 0.0833 +[2025-07-07 19:21:55] [Rank 0] Group 2 FTA: 0.0833 +[2025-07-07 19:21:55] [Rank 0] Group 3 FTA: 0.0391 +[2025-07-07 19:21:55] [Rank 0] Group 3 FTA: 0.0391 +[2025-07-07 19:21:55] [Rank 0] Group 4 FTA: 0.0599 +[2025-07-07 19:21:55] [Rank 0] Group 4 FTA: 0.0599 +[2025-07-07 19:21:55] [Rank 0] Group 5 FTA: 0.1302 +[2025-07-07 19:21:55] [Rank 0] Group 5 FTA: 0.1302 +[2025-07-07 19:21:55] [Rank 0] Group 6 FTA: 0.0781 +[2025-07-07 19:21:55] [Rank 0] Group 6 FTA: 0.0781 +[2025-07-07 19:21:55] [Rank 0] Group 7 FTA: 0.1120 +[2025-07-07 19:21:55] [Rank 0] Group 7 FTA: 0.1120 +[2025-07-07 19:21:55] [Rank 0] Group 8 FTA: 0.1250 +[2025-07-07 19:21:55] [Rank 0] Group 8 FTA: 0.1250 +[2025-07-07 19:21:55] [Rank 0] Group 9 FTA: 0.1172 +[2025-07-07 19:21:55] [Rank 0] Group 9 FTA: 0.1172 +[2025-07-07 19:21:55] [Rank 0] Group 10 FTA: 0.1133 +[2025-07-07 19:21:55] [Rank 0] Group 10 FTA: 0.1133 +[2025-07-07 19:21:55] [Rank 0] Group 11 FTA: 0.0889 +[2025-07-07 19:21:55] [Rank 0] Group 11 FTA: 0.0889 +[2025-07-07 19:21:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png +[2025-07-07 19:21:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png +[2025-07-07 19:21:57] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:21:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:21:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:21:57] [Rank 0] step:3501/10000 train_time:237396ms step_avg:67.81ms
+[2025-07-07 19:21:59] [Rank 0] step:3521/10000 train_time:238163ms step_avg:67.64ms
+[2025-07-07 19:22:00] [Rank 0] step:3541/10000 train_time:239524ms step_avg:67.64ms
+[2025-07-07 19:22:02] [Rank 0] step:3561/10000 train_time:240887ms step_avg:67.65ms
+[2025-07-07 19:22:03] [Rank 0] step:3581/10000 train_time:242285ms step_avg:67.66ms
+[2025-07-07 19:22:04] [Rank 0] step:3601/10000 train_time:243650ms step_avg:67.66ms
+[2025-07-07 19:22:06] [Rank 0] step:3621/10000 train_time:245014ms step_avg:67.66ms
+[2025-07-07 19:22:07] [Rank 0] step:3641/10000 train_time:246381ms step_avg:67.67ms
+[2025-07-07 19:22:08] [Rank 0] step:3661/10000 train_time:247745ms step_avg:67.67ms
+[2025-07-07 19:22:10] [Rank 0] step:3681/10000 train_time:249111ms step_avg:67.67ms
+[2025-07-07 19:22:11] [Rank 0] step:3701/10000 train_time:250476ms step_avg:67.68ms
+[2025-07-07 19:22:13] [Rank 0] step:3721/10000 train_time:251842ms step_avg:67.68ms
+[2025-07-07 19:22:14] [Rank 0] step:3741/10000 train_time:253208ms step_avg:67.68ms
+[2025-07-07 19:22:15] [Rank 0] step:3761/10000 train_time:254576ms step_avg:67.69ms
+[2025-07-07 19:22:17] [Rank 0] step:3781/10000 train_time:255943ms step_avg:67.69ms
+[2025-07-07 19:22:18] [Rank 0] step:3801/10000 train_time:257337ms step_avg:67.70ms
+[2025-07-07 19:22:19] [Rank 0] step:3821/10000 train_time:258706ms step_avg:67.71ms
+[2025-07-07 19:22:21] [Rank 0] step:3841/10000 train_time:260072ms step_avg:67.71ms
+[2025-07-07 19:22:22] [Rank 0] step:3861/10000 train_time:261440ms step_avg:67.71ms
+[2025-07-07 19:22:24] [Rank 0] step:3881/10000 train_time:262807ms step_avg:67.72ms
+[2025-07-07 19:22:25] [Rank 0] step:3901/10000 train_time:264176ms step_avg:67.72ms
+[2025-07-07 19:22:26] [Rank 0] step:3921/10000 train_time:265544ms step_avg:67.72ms
+[2025-07-07 19:22:28] [Rank 0] step:3941/10000 train_time:266915ms step_avg:67.73ms
+[2025-07-07 19:22:29] [Rank 0] step:3961/10000 train_time:268286ms step_avg:67.73ms
+[2025-07-07 19:22:30] [Rank 0] step:3981/10000 train_time:269689ms step_avg:67.74ms
+[2025-07-07 19:22:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:22:33] [Rank 0] PRINT: step:4000/10000 train_loss:1.4568 val_loss:1.4236 train_time:271679ms step_avg:67.92ms
+[2025-07-07 19:22:33] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:22:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:22:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:27:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:27:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:27:51] [Rank 0] Total Loss: 5.0866
+[2025-07-07 19:27:51] [Rank 0] Total FTA: 0.1102
+[2025-07-07 19:27:51] [Rank 0] Group 0 Loss: 5.3402
+[2025-07-07 19:27:51] [Rank 0] Group 1 Loss: 5.1326
+[2025-07-07 19:27:51] [Rank 0] Group 2 Loss: 5.0042
+[2025-07-07 19:27:51] [Rank 0] Group 3 Loss: 5.1019
+[2025-07-07 19:27:51] [Rank 0] Group 4 Loss: 4.9888
+[2025-07-07 19:27:51] [Rank 0] Group 5 Loss: 5.0147
+[2025-07-07 19:27:51] [Rank 0] Group 6 Loss: 4.9208
+[2025-07-07 19:27:51] [Rank 0] Group 7 Loss: 5.1265
+[2025-07-07 19:27:51] [Rank 0] Group 8 Loss: 5.0139
+[2025-07-07 19:27:51] [Rank 0] Group 9 Loss: 5.0384
+[2025-07-07 19:27:51] [Rank 0] Group 10 Loss: 5.0810
+[2025-07-07 19:27:51] [Rank 0] Group 11 Loss: 5.0570
+[2025-07-07 19:27:51] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 19:27:51] [Rank 0] Group 1 FTA: 0.1510
+[2025-07-07 19:27:51] [Rank 0] Group 2 FTA: 0.1641
+[2025-07-07 19:27:51] [Rank 0] Group 3 FTA: 0.1615
+[2025-07-07 19:27:51] [Rank 0] Group 4 FTA: 0.0729
+[2025-07-07 19:27:51] [Rank 0] Group 5 FTA: 0.1328
+[2025-07-07 19:27:51] [Rank 0] Group 6 FTA: 0.1146
+[2025-07-07 19:27:51] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-07 19:27:51] [Rank 0] Group 8 FTA: 0.1484
+[2025-07-07 19:27:51] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-07 19:27:51] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-07 19:27:51] [Rank 0] Group 11 FTA: 0.1318
+[2025-07-07 19:27:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 19:27:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:27:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:27:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:27:53] [Rank 0] step:4001/10000 train_time:271688ms step_avg:67.91ms
+[2025-07-07 19:27:54] [Rank 0] step:4021/10000 train_time:272468ms step_avg:67.76ms
+[2025-07-07 19:27:56] [Rank 0] step:4041/10000 train_time:273830ms step_avg:67.76ms
+[2025-07-07 19:27:57] [Rank 0] step:4061/10000 train_time:275194ms step_avg:67.77ms
+[2025-07-07 19:27:58] [Rank 0] step:4081/10000 train_time:276557ms step_avg:67.77ms
+[2025-07-07 19:28:00] [Rank 0] step:4101/10000 train_time:277922ms step_avg:67.77ms
+[2025-07-07 19:28:01] [Rank 0] step:4121/10000 train_time:279289ms step_avg:67.77ms
+[2025-07-07 19:28:03] [Rank 0] step:4141/10000 train_time:280655ms step_avg:67.77ms
+[2025-07-07 19:28:04] [Rank 0] step:4161/10000 train_time:282050ms step_avg:67.78ms
+[2025-07-07 19:28:05] [Rank 0] step:4181/10000 train_time:283416ms step_avg:67.79ms
+[2025-07-07 19:28:07] [Rank 0] step:4201/10000 train_time:284780ms step_avg:67.79ms
+[2025-07-07 19:28:08] [Rank 0] step:4221/10000 train_time:286148ms step_avg:67.79ms
+[2025-07-07 19:28:09] [Rank 0] step:4241/10000 train_time:287514ms step_avg:67.79ms
+[2025-07-07 19:28:11] [Rank 0] step:4261/10000 train_time:288882ms step_avg:67.80ms
+[2025-07-07 19:28:12] [Rank 0] step:4281/10000 train_time:290250ms step_avg:67.80ms
+[2025-07-07 19:28:14] [Rank 0] step:4301/10000 train_time:291617ms step_avg:67.80ms
+[2025-07-07 19:28:15] [Rank 0] step:4321/10000 train_time:293030ms step_avg:67.82ms
+[2025-07-07 19:28:16] [Rank 0] step:4341/10000 train_time:294404ms step_avg:67.82ms
+[2025-07-07 19:28:18] [Rank 0] step:4361/10000 train_time:295771ms step_avg:67.82ms
+[2025-07-07 19:28:19] [Rank 0] step:4381/10000 train_time:297138ms step_avg:67.82ms
+[2025-07-07 19:28:20] [Rank 0] step:4401/10000 train_time:298506ms step_avg:67.83ms
+[2025-07-07 19:28:22] [Rank 0] step:4421/10000 train_time:299885ms step_avg:67.83ms
+[2025-07-07 19:28:23] [Rank 0] step:4441/10000 train_time:301253ms step_avg:67.83ms
+[2025-07-07 19:28:25] [Rank 0] step:4461/10000 train_time:302620ms step_avg:67.84ms
+[2025-07-07 19:28:26] [Rank 0] step:4481/10000 train_time:303987ms step_avg:67.84ms
+[2025-07-07 19:28:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:28:28] [Rank 0] PRINT: step:4500/10000 train_loss:1.3853 val_loss:1.3681 train_time:305975ms step_avg:67.99ms
+[2025-07-07 19:28:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:28:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:28:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:33:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:33:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:33:49] [Rank 0] Total Loss: 5.2172
+[2025-07-07 19:33:49] [Rank 0] Total FTA: 0.1651
+[2025-07-07 19:33:49] [Rank 0] Group 0 Loss: 5.4816
+[2025-07-07 19:33:49] [Rank 0] Group 1 Loss: 5.0908
+[2025-07-07 19:33:49] [Rank 0] Group 2 Loss: 4.9998
+[2025-07-07 19:33:49] [Rank 0] Group 3 Loss: 5.2974
+[2025-07-07 19:33:49] [Rank 0] Group 4 Loss: 5.1987
+[2025-07-07 19:33:49] [Rank 0] Group 5 Loss: 5.1703
+[2025-07-07 19:33:49] [Rank 0] Group 6 Loss: 5.1489
+[2025-07-07 19:33:49] [Rank 0] Group 7 Loss: 5.1994
+[2025-07-07 19:33:49] [Rank 0] Group 8 Loss: 5.1849
+[2025-07-07 19:33:49] [Rank 0] Group 9 Loss: 5.1711
+[2025-07-07 19:33:49] [Rank 0] Group 10 Loss: 5.2129
+[2025-07-07 19:33:49] [Rank 0] Group 11 Loss: 5.1998
+[2025-07-07 19:33:49] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 19:33:49] [Rank 0] Group 1 FTA: 0.1641
+[2025-07-07 19:33:49] [Rank 0] Group 2 FTA: 0.2422
+[2025-07-07 19:33:49] [Rank 0] Group 3 FTA: 0.1953
+[2025-07-07 19:33:49] [Rank 0] Group 4 FTA: 0.1302
+[2025-07-07 19:33:49] [Rank 0] Group 5 FTA: 0.1719
+[2025-07-07 19:33:49] [Rank 0] Group 6 FTA: 0.2005
+[2025-07-07 19:33:49] [Rank 0] Group 7 FTA: 0.1016
+[2025-07-07 19:33:49] [Rank 0] Group 8 FTA: 0.1458
+[2025-07-07 19:33:49] [Rank 0] Group 9 FTA: 0.1719
+[2025-07-07 19:33:49] [Rank 0] Group 10 FTA: 0.1406
+[2025-07-07 19:33:49] [Rank 0] Group 11 FTA: 0.1611
+[2025-07-07 19:33:49] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 19:33:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:33:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:33:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:33:51] [Rank 0] step:4501/10000 train_time:305993ms step_avg:67.98ms
+[2025-07-07 19:33:52] [Rank 0] step:4521/10000 train_time:307454ms step_avg:68.01ms
+[2025-07-07 19:33:54] [Rank 0] step:4541/10000 train_time:308817ms step_avg:68.01ms
+[2025-07-07 19:33:55] [Rank 0] step:4561/10000 train_time:310180ms step_avg:68.01ms
+[2025-07-07 19:33:57] [Rank 0] step:4581/10000 train_time:311544ms step_avg:68.01ms
+[2025-07-07 19:33:58] [Rank 0] step:4601/10000 train_time:312911ms step_avg:68.01ms
+[2025-07-07 19:33:59] [Rank 0] step:4621/10000 train_time:314276ms step_avg:68.01ms
+[2025-07-07 19:34:01] [Rank 0] step:4641/10000 train_time:315641ms step_avg:68.01ms
+[2025-07-07 19:34:02] [Rank 0] step:4661/10000 train_time:317009ms step_avg:68.01ms
+[2025-07-07 19:34:03] [Rank 0] step:4681/10000 train_time:318630ms step_avg:68.07ms
+[2025-07-07 19:34:05] [Rank 0] step:4701/10000 train_time:319777ms step_avg:68.02ms
+[2025-07-07 19:34:06] [Rank 0] step:4721/10000 train_time:321144ms step_avg:68.02ms
+[2025-07-07 19:34:08] [Rank 0] step:4741/10000 train_time:322512ms step_avg:68.03ms
+[2025-07-07 19:34:09] [Rank 0] step:4761/10000 train_time:323878ms step_avg:68.03ms
+[2025-07-07 19:34:10] [Rank 0] step:4781/10000 train_time:325247ms step_avg:68.03ms
+[2025-07-07 19:34:12] [Rank 0] step:4801/10000 train_time:326617ms step_avg:68.03ms
+[2025-07-07 19:34:13] [Rank 0] step:4821/10000 train_time:327985ms step_avg:68.03ms
+[2025-07-07 19:34:14] [Rank 0] step:4841/10000 train_time:329353ms step_avg:68.03ms
+[2025-07-07 19:34:16] [Rank 0] step:4861/10000 train_time:330722ms step_avg:68.04ms
+[2025-07-07 19:34:17] [Rank 0] step:4881/10000 train_time:332128ms step_avg:68.05ms
+[2025-07-07 19:34:19] [Rank 0] step:4901/10000 train_time:333497ms step_avg:68.05ms
+[2025-07-07 19:34:20] [Rank 0] step:4921/10000 train_time:334866ms step_avg:68.05ms
+[2025-07-07 19:34:21] [Rank 0] step:4941/10000 train_time:336236ms step_avg:68.05ms
+[2025-07-07 19:34:23] [Rank 0] step:4961/10000 train_time:337605ms step_avg:68.05ms
+[2025-07-07 19:34:24] [Rank 0] step:4981/10000 train_time:338975ms step_avg:68.05ms
+[2025-07-07 19:34:25] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:34:26] [Rank 0] PRINT: step:5000/10000 train_loss:1.3190 val_loss:1.3049 train_time:340968ms step_avg:68.19ms
+[2025-07-07 19:34:26] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:34:26] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:34:26] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:39:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:39:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:39:46] [Rank 0] Total Loss: 5.2374
+[2025-07-07 19:39:46] [Rank 0] Total FTA: 0.1969
+[2025-07-07 19:39:46] [Rank 0] Group 0 Loss: 5.5461
+[2025-07-07 19:39:46] [Rank 0] Group 1 Loss: 5.1311
+[2025-07-07 19:39:46] [Rank 0] Group 2 Loss: 4.9638
+[2025-07-07 19:39:46] [Rank 0] Group 3 Loss: 5.2513
+[2025-07-07 19:39:46] [Rank 0] Group 4 Loss: 5.1928
+[2025-07-07 19:39:46] [Rank 0] Group 5 Loss: 5.1965
+[2025-07-07 19:39:46] [Rank 0] Group 6 Loss: 5.1191
+[2025-07-07 19:39:46] [Rank 0] Group 7 Loss: 5.2627
+[2025-07-07 19:39:46] [Rank 0] Group 8 Loss: 5.1832
+[2025-07-07 19:39:46] [Rank 0] Group 9 Loss: 5.2160
+[2025-07-07 19:39:46] [Rank 0] Group 10 Loss: 5.2501
+[2025-07-07 19:39:46] [Rank 0] Group 11 Loss: 5.2294
+[2025-07-07 19:39:46] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 19:39:46] [Rank 0] Group 1 FTA: 0.1901
+[2025-07-07 19:39:46] [Rank 0] Group 2 FTA: 0.2266
+[2025-07-07 19:39:46] [Rank 0] Group 3 FTA: 0.1589
+[2025-07-07 19:39:46] [Rank 0] Group 4 FTA: 0.1068
+[2025-07-07 19:39:46] [Rank 0] Group 5 FTA: 0.1979
+[2025-07-07 19:39:46] [Rank 0] Group 6 FTA: 0.2240
+[2025-07-07 19:39:46] [Rank 0] Group 7 FTA: 0.2240
+[2025-07-07 19:39:46] [Rank 0] Group 8 FTA: 0.2188
+[2025-07-07 19:39:46] [Rank 0] Group 9 FTA: 0.2188
+[2025-07-07 19:39:46] [Rank 0] Group 10 FTA: 0.2520
+[2025-07-07 19:39:46] [Rank 0] Group 11 FTA: 0.1973
+[2025-07-07 19:39:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 19:39:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:39:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:39:47] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:39:47] [Rank 0] step:5001/10000 train_time:340978ms step_avg:68.18ms
+[2025-07-07 19:39:49] [Rank 0] step:5021/10000 train_time:341733ms step_avg:68.06ms
+[2025-07-07 19:39:50] [Rank 0] step:5041/10000 train_time:343098ms step_avg:68.06ms
+[2025-07-07 19:39:51] [Rank 0] step:5061/10000 train_time:344498ms step_avg:68.07ms
+[2025-07-07 19:39:53] [Rank 0] step:5081/10000 train_time:345863ms step_avg:68.07ms
+[2025-07-07 19:39:54] [Rank 0] step:5101/10000 train_time:347228ms step_avg:68.07ms
+[2025-07-07 19:39:55] [Rank 0] step:5121/10000 train_time:348594ms step_avg:68.07ms
+[2025-07-07 19:39:57] [Rank 0] step:5141/10000 train_time:349987ms step_avg:68.08ms
+[2025-07-07 19:39:58] [Rank 0] step:5161/10000 train_time:351357ms step_avg:68.08ms
+[2025-07-07 19:40:00] [Rank 0] step:5181/10000 train_time:352725ms step_avg:68.08ms
+[2025-07-07 19:40:01] [Rank 0] step:5201/10000 train_time:354092ms step_avg:68.08ms
+[2025-07-07 19:40:02] [Rank 0] step:5221/10000 train_time:356145ms step_avg:68.21ms
+[2025-07-07 19:40:04] [Rank 0] step:5241/10000 train_time:356884ms step_avg:68.09ms
+[2025-07-07 19:40:05] [Rank 0] step:5261/10000 train_time:358254ms step_avg:68.10ms
+[2025-07-07 19:40:06] [Rank 0] step:5281/10000 train_time:359622ms step_avg:68.10ms
+[2025-07-07 19:40:08] [Rank 0] step:5301/10000 train_time:360992ms step_avg:68.10ms
+[2025-07-07 19:40:09] [Rank 0] step:5321/10000 train_time:362362ms step_avg:68.10ms
+[2025-07-07 19:40:11] [Rank 0] step:5341/10000 train_time:363731ms step_avg:68.10ms
+[2025-07-07 19:40:12] [Rank 0] step:5361/10000 train_time:365100ms step_avg:68.10ms
+[2025-07-07 19:40:13] [Rank 0] step:5381/10000 train_time:366470ms step_avg:68.10ms
+[2025-07-07 19:40:15] [Rank 0] step:5401/10000 train_time:367840ms step_avg:68.11ms
+[2025-07-07 19:40:16] [Rank 0] step:5421/10000 train_time:369236ms step_avg:68.11ms
+[2025-07-07 19:40:17] [Rank 0] step:5441/10000 train_time:370606ms step_avg:68.11ms
+[2025-07-07 19:40:19] [Rank 0] step:5461/10000 train_time:371977ms step_avg:68.12ms
+[2025-07-07 19:40:20] [Rank 0] step:5481/10000 train_time:373347ms step_avg:68.12ms
+[2025-07-07 19:40:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:40:22] [Rank 0] PRINT: step:5500/10000 train_loss:1.2605 val_loss:1.2598 train_time:375341ms step_avg:68.24ms
+[2025-07-07 19:40:22] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:40:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:40:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:45:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:45:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:45:43] [Rank 0] Total Loss: 5.2384
+[2025-07-07 19:45:43] [Rank 0] Total FTA: 0.2945
+[2025-07-07 19:45:43] [Rank 0] Group 0 Loss: 5.5205
+[2025-07-07 19:45:43] [Rank 0] Group 1 Loss: 5.1108
+[2025-07-07 19:45:43] [Rank 0] Group 2 Loss: 5.0568
+[2025-07-07 19:45:43] [Rank 0] Group 3 Loss: 5.3502
+[2025-07-07 19:45:43] [Rank 0] Group 4 Loss: 5.1589
+[2025-07-07 19:45:43] [Rank 0] Group 5 Loss: 5.1888
+[2025-07-07 19:45:43] [Rank 0] Group 6 Loss: 5.1014
+[2025-07-07 19:45:43] [Rank 0] Group 7 Loss: 5.2898
+[2025-07-07 19:45:43] [Rank 0] Group 8 Loss: 5.2012
+[2025-07-07 19:45:43] [Rank 0] Group 9 Loss: 5.1440
+[2025-07-07 19:45:43] [Rank 0] Group 10 Loss: 5.2072
+[2025-07-07 19:45:43] [Rank 0] Group 11 Loss: 5.2341
+[2025-07-07 19:45:43] [Rank 0] Group 0 FTA: 0.3186
+[2025-07-07 19:45:43] [Rank 0] Group 1 FTA: 0.3359
+[2025-07-07 19:45:43] [Rank 0] Group 2 FTA: 0.4167
+[2025-07-07 19:45:43] [Rank 0] Group 3 FTA: 0.3099
+[2025-07-07 19:45:43] [Rank 0] Group 4 FTA: 0.3073
+[2025-07-07 19:45:43] [Rank 0] Group 5 FTA: 0.3047
+[2025-07-07 19:45:43] [Rank 0] Group 6 FTA: 0.2656
+[2025-07-07 19:45:43] [Rank 0] Group 7 FTA: 0.2370
+[2025-07-07 19:45:43] [Rank 0] Group 8 FTA: 0.2266
+[2025-07-07 19:45:43] [Rank 0] Group 9 FTA: 0.2578
+[2025-07-07 19:45:43] [Rank 0] Group 10 FTA: 0.2695
+[2025-07-07 19:45:43] [Rank 0] Group 11 FTA: 0.2803
+[2025-07-07 19:45:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 19:45:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:45:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:45:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:45:45] [Rank 0] step:5501/10000 train_time:375350ms step_avg:68.23ms
+[2025-07-07 19:45:46] [Rank 0] step:5521/10000 train_time:376112ms step_avg:68.12ms
+[2025-07-07 19:45:48] [Rank 0] step:5541/10000 train_time:377475ms step_avg:68.12ms
+[2025-07-07 19:45:49] [Rank 0] step:5561/10000 train_time:378845ms step_avg:68.13ms
+[2025-07-07 19:45:50] [Rank 0] step:5581/10000 train_time:380466ms step_avg:68.17ms
+[2025-07-07 19:45:52] [Rank 0] step:5601/10000 train_time:381627ms step_avg:68.14ms
+[2025-07-07 19:45:53] [Rank 0] step:5621/10000 train_time:382993ms step_avg:68.14ms
+[2025-07-07 19:45:54] [Rank 0] step:5641/10000 train_time:384360ms step_avg:68.14ms
+[2025-07-07 19:45:56] [Rank 0] step:5661/10000 train_time:385727ms step_avg:68.14ms
+[2025-07-07 19:45:57] [Rank 0] step:5681/10000 train_time:387096ms step_avg:68.14ms
+[2025-07-07 19:45:59] [Rank 0] step:5701/10000 train_time:388464ms step_avg:68.14ms
+[2025-07-07 19:46:00] [Rank 0] step:5721/10000 train_time:389832ms step_avg:68.14ms
+[2025-07-07 19:46:01] [Rank 0] step:5741/10000 train_time:391200ms step_avg:68.14ms
+[2025-07-07 19:46:03] [Rank 0] step:5761/10000 train_time:392822ms step_avg:68.19ms
+[2025-07-07 19:46:04] [Rank 0] step:5781/10000 train_time:393981ms step_avg:68.15ms
+[2025-07-07 19:46:05] [Rank 0] step:5801/10000 train_time:395350ms step_avg:68.15ms
+[2025-07-07 19:46:07] [Rank 0] step:5821/10000 train_time:396720ms step_avg:68.15ms
+[2025-07-07 19:46:08] [Rank 0] step:5841/10000 train_time:398088ms step_avg:68.15ms
+[2025-07-07 19:46:10] [Rank 0] step:5861/10000 train_time:399458ms step_avg:68.16ms
+[2025-07-07 19:46:11] [Rank 0] step:5881/10000 train_time:400826ms step_avg:68.16ms
+[2025-07-07 19:46:12] [Rank 0] step:5901/10000 train_time:402194ms step_avg:68.16ms
+[2025-07-07 19:46:14] [Rank 0] step:5921/10000 train_time:403563ms step_avg:68.16ms
+[2025-07-07 19:46:15] [Rank 0] step:5941/10000 train_time:405182ms step_avg:68.20ms
+[2025-07-07 19:46:16] [Rank 0] step:5961/10000 train_time:406338ms step_avg:68.17ms
+[2025-07-07 19:46:18] [Rank 0] step:5981/10000 train_time:407708ms step_avg:68.17ms
+[2025-07-07 19:46:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:46:20] [Rank 0] PRINT: step:6000/10000 train_loss:1.2134 val_loss:1.2245 train_time:409699ms step_avg:68.28ms
+[2025-07-07 19:46:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:46:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:46:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 19:51:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 19:51:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 19:51:41] [Rank 0] Total Loss: 5.2341
+[2025-07-07 19:51:41] [Rank 0] Total FTA: 0.2723
+[2025-07-07 19:51:41] [Rank 0] Group 0 Loss: 5.5813
+[2025-07-07 19:51:41] [Rank 0] Group 1 Loss: 5.1391
+[2025-07-07 19:51:41] [Rank 0] Group 2 Loss: 5.1262
+[2025-07-07 19:51:41] [Rank 0] Group 3 Loss: 5.2052
+[2025-07-07 19:51:41] [Rank 0] Group 4 Loss: 5.1554
+[2025-07-07 19:51:41] [Rank 0] Group 5 Loss: 5.1512
+[2025-07-07 19:51:41] [Rank 0] Group 6 Loss: 5.0851
+[2025-07-07 19:51:41] [Rank 0] Group 7 Loss: 5.2052
+[2025-07-07 19:51:41] [Rank 0] Group 8 Loss: 5.1542
+[2025-07-07 19:51:41] [Rank 0] Group 9 Loss: 5.1709
+[2025-07-07 19:51:41] [Rank 0] Group 10 Loss: 5.2611
+[2025-07-07 19:51:41] [Rank 0] Group 11 Loss: 5.2196
+[2025-07-07 19:51:41] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 19:51:41] [Rank 0] Group 1 FTA: 0.1953
+[2025-07-07 19:51:41] [Rank 0] Group 2 FTA: 0.3333
+[2025-07-07 19:51:41] [Rank 0] Group 3 FTA: 0.2161
+[2025-07-07 19:51:41] [Rank 0] Group 4 FTA: 0.2630
+[2025-07-07 19:51:41] [Rank 0] Group 5 FTA: 0.3385
+[2025-07-07 19:51:41] [Rank 0] Group 6 FTA: 0.3411
+[2025-07-07 19:51:41] [Rank 0] Group 7 FTA: 0.2812
+[2025-07-07 19:51:41] [Rank 0] Group 8 FTA: 0.3021
+[2025-07-07 19:51:41] [Rank 0] Group 9 FTA: 0.2695
+[2025-07-07 19:51:41] [Rank 0] Group 10 FTA: 0.3223
+[2025-07-07 19:51:41] [Rank 0] Group 11 FTA: 0.2939
+[2025-07-07 19:51:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 19:51:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:51:42] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:51:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:51:42] [Rank 0] step:6001/10000 train_time:409708ms step_avg:68.27ms
+[2025-07-07 19:51:44] [Rank 0] step:6021/10000 train_time:410466ms step_avg:68.17ms
+[2025-07-07 19:51:45] [Rank 0] step:6041/10000 train_time:411828ms step_avg:68.17ms
+[2025-07-07 19:51:46] [Rank 0] step:6061/10000 train_time:413193ms step_avg:68.17ms
+[2025-07-07 19:51:48] [Rank 0] step:6081/10000 train_time:414558ms step_avg:68.17ms
+[2025-07-07 19:51:49] [Rank 0] step:6101/10000 train_time:415924ms step_avg:68.17ms
+[2025-07-07 19:51:50] [Rank 0] step:6121/10000 train_time:417290ms step_avg:68.17ms
+[2025-07-07 19:51:52] [Rank 0] step:6141/10000 train_time:418685ms step_avg:68.18ms
+[2025-07-07 19:51:53] [Rank 0] step:6161/10000 train_time:420053ms step_avg:68.18ms
+[2025-07-07 19:51:55] [Rank 0] step:6181/10000 train_time:421421ms step_avg:68.18ms
+[2025-07-07 19:51:56] [Rank 0] step:6201/10000 train_time:422790ms step_avg:68.18ms
+[2025-07-07 19:51:57] [Rank 0] step:6221/10000 train_time:424158ms step_avg:68.18ms
+[2025-07-07 19:51:59] [Rank 0] step:6241/10000 train_time:425526ms step_avg:68.18ms
+[2025-07-07 19:52:00] [Rank 0] step:6261/10000 train_time:426896ms step_avg:68.18ms
+[2025-07-07 19:52:01] [Rank 0] step:6281/10000 train_time:428265ms step_avg:68.18ms
+[2025-07-07 19:52:03] [Rank 0] step:6301/10000 train_time:430301ms step_avg:68.29ms
+[2025-07-07 19:52:04] [Rank 0] step:6321/10000 train_time:431040ms step_avg:68.19ms
+[2025-07-07 19:52:06] [Rank 0] step:6341/10000 train_time:432411ms step_avg:68.19ms
+[2025-07-07 19:52:07] [Rank 0] step:6361/10000 train_time:433780ms step_avg:68.19ms
+[2025-07-07 19:52:08] [Rank 0] step:6381/10000 train_time:435149ms step_avg:68.19ms
+[2025-07-07 19:52:10] [Rank 0] step:6401/10000 train_time:436519ms step_avg:68.20ms
+[2025-07-07 19:52:11] [Rank 0] step:6421/10000 train_time:437888ms step_avg:68.20ms
+[2025-07-07 19:52:12] [Rank 0] step:6441/10000 train_time:439258ms step_avg:68.20ms
+[2025-07-07 19:52:14] [Rank 0] step:6461/10000 train_time:440628ms step_avg:68.20ms
+[2025-07-07 19:52:15] [Rank 0] step:6481/10000 train_time:442002ms step_avg:68.20ms
+[2025-07-07 19:52:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:52:17] [Rank 0] PRINT: step:6500/10000 train_loss:1.1793 val_loss:1.1940 train_time:443994ms step_avg:68.31ms
+[2025-07-07 19:52:17] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:52:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:52:18] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 19:52:18] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 19:57:36] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 19:57:36] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 19:57:36] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 19:57:36] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 19:57:36] [Rank 0] Total Loss: 5.2194 +[2025-07-07 19:57:36] [Rank 0] Total Loss: 5.2194 +[2025-07-07 19:57:36] [Rank 0] Total FTA: 0.3476 +[2025-07-07 19:57:36] [Rank 0] Total FTA: 0.3476 +[2025-07-07 19:57:36] [Rank 0] Group 0 Loss: 5.6416 +[2025-07-07 19:57:36] [Rank 0] Group 0 Loss: 5.6416 +[2025-07-07 19:57:36] [Rank 0] Group 1 Loss: 5.0588 +[2025-07-07 19:57:36] [Rank 0] Group 1 Loss: 5.0588 +[2025-07-07 19:57:36] [Rank 0] Group 2 Loss: 5.0693 +[2025-07-07 19:57:36] [Rank 0] Group 2 Loss: 5.0693 +[2025-07-07 19:57:36] [Rank 0] Group 3 Loss: 5.2492 +[2025-07-07 19:57:36] [Rank 0] Group 3 Loss: 5.2492 +[2025-07-07 19:57:36] [Rank 0] Group 4 Loss: 5.1691 +[2025-07-07 19:57:36] [Rank 0] Group 4 Loss: 5.1691 +[2025-07-07 19:57:36] [Rank 0] Group 5 Loss: 5.1544 +[2025-07-07 19:57:36] [Rank 0] Group 5 Loss: 5.1544 +[2025-07-07 19:57:36] [Rank 0] Group 6 Loss: 5.1292 +[2025-07-07 19:57:36] [Rank 0] Group 6 Loss: 5.1292 +[2025-07-07 19:57:36] [Rank 0] Group 7 Loss: 5.1996 +[2025-07-07 19:57:36] [Rank 0] Group 7 Loss: 5.1996 +[2025-07-07 19:57:36] [Rank 0] Group 8 Loss: 5.1626 +[2025-07-07 19:57:36] [Rank 0] Group 8 Loss: 5.1626 +[2025-07-07 19:57:36] [Rank 0] Group 9 Loss: 5.1996 +[2025-07-07 19:57:36] [Rank 0] Group 9 Loss: 5.1996 +[2025-07-07 19:57:36] [Rank 0] Group 10 Loss: 5.1995 +[2025-07-07 19:57:36] [Rank 0] Group 10 Loss: 5.1995 +[2025-07-07 19:57:36] [Rank 0] Group 11 Loss: 5.1286 +[2025-07-07 19:57:36] [Rank 0] Group 11 Loss: 5.1286 +[2025-07-07 19:57:36] [Rank 0] Group 0 FTA: 0.3342 +[2025-07-07 19:57:36] [Rank 0] Group 0 FTA: 0.3342 +[2025-07-07 19:57:36] [Rank 0] Group 1 FTA: 0.3385 +[2025-07-07 19:57:36] [Rank 0] Group 1 FTA: 0.3385 +[2025-07-07 19:57:36] [Rank 0] Group 2 FTA: 0.4089 +[2025-07-07 19:57:36] [Rank 0] Group 2 FTA: 0.4089 +[2025-07-07 19:57:36] [Rank 0] Group 3 FTA: 0.2656 +[2025-07-07 19:57:36] [Rank 0] Group 3 FTA: 0.2656 +[2025-07-07 19:57:37] [Rank 0] Group 4 FTA: 0.2526 +[2025-07-07 19:57:37] [Rank 0] Group 4 FTA: 0.2526 +[2025-07-07 19:57:37] [Rank 0] Group 5 FTA: 0.3880 +[2025-07-07 19:57:37] [Rank 0] Group 5 FTA: 0.3880 +[2025-07-07 19:57:37] [Rank 0] Group 6 FTA: 0.3203 +[2025-07-07 19:57:37] [Rank 0] Group 6 FTA: 0.3203 +[2025-07-07 19:57:37] [Rank 0] Group 7 FTA: 0.3984 +[2025-07-07 19:57:37] [Rank 0] Group 7 FTA: 0.3984 +[2025-07-07 19:57:37] [Rank 0] Group 8 FTA: 0.3646 +[2025-07-07 19:57:37] [Rank 0] Group 8 FTA: 0.3646 +[2025-07-07 19:57:37] [Rank 0] Group 9 FTA: 0.3828 +[2025-07-07 19:57:37] [Rank 0] Group 9 FTA: 0.3828 +[2025-07-07 19:57:37] [Rank 0] Group 10 FTA: 0.3691 +[2025-07-07 19:57:37] [Rank 0] Group 10 FTA: 0.3691 +[2025-07-07 19:57:37] [Rank 0] Group 11 FTA: 0.3545 +[2025-07-07 19:57:37] [Rank 0] Group 11 FTA: 0.3545 +[2025-07-07 19:57:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png +[2025-07-07 19:57:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png +[2025-07-07 19:57:37] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 19:57:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 19:57:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 19:57:38] [Rank 0] step:6501/10000 train_time:444004ms step_avg:68.30ms
+[2025-07-07 19:57:39] [Rank 0] step:6521/10000 train_time:444764ms step_avg:68.20ms
+[2025-07-07 19:57:41] [Rank 0] step:6541/10000 train_time:446126ms step_avg:68.20ms
+[2025-07-07 19:57:42] [Rank 0] step:6561/10000 train_time:447489ms step_avg:68.20ms
+[2025-07-07 19:57:43] [Rank 0] step:6581/10000 train_time:448855ms step_avg:68.20ms
+[2025-07-07 19:57:45] [Rank 0] step:6601/10000 train_time:450219ms step_avg:68.20ms
+[2025-07-07 19:57:46] [Rank 0] step:6621/10000 train_time:451585ms step_avg:68.21ms
+[2025-07-07 19:57:48] [Rank 0] step:6641/10000 train_time:452951ms step_avg:68.21ms
+[2025-07-07 19:57:49] [Rank 0] step:6661/10000 train_time:454984ms step_avg:68.31ms
+[2025-07-07 19:57:50] [Rank 0] step:6681/10000 train_time:455719ms step_avg:68.21ms
+[2025-07-07 19:57:52] [Rank 0] step:6701/10000 train_time:457097ms step_avg:68.21ms
+[2025-07-07 19:57:53] [Rank 0] step:6721/10000 train_time:458465ms step_avg:68.21ms
+[2025-07-07 19:57:54] [Rank 0] step:6741/10000 train_time:459833ms step_avg:68.21ms
+[2025-07-07 19:57:56] [Rank 0] step:6761/10000 train_time:461201ms step_avg:68.21ms
+[2025-07-07 19:57:57] [Rank 0] step:6781/10000 train_time:462570ms step_avg:68.22ms
+[2025-07-07 19:57:59] [Rank 0] step:6801/10000 train_time:463940ms step_avg:68.22ms
+[2025-07-07 19:58:00] [Rank 0] step:6821/10000 train_time:465309ms step_avg:68.22ms
+[2025-07-07 19:58:01] [Rank 0] step:6841/10000 train_time:466680ms step_avg:68.22ms
+[2025-07-07 19:58:03] [Rank 0] step:6861/10000 train_time:468076ms step_avg:68.22ms
+[2025-07-07 19:58:04] [Rank 0] step:6881/10000 train_time:469446ms step_avg:68.22ms
+[2025-07-07 19:58:05] [Rank 0] step:6901/10000 train_time:470815ms step_avg:68.22ms
+[2025-07-07 19:58:07] [Rank 0] step:6921/10000 train_time:472184ms step_avg:68.22ms
+[2025-07-07 19:58:08] [Rank 0] step:6941/10000 train_time:473554ms step_avg:68.23ms
+[2025-07-07 19:58:10] [Rank 0] step:6961/10000 train_time:474923ms step_avg:68.23ms
+[2025-07-07 19:58:11] [Rank 0] step:6981/10000 train_time:476293ms step_avg:68.23ms
+[2025-07-07 19:58:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 19:58:13] [Rank 0] PRINT: step:7000/10000 train_loss:1.1539 val_loss:1.1725 train_time:478284ms step_avg:68.33ms
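
The recurring divisibility warning above is plain batch arithmetic: the validation loop can only score whole batches, so the remainder of val_tokens is skipped. A minimal sketch of the numbers involved, in Python (variable names are illustrative, not taken from the training script):

    val_tokens = 1_966_080      # from the run's hyperparameters
    val_batch_size = 262_144    # per-pass validation batch, as printed in the warning
    full_batches = val_tokens // val_batch_size   # 7 full batches
    evaluated = full_batches * val_batch_size     # 1_835_008 tokens actually scored
    missed = val_tokens - evaluated               # 131_072 tokens potentially missed
    assert val_tokens % val_batch_size != 0       # the condition behind the warning

Assuming the loop drops the partial batch, every val_loss in this log is computed on 1835008 of the 1966080 configured tokens, which is benign for comparisons as long as every run drops the same remainder.
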
+[2025-07-07 19:58:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 19:58:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 19:58:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:03:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:03:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:03:32] [Rank 0] Total Loss: 5.4484
+[2025-07-07 20:03:32] [Rank 0] Total FTA: 0.4023
+[2025-07-07 20:03:32] [Rank 0] Group 0 Loss: 5.7624
+[2025-07-07 20:03:32] [Rank 0] Group 1 Loss: 5.3472
+[2025-07-07 20:03:32] [Rank 0] Group 2 Loss: 5.3109
+[2025-07-07 20:03:32] [Rank 0] Group 3 Loss: 5.4631
+[2025-07-07 20:03:32] [Rank 0] Group 4 Loss: 5.3289
+[2025-07-07 20:03:32] [Rank 0] Group 5 Loss: 5.4793
+[2025-07-07 20:03:33] [Rank 0] Group 6 Loss: 5.3302
+[2025-07-07 20:03:33] [Rank 0] Group 7 Loss: 5.4141
+[2025-07-07 20:03:33] [Rank 0] Group 8 Loss: 5.3875
+[2025-07-07 20:03:33] [Rank 0] Group 9 Loss: 5.3924
+[2025-07-07 20:03:33] [Rank 0] Group 10 Loss: 5.4069
+[2025-07-07 20:03:33] [Rank 0] Group 11 Loss: 5.4444
+[2025-07-07 20:03:33] [Rank 0] Group 0 FTA: 0.4967
+[2025-07-07 20:03:33] [Rank 0] Group 1 FTA: 0.5000
+[2025-07-07 20:03:33] [Rank 0] Group 2 FTA: 0.4922
+[2025-07-07 20:03:33] [Rank 0] Group 3 FTA: 0.3151
+[2025-07-07 20:03:33] [Rank 0] Group 4 FTA: 0.3203
+[2025-07-07 20:03:33] [Rank 0] Group 5 FTA: 0.4036
+[2025-07-07 20:03:33] [Rank 0] Group 6 FTA: 0.3724
+[2025-07-07 20:03:33] [Rank 0] Group 7 FTA: 0.3828
+[2025-07-07 20:03:33] [Rank 0] Group 8 FTA: 0.3880
+[2025-07-07 20:03:33] [Rank 0] Group 9 FTA: 0.3711
+[2025-07-07 20:03:33] [Rank 0] Group 10 FTA: 0.3340
+[2025-07-07 20:03:33] [Rank 0] Group 11 FTA: 0.3896
+[2025-07-07 20:03:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 20:03:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 20:03:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 20:03:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 20:03:34] [Rank 0] step:7001/10000 train_time:478295ms step_avg:68.32ms
+[2025-07-07 20:03:36] [Rank 0] step:7021/10000 train_time:479306ms step_avg:68.27ms
+[2025-07-07 20:03:37] [Rank 0] step:7041/10000 train_time:480456ms step_avg:68.24ms
+[2025-07-07 20:03:38] [Rank 0] step:7061/10000 train_time:481821ms step_avg:68.24ms
+[2025-07-07 20:03:40] [Rank 0] step:7081/10000 train_time:483187ms step_avg:68.24ms
+[2025-07-07 20:03:41] [Rank 0] step:7101/10000 train_time:484553ms step_avg:68.24ms
+[2025-07-07 20:03:42] [Rank 0] step:7121/10000 train_time:485920ms step_avg:68.24ms
+[2025-07-07 20:03:44] [Rank 0] step:7141/10000 train_time:487289ms step_avg:68.24ms
+[2025-07-07 20:03:45] [Rank 0] step:7161/10000 train_time:488656ms step_avg:68.24ms
+[2025-07-07 20:03:46] [Rank 0] step:7181/10000 train_time:490025ms step_avg:68.24ms
+[2025-07-07 20:03:48] [Rank 0] step:7201/10000 train_time:491393ms step_avg:68.24ms
+[2025-07-07 20:03:49] [Rank 0] step:7221/10000 train_time:492809ms step_avg:68.25ms
+[2025-07-07 20:03:51] [Rank 0] step:7241/10000 train_time:494177ms step_avg:68.25ms
+[2025-07-07 20:03:52] [Rank 0] step:7261/10000 train_time:495548ms step_avg:68.25ms
+[2025-07-07 20:03:53] [Rank 0] step:7281/10000 train_time:496917ms step_avg:68.25ms
+[2025-07-07 20:03:55] [Rank 0] step:7301/10000 train_time:498285ms step_avg:68.25ms
+[2025-07-07 20:03:56] [Rank 0] step:7321/10000 train_time:499653ms step_avg:68.25ms
+[2025-07-07 20:03:57] [Rank 0] step:7341/10000 train_time:501024ms step_avg:68.25ms
+[2025-07-07 20:03:59] [Rank 0] step:7361/10000 train_time:502395ms step_avg:68.25ms
+[2025-07-07 20:04:00] [Rank 0] step:7381/10000 train_time:503764ms step_avg:68.25ms
+[2025-07-07 20:04:02] [Rank 0] step:7401/10000 train_time:505163ms step_avg:68.26ms
+[2025-07-07 20:04:03] [Rank 0] step:7421/10000 train_time:506533ms step_avg:68.26ms
+[2025-07-07 20:04:04] [Rank 0] step:7441/10000 train_time:507904ms step_avg:68.26ms
+[2025-07-07 20:04:06] [Rank 0] step:7461/10000 train_time:509276ms step_avg:68.26ms
+[2025-07-07 20:04:07] [Rank 0] step:7481/10000 train_time:510647ms step_avg:68.26ms
+[2025-07-07 20:04:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:04:09] [Rank 0] PRINT: step:7500/10000 train_loss:1.1347 val_loss:1.1614 train_time:512644ms step_avg:68.35ms
+[2025-07-07 20:04:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:04:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
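
Each detailed-evaluation pass requests ~5000 samples yet lands on 5633; with per-group quotas, rounding and per-group availability push the realized size off the nominal target. A plausible sketch of such a sampler (the script's actual logic may differ, and the helper below is hypothetical):

    import random

    def stratified_sample(examples_by_group, target=5000):
        # Equal quota per group, rounded up; groups smaller than the quota
        # contribute everything they have, so the realized total can land
        # above or below the nominal target.
        quota = -(-target // len(examples_by_group))  # ceil division
        sampled = []
        for examples in examples_by_group.values():
            k = min(quota, len(examples))
            sampled.extend(random.sample(examples, k))
        return sampled
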
+[2025-07-07 20:04:10] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:09:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:09:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:09:31] [Rank 0] Total Loss: 5.3443
+[2025-07-07 20:09:31] [Rank 0] Total FTA: 0.3978
+[2025-07-07 20:09:31] [Rank 0] Group 0 Loss: 5.7791
+[2025-07-07 20:09:31] [Rank 0] Group 1 Loss: 5.0404
+[2025-07-07 20:09:31] [Rank 0] Group 2 Loss: 5.1880
+[2025-07-07 20:09:31] [Rank 0] Group 3 Loss: 5.2927
+[2025-07-07 20:09:31] [Rank 0] Group 4 Loss: 5.3379
+[2025-07-07 20:09:31] [Rank 0] Group 5 Loss: 5.2993
+[2025-07-07 20:09:31] [Rank 0] Group 6 Loss: 5.1887
+[2025-07-07 20:09:31] [Rank 0] Group 7 Loss: 5.3320
+[2025-07-07 20:09:31] [Rank 0] Group 8 Loss: 5.2895
+[2025-07-07 20:09:31] [Rank 0] Group 9 Loss: 5.3021
+[2025-07-07 20:09:31] [Rank 0] Group 10 Loss: 5.3284
+[2025-07-07 20:09:31] [Rank 0] Group 11 Loss: 5.3311
+[2025-07-07 20:09:31] [Rank 0] Group 0 FTA: 0.3329
+[2025-07-07 20:09:31] [Rank 0] Group 1 FTA: 0.5260
+[2025-07-07 20:09:31] [Rank 0] Group 2 FTA: 0.5208
+[2025-07-07 20:09:31] [Rank 0] Group 3 FTA: 0.3047
+[2025-07-07 20:09:31] [Rank 0] Group 4 FTA: 0.3828
+[2025-07-07 20:09:31] [Rank 0] Group 5 FTA: 0.4740
+[2025-07-07 20:09:31] [Rank 0] Group 6 FTA: 0.3542
+[2025-07-07 20:09:31] [Rank 0] Group 7 FTA: 0.3932
+[2025-07-07 20:09:31] [Rank 0] Group 8 FTA: 0.3620
+[2025-07-07 20:09:31] [Rank 0] Group 9 FTA: 0.3672
+[2025-07-07 20:09:31] [Rank 0] Group 10 FTA: 0.4043
+[2025-07-07 20:09:31] [Rank 0] Group 11 FTA: 0.4004
+[2025-07-07 20:09:31] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 20:09:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 20:09:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 20:09:32] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 20:09:32] [Rank 0] step:7501/10000 train_time:512655ms step_avg:68.34ms
+[2025-07-07 20:09:34] [Rank 0] step:7521/10000 train_time:513422ms step_avg:68.27ms
+[2025-07-07 20:09:35] [Rank 0] step:7541/10000 train_time:514785ms step_avg:68.26ms
+[2025-07-07 20:09:37] [Rank 0] step:7561/10000 train_time:516151ms step_avg:68.26ms
+[2025-07-07 20:09:38] [Rank 0] step:7581/10000 train_time:517552ms step_avg:68.27ms
+[2025-07-07 20:09:39] [Rank 0] step:7601/10000 train_time:518918ms step_avg:68.27ms
+[2025-07-07 20:09:41] [Rank 0] step:7621/10000 train_time:520285ms step_avg:68.27ms
+[2025-07-07 20:09:42] [Rank 0] step:7641/10000 train_time:521651ms step_avg:68.27ms
+[2025-07-07 20:09:43] [Rank 0] step:7661/10000 train_time:523019ms step_avg:68.27ms
+[2025-07-07 20:09:45] [Rank 0] step:7681/10000 train_time:524386ms step_avg:68.27ms
+[2025-07-07 20:09:46] [Rank 0] step:7701/10000 train_time:525751ms step_avg:68.27ms
+[2025-07-07 20:09:47] [Rank 0] step:7721/10000 train_time:527121ms step_avg:68.27ms
+[2025-07-07 20:09:49] [Rank 0] step:7741/10000 train_time:528490ms step_avg:68.27ms
+[2025-07-07 20:09:50] [Rank 0] step:7761/10000 train_time:529903ms step_avg:68.28ms
+[2025-07-07 20:09:52] [Rank 0] step:7781/10000 train_time:531272ms step_avg:68.28ms
+[2025-07-07 20:09:53] [Rank 0] step:7801/10000 train_time:532642ms step_avg:68.28ms
+[2025-07-07 20:09:54] [Rank 0] step:7821/10000 train_time:534010ms step_avg:68.28ms
+[2025-07-07 20:09:56] [Rank 0] step:7841/10000 train_time:535381ms step_avg:68.28ms
+[2025-07-07 20:09:57] [Rank 0] step:7861/10000 train_time:536751ms step_avg:68.28ms
+[2025-07-07 20:09:58] [Rank 0] step:7881/10000 train_time:538120ms step_avg:68.28ms
+[2025-07-07 20:10:00] [Rank 0] step:7901/10000 train_time:539488ms step_avg:68.28ms
+[2025-07-07 20:10:01] [Rank 0] step:7921/10000 train_time:541109ms step_avg:68.31ms
+[2025-07-07 20:10:03] [Rank 0] step:7941/10000 train_time:542259ms step_avg:68.29ms
+[2025-07-07 20:10:04] [Rank 0] step:7961/10000 train_time:543631ms step_avg:68.29ms
+[2025-07-07 20:10:05] [Rank 0] step:7981/10000 train_time:545001ms step_avg:68.29ms
+[2025-07-07 20:10:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:10:08] [Rank 0] PRINT: step:8000/10000 train_loss:1.1173 val_loss:1.1436 train_time:546993ms step_avg:68.37ms
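
The step_avg field is consistent with cumulative wall-clock time divided by the step index rather than a moving average: at step 8000 above, 546993 ms / 8000 steps ≈ 68.37 ms, exactly the printed value. A one-line check (this reading is inferred from the numbers, not from the script):

    train_time_ms = 546_993   # cumulative train_time at step 8000
    step = 8_000
    print(f"step_avg:{train_time_ms / step:.2f}ms")   # -> step_avg:68.37ms
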
+[2025-07-07 20:10:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:10:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:10:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:15:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:15:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:15:27] [Rank 0] Total Loss: 5.4906
+[2025-07-07 20:15:27] [Rank 0] Total FTA: 0.4545
+[2025-07-07 20:15:27] [Rank 0] Group 0 Loss: 5.8289
+[2025-07-07 20:15:27] [Rank 0] Group 1 Loss: 5.3248
+[2025-07-07 20:15:27] [Rank 0] Group 2 Loss: 5.3724
+[2025-07-07 20:15:27] [Rank 0] Group 3 Loss: 5.5696
+[2025-07-07 20:15:27] [Rank 0] Group 4 Loss: 5.4176
+[2025-07-07 20:15:27] [Rank 0] Group 5 Loss: 5.4669
+[2025-07-07 20:15:27] [Rank 0] Group 6 Loss: 5.3318
+[2025-07-07 20:15:27] [Rank 0] Group 7 Loss: 5.4587
+[2025-07-07 20:15:27] [Rank 0] Group 8 Loss: 5.4781
+[2025-07-07 20:15:27] [Rank 0] Group 9 Loss: 5.5042
+[2025-07-07 20:15:27] [Rank 0] Group 10 Loss: 5.4343
+[2025-07-07 20:15:27] [Rank 0] Group 11 Loss: 5.4505
+[2025-07-07 20:15:27] [Rank 0] Group 0 FTA: 0.6749
+[2025-07-07 20:15:27] [Rank 0] Group 1 FTA: 0.5130
+[2025-07-07 20:15:27] [Rank 0] Group 2 FTA: 0.4167
+[2025-07-07 20:15:27] [Rank 0] Group 3 FTA: 0.3646
+[2025-07-07 20:15:27] [Rank 0] Group 4 FTA: 0.3724
+[2025-07-07 20:15:27] [Rank 0] Group 5 FTA: 0.4740
+[2025-07-07 20:15:27] [Rank 0] Group 6 FTA: 0.3698
+[2025-07-07 20:15:27] [Rank 0] Group 7 FTA: 0.4453
+[2025-07-07 20:15:27] [Rank 0] Group 8 FTA: 0.4323
+[2025-07-07 20:15:27] [Rank 0] Group 9 FTA: 0.4375
+[2025-07-07 20:15:27] [Rank 0] Group 10 FTA: 0.4141
+[2025-07-07 20:15:27] [Rank 0] Group 11 FTA: 0.4062
+[2025-07-07 20:15:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 20:15:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 20:15:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 20:15:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 20:15:29] [Rank 0] step:8001/10000 train_time:547004ms step_avg:68.37ms
+[2025-07-07 20:15:30] [Rank 0] step:8021/10000 train_time:547770ms step_avg:68.29ms
+[2025-07-07 20:15:31] [Rank 0] step:8041/10000 train_time:549134ms step_avg:68.29ms
+[2025-07-07 20:15:33] [Rank 0] step:8061/10000 train_time:550501ms step_avg:68.29ms
+[2025-07-07 20:15:34] [Rank 0] step:8081/10000 train_time:551865ms step_avg:68.29ms
+[2025-07-07 20:15:35] [Rank 0] step:8101/10000 train_time:553483ms step_avg:68.32ms
+[2025-07-07 20:15:37] [Rank 0] step:8121/10000 train_time:554649ms step_avg:68.30ms
+[2025-07-07 20:15:38] [Rank 0] step:8141/10000 train_time:556015ms step_avg:68.30ms
+[2025-07-07 20:15:40] [Rank 0] step:8161/10000 train_time:557382ms step_avg:68.30ms
+[2025-07-07 20:15:41] [Rank 0] step:8181/10000 train_time:558750ms step_avg:68.30ms
+[2025-07-07 20:15:42] [Rank 0] step:8201/10000 train_time:560119ms step_avg:68.30ms
+[2025-07-07 20:15:44] [Rank 0] step:8221/10000 train_time:561489ms step_avg:68.30ms
+[2025-07-07 20:15:45] [Rank 0] step:8241/10000 train_time:562857ms step_avg:68.30ms
+[2025-07-07 20:15:46] [Rank 0] step:8261/10000 train_time:564227ms step_avg:68.30ms
+[2025-07-07 20:15:48] [Rank 0] step:8281/10000 train_time:565846ms step_avg:68.33ms
+[2025-07-07 20:15:49] [Rank 0] step:8301/10000 train_time:567002ms step_avg:68.31ms
+[2025-07-07 20:15:51] [Rank 0] step:8321/10000 train_time:568371ms step_avg:68.31ms
+[2025-07-07 20:15:52] [Rank 0] step:8341/10000 train_time:569739ms step_avg:68.31ms
+[2025-07-07 20:15:53] [Rank 0] step:8361/10000 train_time:571108ms step_avg:68.31ms
+[2025-07-07 20:15:55] [Rank 0] step:8381/10000 train_time:572478ms step_avg:68.31ms
+[2025-07-07 20:15:56] [Rank 0] step:8401/10000 train_time:573848ms step_avg:68.31ms
+[2025-07-07 20:15:57] [Rank 0] step:8421/10000 train_time:575219ms step_avg:68.31ms
+[2025-07-07 20:15:59] [Rank 0] step:8441/10000 train_time:576589ms step_avg:68.31ms
+[2025-07-07 20:16:00] [Rank 0] step:8461/10000 train_time:577960ms step_avg:68.31ms
+[2025-07-07 20:16:02] [Rank 0] step:8481/10000 train_time:579354ms step_avg:68.31ms
+[2025-07-07 20:16:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:16:04] [Rank 0] PRINT: step:8500/10000 train_loss:1.0987 val_loss:1.1313 train_time:581346ms step_avg:68.39ms
+[2025-07-07 20:16:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:16:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:16:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:21:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:21:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:21:25] [Rank 0] Total Loss: 5.3624
+[2025-07-07 20:21:25] [Rank 0] Total FTA: 0.4580
+[2025-07-07 20:21:25] [Rank 0] Group 0 Loss: 5.6809
+[2025-07-07 20:21:25] [Rank 0] Group 1 Loss: 5.2306
+[2025-07-07 20:21:25] [Rank 0] Group 2 Loss: 5.3086
+[2025-07-07 20:21:25] [Rank 0] Group 3 Loss: 5.4598
+[2025-07-07 20:21:25] [Rank 0] Group 4 Loss: 5.3519
+[2025-07-07 20:21:25] [Rank 0] Group 5 Loss: 5.3229
+[2025-07-07 20:21:25] [Rank 0] Group 6 Loss: 5.2588
+[2025-07-07 20:21:25] [Rank 0] Group 7 Loss: 5.3392
+[2025-07-07 20:21:25] [Rank 0] Group 8 Loss: 5.3078
+[2025-07-07 20:21:25] [Rank 0] Group 9 Loss: 5.3897
+[2025-07-07 20:21:25] [Rank 0] Group 10 Loss: 5.3105
+[2025-07-07 20:21:25] [Rank 0] Group 11 Loss: 5.2625
+[2025-07-07 20:21:25] [Rank 0] Group 0 FTA: 0.5124
+[2025-07-07 20:21:25] [Rank 0] Group 1 FTA: 0.4661
+[2025-07-07 20:21:25] [Rank 0] Group 2 FTA: 0.6823
+[2025-07-07 20:21:25] [Rank 0] Group 3 FTA: 0.3333
+[2025-07-07 20:21:25] [Rank 0] Group 4 FTA: 0.4089
+[2025-07-07 20:21:25] [Rank 0] Group 5 FTA: 0.4896
+[2025-07-07 20:21:25] [Rank 0] Group 6 FTA: 0.3646
+[2025-07-07 20:21:25] [Rank 0] Group 7 FTA: 0.4609
+[2025-07-07 20:21:25] [Rank 0] Group 8 FTA: 0.4427
+[2025-07-07 20:21:25] [Rank 0] Group 9 FTA: 0.4219
+[2025-07-07 20:21:25] [Rank 0] Group 10 FTA: 0.4395
+[2025-07-07 20:21:25] [Rank 0] Group 11 FTA: 0.4414
+[2025-07-07 20:21:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 20:21:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 20:21:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 20:21:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 20:21:27] [Rank 0] step:8501/10000 train_time:581356ms step_avg:68.39ms
+[2025-07-07 20:21:28] [Rank 0] step:8521/10000 train_time:582128ms step_avg:68.32ms
+[2025-07-07 20:21:30] [Rank 0] step:8541/10000 train_time:583492ms step_avg:68.32ms
+[2025-07-07 20:21:31] [Rank 0] step:8561/10000 train_time:584856ms step_avg:68.32ms
+[2025-07-07 20:21:32] [Rank 0] step:8581/10000 train_time:586222ms step_avg:68.32ms
+[2025-07-07 20:21:34] [Rank 0] step:8601/10000 train_time:587588ms step_avg:68.32ms
+[2025-07-07 20:21:35] [Rank 0] step:8621/10000 train_time:588955ms step_avg:68.32ms
+[2025-07-07 20:21:37] [Rank 0] step:8641/10000 train_time:590573ms step_avg:68.35ms
+[2025-07-07 20:21:38] [Rank 0] step:8661/10000 train_time:591735ms step_avg:68.32ms
+[2025-07-07 20:21:39] [Rank 0] step:8681/10000 train_time:593103ms step_avg:68.32ms
+[2025-07-07 20:21:41] [Rank 0] step:8701/10000 train_time:594472ms step_avg:68.32ms
+[2025-07-07 20:21:42] [Rank 0] step:8721/10000 train_time:595840ms step_avg:68.32ms
+[2025-07-07 20:21:43] [Rank 0] step:8741/10000 train_time:597209ms step_avg:68.32ms
+[2025-07-07 20:21:45] [Rank 0] step:8761/10000 train_time:598578ms step_avg:68.32ms
+[2025-07-07 20:21:46] [Rank 0] step:8781/10000 train_time:599947ms step_avg:68.32ms
+[2025-07-07 20:21:47] [Rank 0] step:8801/10000 train_time:601318ms step_avg:68.32ms
+[2025-07-07 20:21:49] [Rank 0] step:8821/10000 train_time:602686ms step_avg:68.32ms
+[2025-07-07 20:21:50] [Rank 0] step:8841/10000 train_time:604091ms step_avg:68.33ms
+[2025-07-07 20:21:52] [Rank 0] step:8861/10000 train_time:605462ms step_avg:68.33ms
+[2025-07-07 20:21:53] [Rank 0] step:8881/10000 train_time:606833ms step_avg:68.33ms
+[2025-07-07 20:21:54] [Rank 0] step:8901/10000 train_time:608204ms step_avg:68.33ms
+[2025-07-07 20:21:56] [Rank 0] step:8921/10000 train_time:609573ms step_avg:68.33ms
+[2025-07-07 20:21:57] [Rank 0] step:8941/10000 train_time:610945ms step_avg:68.33ms
+[2025-07-07 20:21:58] [Rank 0] step:8961/10000 train_time:612314ms step_avg:68.33ms
+[2025-07-07 20:22:00] [Rank 0] step:8981/10000 train_time:613685ms step_avg:68.33ms
+[2025-07-07 20:22:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:22:02] [Rank 0] PRINT: step:9000/10000 train_loss:1.0807 val_loss:1.1204 train_time:615679ms step_avg:68.41ms
+[2025-07-07 20:22:02] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:22:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:22:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:27:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:27:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:27:21] [Rank 0] Total Loss: 5.4722
+[2025-07-07 20:27:21] [Rank 0] Total FTA: 0.4658
+[2025-07-07 20:27:21] [Rank 0] Group 0 Loss: 5.8557
+[2025-07-07 20:27:21] [Rank 0] Group 1 Loss: 5.3469
+[2025-07-07 20:27:21] [Rank 0] Group 2 Loss: 5.4851
+[2025-07-07 20:27:21] [Rank 0] Group 3 Loss: 5.5531
+[2025-07-07 20:27:21] [Rank 0] Group 4 Loss: 5.4326
+[2025-07-07 20:27:21] [Rank 0] Group 5 Loss: 5.4034
+[2025-07-07 20:27:21] [Rank 0] Group 6 Loss: 5.3287
+[2025-07-07 20:27:21] [Rank 0] Group 7 Loss: 5.4190
+[2025-07-07 20:27:21] [Rank 0] Group 8 Loss: 5.4283
+[2025-07-07 20:27:21] [Rank 0] Group 9 Loss: 5.3991
+[2025-07-07 20:27:21] [Rank 0] Group 10 Loss: 5.4118
+[2025-07-07 20:27:21] [Rank 0] Group 11 Loss: 5.3752
+[2025-07-07 20:27:21] [Rank 0] Group 0 FTA: 0.5059
+[2025-07-07 20:27:21] [Rank 0] Group 1 FTA: 0.5417
+[2025-07-07 20:27:21] [Rank 0] Group 2 FTA: 0.4271
+[2025-07-07 20:27:21] [Rank 0] Group 3 FTA: 0.3802
+[2025-07-07 20:27:21] [Rank 0] Group 4 FTA: 0.4141
+[2025-07-07 20:27:21] [Rank 0] Group 5 FTA: 0.5104
+[2025-07-07 20:27:21] [Rank 0] Group 6 FTA: 0.3646
+[2025-07-07 20:27:21] [Rank 0] Group 7 FTA: 0.4635
+[2025-07-07 20:27:21] [Rank 0] Group 8 FTA: 0.4583
+[2025-07-07 20:27:21] [Rank 0] Group 9 FTA: 0.4883
+[2025-07-07 20:27:21] [Rank 0] Group 10 FTA: 0.4863
+[2025-07-07 20:27:21] [Rank 0] Group 11 FTA: 0.4824
+[2025-07-07 20:27:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 20:27:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 20:27:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 20:27:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 20:27:23] [Rank 0] step:9001/10000 train_time:615698ms step_avg:68.40ms
+[2025-07-07 20:27:25] [Rank 0] step:9021/10000 train_time:617224ms step_avg:68.42ms
+[2025-07-07 20:27:26] [Rank 0] step:9041/10000 train_time:618619ms step_avg:68.42ms
+[2025-07-07 20:27:28] [Rank 0] step:9061/10000 train_time:620420ms step_avg:68.47ms
+[2025-07-07 20:27:30] [Rank 0] step:9081/10000 train_time:622145ms step_avg:68.51ms
+[2025-07-07 20:27:31] [Rank 0] step:9101/10000 train_time:623849ms step_avg:68.55ms
+[2025-07-07 20:27:33] [Rank 0] step:9121/10000 train_time:625595ms step_avg:68.59ms
+[2025-07-07 20:27:35] [Rank 0] step:9141/10000 train_time:627520ms step_avg:68.65ms
+[2025-07-07 20:27:37] [Rank 0] step:9161/10000 train_time:629240ms step_avg:68.69ms
+[2025-07-07 20:27:38] [Rank 0] step:9181/10000 train_time:631241ms step_avg:68.76ms
+[2025-07-07 20:27:40] [Rank 0] step:9201/10000 train_time:632291ms step_avg:68.72ms
+[2025-07-07 20:27:42] [Rank 0] step:9221/10000 train_time:634223ms step_avg:68.78ms
+[2025-07-07 20:27:44] [Rank 0] step:9241/10000 train_time:636067ms step_avg:68.83ms
+[2025-07-07 20:27:45] [Rank 0] step:9261/10000 train_time:637761ms step_avg:68.87ms
+[2025-07-07 20:27:47] [Rank 0] step:9281/10000 train_time:639402ms step_avg:68.89ms
+[2025-07-07 20:27:48] [Rank 0] step:9301/10000 train_time:640877ms step_avg:68.90ms
+[2025-07-07 20:27:50] [Rank 0] step:9321/10000 train_time:642302ms step_avg:68.91ms
+[2025-07-07 20:27:51] [Rank 0] step:9341/10000 train_time:643769ms step_avg:68.92ms
+[2025-07-07 20:27:54] [Rank 0] step:9361/10000 train_time:645473ms step_avg:68.95ms
+[2025-07-07 20:27:55] [Rank 0] step:9381/10000 train_time:647679ms step_avg:69.04ms
+[2025-07-07 20:27:57] [Rank 0] step:9401/10000 train_time:649385ms step_avg:69.08ms
+[2025-07-07 20:27:58] [Rank 0] step:9421/10000 train_time:650816ms step_avg:69.08ms
+[2025-07-07 20:28:00] [Rank 0] step:9441/10000 train_time:652203ms step_avg:69.08ms
+[2025-07-07 20:28:01] [Rank 0] step:9461/10000 train_time:653649ms step_avg:69.09ms
+[2025-07-07 20:28:03] [Rank 0] step:9481/10000 train_time:655075ms step_avg:69.09ms
+[2025-07-07 20:28:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:28:05] [Rank 0] PRINT: step:9500/10000 train_loss:1.0626 val_loss:1.1191 train_time:657100ms step_avg:69.17ms
+[2025-07-07 20:28:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 20:28:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 20:28:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 20:33:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 20:33:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 20:33:28] [Rank 0] Total Loss: 5.4353
+[2025-07-07 20:33:28] [Rank 0] Total FTA: 0.5162
+[2025-07-07 20:33:28] [Rank 0] Group 0 Loss: 5.7047
+[2025-07-07 20:33:28] [Rank 0] Group 1 Loss: 5.4560
+[2025-07-07 20:33:28] [Rank 0] Group 2 Loss: 5.2753
+[2025-07-07 20:33:28] [Rank 0] Group 3 Loss: 5.5344
+[2025-07-07 20:33:28] [Rank 0] Group 4 Loss: 5.3431
+[2025-07-07 20:33:28] [Rank 0] Group 5 Loss: 5.3559
+[2025-07-07 20:33:28] [Rank 0] Group 6 Loss: 5.2398
+[2025-07-07 20:33:28] [Rank 0] Group 7 Loss: 5.4401
+[2025-07-07 20:33:28] [Rank 0] Group 8 Loss: 5.3658
+[2025-07-07 20:33:28] [Rank 0] Group 9 Loss: 5.4264
+[2025-07-07 20:33:28] [Rank 0] Group 10 Loss: 5.4333
+[2025-07-07 20:33:28] [Rank 0] Group 11 Loss: 5.4133
+[2025-07-07 20:33:28] [Rank 0] Group 0 FTA: 0.4837
+[2025-07-07 20:33:28] [Rank 0] Group 1 FTA: 0.4531
+[2025-07-07 20:33:28] [Rank 0] Group 2 FTA: 0.7500
+[2025-07-07 20:33:28] [Rank 0] Group 3 FTA: 0.4766
+[2025-07-07 20:33:28] [Rank 0] Group 4 FTA: 0.5156
+[2025-07-07 20:33:28] [Rank 0] Group 5 FTA: 0.4844
+[2025-07-07 20:33:28] [Rank 0] Group 6 FTA: 0.4818
+[2025-07-07 20:33:28] [Rank 0] Group 7 FTA: 0.5156
+[2025-07-07 20:33:28] [Rank 0] Group 8 FTA: 0.5234
+[2025-07-07 20:33:28] [Rank 0] Group 9 FTA: 0.5938
+[2025-07-07 20:33:28] [Rank 0] Group 10 FTA: 0.5449
+[2025-07-07 20:33:28] [Rank 0] Group 11 FTA: 0.4805
+[2025-07-07 20:33:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png
+[2025-07-07 20:33:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png
+[2025-07-07 20:33:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png
+[2025-07-07 20:33:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png
+[2025-07-07 20:33:29] [Rank 0] step:9501/10000 train_time:657111ms step_avg:69.16ms
+[2025-07-07 20:33:31] [Rank 0] step:9521/10000 train_time:657865ms step_avg:69.10ms
+[2025-07-07 20:33:32] [Rank 0] step:9541/10000 train_time:659229ms step_avg:69.09ms
+[2025-07-07 20:33:34] [Rank 0] step:9561/10000 train_time:660624ms step_avg:69.10ms
+[2025-07-07 20:33:35] [Rank 0] step:9581/10000 train_time:661990ms step_avg:69.09ms
+[2025-07-07 20:33:36] [Rank 0] step:9601/10000 train_time:663358ms step_avg:69.09ms
+[2025-07-07 20:33:38] [Rank 0] step:9621/10000 train_time:664725ms step_avg:69.09ms
+[2025-07-07 20:33:39] [Rank 0] step:9641/10000 train_time:666092ms step_avg:69.09ms
+[2025-07-07 20:33:40] [Rank 0] step:9661/10000 train_time:667460ms step_avg:69.09ms
+[2025-07-07 20:33:42] [Rank 0] step:9681/10000 train_time:668828ms step_avg:69.09ms
+[2025-07-07 20:33:43] [Rank 0] step:9701/10000 train_time:670198ms step_avg:69.09ms
+[2025-07-07 20:33:45] [Rank 0] step:9721/10000 train_time:672238ms step_avg:69.15ms
+[2025-07-07 20:33:46] [Rank 0] step:9741/10000 train_time:672978ms step_avg:69.09ms
+[2025-07-07 20:33:47] [Rank 0] step:9761/10000 train_time:674345ms step_avg:69.09ms
+[2025-07-07 20:33:49] [Rank 0] step:9781/10000 train_time:675715ms step_avg:69.08ms
+[2025-07-07 20:33:50] [Rank 0] step:9801/10000 train_time:677083ms step_avg:69.08ms
+[2025-07-07 20:33:51] [Rank 0] step:9821/10000 train_time:678453ms step_avg:69.08ms
+[2025-07-07 20:33:53] [Rank 0] step:9841/10000 train_time:679824ms step_avg:69.08ms
+[2025-07-07 20:33:54] [Rank 0] step:9861/10000 train_time:681194ms step_avg:69.08ms
+[2025-07-07 20:33:55] [Rank 0] step:9881/10000 train_time:682564ms step_avg:69.08ms
+[2025-07-07 20:33:57] [Rank 0] step:9901/10000 train_time:684287ms step_avg:69.11ms
+[2025-07-07 20:33:58] [Rank 0] step:9921/10000 train_time:685303ms step_avg:69.08ms
+[2025-07-07 20:34:00] [Rank 0] step:9941/10000 train_time:686673ms step_avg:69.07ms
+[2025-07-07 20:34:01] [Rank 0] step:9961/10000 train_time:688042ms step_avg:69.07ms
+[2025-07-07 20:34:02] [Rank 0] step:9981/10000 train_time:689413ms step_avg:69.07ms
+[2025-07-07 20:34:04] [Rank 0] step:10000/10000 train_time:690714ms step_avg:69.07ms
+[2025-07-07 20:34:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 20:34:05] [Rank 0] PRINT: step:10000/10000 train_loss:1.0499 val_loss:1.1007 train_time:691411ms step_avg:69.14ms
+[2025-07-07 20:34:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 20:34:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 20:39:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 20:39:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 20:39:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 20:39:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 20:39:25] [Rank 0] Total Loss: 5.4629 +[2025-07-07 20:39:25] [Rank 0] Total Loss: 5.4629 +[2025-07-07 20:39:25] [Rank 0] Total FTA: 0.5750 +[2025-07-07 20:39:25] [Rank 0] Total FTA: 0.5750 +[2025-07-07 20:39:25] [Rank 0] Group 0 Loss: 5.7482 +[2025-07-07 20:39:25] [Rank 0] Group 0 Loss: 5.7482 +[2025-07-07 20:39:25] [Rank 0] Group 1 Loss: 5.3176 +[2025-07-07 20:39:25] [Rank 0] Group 1 Loss: 5.3176 +[2025-07-07 20:39:25] [Rank 0] Group 2 Loss: 5.4120 +[2025-07-07 20:39:25] [Rank 0] Group 2 Loss: 5.4120 +[2025-07-07 20:39:25] [Rank 0] Group 3 Loss: 5.6369 +[2025-07-07 20:39:25] [Rank 0] Group 3 Loss: 5.6369 +[2025-07-07 20:39:25] [Rank 0] Group 4 Loss: 5.3238 +[2025-07-07 20:39:25] [Rank 0] Group 4 Loss: 5.3238 +[2025-07-07 20:39:25] [Rank 0] Group 5 Loss: 5.3365 +[2025-07-07 20:39:25] [Rank 0] Group 5 Loss: 5.3365 +[2025-07-07 20:39:25] [Rank 0] Group 6 Loss: 5.3445 +[2025-07-07 20:39:25] [Rank 0] Group 6 Loss: 5.3445 +[2025-07-07 20:39:25] [Rank 0] Group 7 Loss: 5.4510 +[2025-07-07 20:39:25] [Rank 0] Group 7 Loss: 5.4510 +[2025-07-07 20:39:25] [Rank 0] Group 8 Loss: 5.3951 +[2025-07-07 20:39:25] [Rank 0] Group 8 Loss: 5.3951 +[2025-07-07 20:39:25] [Rank 0] Group 9 Loss: 5.4246 +[2025-07-07 20:39:25] [Rank 0] Group 9 Loss: 5.4246 +[2025-07-07 20:39:25] [Rank 0] Group 10 Loss: 5.4420 +[2025-07-07 20:39:25] [Rank 0] Group 10 Loss: 5.4420 +[2025-07-07 20:39:25] [Rank 0] Group 11 Loss: 5.4510 +[2025-07-07 20:39:25] [Rank 0] Group 11 Loss: 5.4510 +[2025-07-07 20:39:25] [Rank 0] Group 0 FTA: 0.6476 +[2025-07-07 20:39:25] [Rank 0] Group 0 FTA: 0.6476 +[2025-07-07 20:39:25] [Rank 0] Group 1 FTA: 0.7188 +[2025-07-07 20:39:25] [Rank 0] Group 1 FTA: 0.7188 +[2025-07-07 20:39:25] [Rank 0] Group 2 FTA: 0.6484 +[2025-07-07 20:39:25] [Rank 0] Group 2 FTA: 0.6484 +[2025-07-07 20:39:25] [Rank 0] Group 3 FTA: 0.4349 +[2025-07-07 20:39:25] [Rank 0] Group 3 FTA: 0.4349 +[2025-07-07 20:39:25] [Rank 0] Group 4 FTA: 0.5078 +[2025-07-07 20:39:25] [Rank 0] Group 4 FTA: 0.5078 +[2025-07-07 20:39:25] [Rank 0] Group 5 FTA: 0.6328 +[2025-07-07 20:39:25] [Rank 0] Group 5 FTA: 0.6328 +[2025-07-07 20:39:25] [Rank 0] Group 6 FTA: 0.5208 +[2025-07-07 20:39:25] [Rank 0] Group 6 FTA: 0.5208 +[2025-07-07 20:39:25] [Rank 0] Group 7 FTA: 0.5156 +[2025-07-07 20:39:25] [Rank 0] Group 7 FTA: 0.5156 +[2025-07-07 20:39:25] [Rank 0] Group 8 FTA: 0.4948 +[2025-07-07 20:39:25] [Rank 0] Group 8 FTA: 0.4948 +[2025-07-07 20:39:25] [Rank 0] Group 9 FTA: 0.4922 +[2025-07-07 20:39:25] [Rank 0] Group 9 FTA: 0.4922 +[2025-07-07 20:39:25] [Rank 0] Group 10 FTA: 0.5820 +[2025-07-07 20:39:25] [Rank 0] Group 10 FTA: 0.5820 +[2025-07-07 20:39:25] [Rank 0] Group 11 FTA: 0.5850 +[2025-07-07 20:39:25] [Rank 0] Group 11 FTA: 0.5850 +[2025-07-07 20:39:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png +[2025-07-07 20:39:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_loss_curves.png +[2025-07-07 20:39:26] [Rank 0] [✓] Per-Class FTA curve 
updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png +[2025-07-07 20:39:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/per_class_acc_curves.png +[2025-07-07 20:39:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png +[2025-07-07 20:39:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_loss_curve.png +[2025-07-07 20:39:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png +[2025-07-07 20:39:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.0005_seed_48/total_acc_curve.png +[2025-07-07 20:39:27] [Rank 0] step:10001/10000 train_time:691420ms step_avg:69.14ms +[2025-07-07 20:39:27] [Rank 0] step:10001/10000 train_time:691420ms step_avg:69.14ms +[2025-07-07 20:39:27] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 20:39:27 2025 --- +[2025-07-07 20:39:27] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 20:39:27 2025 --- +[2025-07-07 20:39:27] [Rank 0] PRINT: Peak memory allocated: 9064 MiB reserved: 10316 MiB +[2025-07-07 20:39:27] [Rank 0] PRINT: Peak memory allocated: 9064 MiB reserved: 10316 MiB diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/config.json b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/config.json new file mode 100644 index 0000000000000000000000000000000000000000..f85d89959d3319bc38e92ebd55c31fda50a11749 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 42, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.001 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "6f979276-4b94-44a7-ab91-319ab0a66ac9", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..ccaa3302f574c68f6751fa45fbde25733a2c34b5 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84e239572ea48a89f50ac71cd182816ef07952f6b163fd785b77108d62f685b1 +size 460772 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..6631b83085bf129af62264370000dacdc5f233b1 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:532d99aa433c3b8f3b383b04f14fc753558cbc4213077e2d4d5cec5a2a00483a +size 394487 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png 
b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..517dec1623a38373e3fe23c904aa5bbcd4804cf7 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9fbf6bb883ac9015008357696c587dd5bbf42571d84d802811acd676ddcbced +size 111671 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..ed09bf976033ffed33c46022c6c96704e500dd70 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c82d21462f54e71f4c868f0d09b0b66a67acd6f056ea66c84e655e6a6531fe15 +size 112332 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/training_log_6f979276-4b94-44a7-ab91-319ab0a66ac9.txt b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/training_log_6f979276-4b94-44a7-ab91-319ab0a66ac9.txt new file mode 100644 index 0000000000000000000000000000000000000000..53007d9ff2d640cdce2b877999add67858e5e112 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/training_log_6f979276-4b94-44a7-ab91-319ab0a66ac9.txt @@ -0,0 +1,5132 @@ +[2025-07-07 08:09:53] [Rank 0] PRINT: --- Script Start: Mon Jul 7 08:09:53 2025 --- +[2025-07-07 08:09:53] [Rank 0] PRINT: --- Script Start: Mon Jul 7 08:09:53 2025 --- +[2025-07-07 08:09:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-07 08:09:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001) +[2025-07-07 08:09:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 08:09:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 08:09:53] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-07 08:09:53] [Rank 0] PRINT: Using fixed seed: 42 +[2025-07-07 08:09:53] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42 +[2025-07-07 08:09:53] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42 +[2025-07-07 08:09:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as 
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
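+# NOTE: Illustrative sketch only -- not part of the original run script. Each run
+# is saved under logs_bios/qa_0704/mode_<opt>_param_<param>_lr_<lr>_seed_<seed>/
+# (config.json, curve PNGs, and a training_log_<uuid>.txt), which is the layout the
+# diffs in this file show. The helper name iter_runs below is our own; a separate
+# analysis script could enumerate saved runs like this (Path and json are already
+# imported above):
+def iter_runs(base="logs_bios/qa_0704"):
+    """Yield (run_dir, parsed config.json) for every saved run under base."""
+    for cfg_path in sorted(Path(base).glob("mode_*_param_*_lr_*_seed_*/config.json")):
+        with open(cfg_path) as f:
+            yield cfg_path.parent, json.load(f)
+# Usage (hypothetical): for run_dir, cfg in iter_runs():
+#     print(run_dir.name, cfg["cli_args"]["optimizer_mode"], cfg["cli_args"]["adam_lr"])
+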
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the run's log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
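+# NOTE: Illustrative arithmetic, not output from the original run. The get_lr
+# schedule above holds the multiplier at 1.0 for the first 1 - cooldown_frac of
+# training, then decays linearly to 0.1. With num_iterations=10000 and
+# cooldown_frac=0.8 (the values in this run's config):
+#   step  1000: x = 0.1 < 0.2               -> multiplier 1.0
+#   step  5000: w = (1 - 0.5) / 0.8 = 0.625 -> 0.625*1.0 + 0.375*0.1 ~= 0.66
+#   step 10000: w = 0                       -> multiplier 0.1 (10% of each group's initial_lr)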
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 08:09:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards, so multi-epoch training wraps back to the first file
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
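+# Worked example of the run-directory naming above (illustrative values, not the
+# CLI defaults): with optimizer_mode=5, model_parameterization="qkvo",
+# adam_lr=0.001 and seed=42, run_dir_path_str expands to
+# .../logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42; that folder receives
+# config.json, training_log_<uuid>.txt, and the curve PNGs written below.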
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
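+        # e.g. (illustrative) text "When was Bob born? Answer: 1990" cleans to
+        # "When was Bob born? 1990", yielding prompt "When was Bob born?" and answer "1990"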
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)  # history is keyed by training step
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
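+# Schedule summary for the loop below (num_iterations=10000, cooldown_frac=0.8):
+# get_lr keeps the LR multiplier at 1.0 through step 2000, then decays it
+# linearly to 0.1 at step 10000; get_window_size_blocks grows the attention
+# window from 128 tokens at step 0 to 1792 tokens (the first multiple of 128
+# at or above 1728) by the final step.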
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 08:09:54] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 08:09:54] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 08:09:54] [Rank 0] PRINT: Constructing model... +[2025-07-07 08:09:54] [Rank 0] PRINT: Constructing model... +[2025-07-07 08:09:56] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 08:09:56] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 08:09:56] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 08:09:56] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 08:09:56] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 08:09:56] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 08:09:58] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 08:09:58] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 08:09:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 08:09:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 08:09:58] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 08:09:58] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 08:09:58] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 08:09:58] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 08:09:58] [Rank 0] PRINT: Model returns: +[2025-07-07 08:09:58] [Rank 0] PRINT: Model returns: +[2025-07-07 08:09:58] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 08:09:58] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 08:09:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 08:09:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 08:09:58] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-07 08:09:58] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-07 08:09:58] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 08:09:58] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 08:09:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 08:09:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 08:09:58] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 08:09:58] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 08:09:58] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 08:09:58] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 08:09:58] [Rank 0] PRINT: Starting warmup... +[2025-07-07 08:09:58] [Rank 0] PRINT: Starting warmup... +[2025-07-07 08:11:06] [Rank 0] PRINT: Warmup complete. +[2025-07-07 08:11:06] [Rank 0] PRINT: Warmup complete. +[2025-07-07 08:11:06] [Rank 0] PRINT: Starting training... +[2025-07-07 08:11:06] [Rank 0] PRINT: Starting training... +[2025-07-07 08:11:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:11:06] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 08:11:14] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 08:11:16] [Rank 0] step:21/10000 train_time:816ms step_avg:38.84ms
+[2025-07-07 08:11:17] [Rank 0] step:41/10000 train_time:2136ms step_avg:52.09ms
+[2025-07-07 08:11:18] [Rank 0] step:61/10000 train_time:3455ms step_avg:56.65ms
+[2025-07-07 08:11:20] [Rank 0] step:81/10000 train_time:4775ms step_avg:58.96ms
+[2025-07-07 08:11:21] [Rank 0] step:101/10000 train_time:6097ms step_avg:60.37ms
+[2025-07-07 08:11:22] [Rank 0] step:121/10000 train_time:7418ms step_avg:61.30ms
+[2025-07-07 08:11:24] [Rank 0] step:141/10000 train_time:8740ms step_avg:61.98ms
+[2025-07-07 08:11:25] [Rank 0] step:161/10000 train_time:10061ms step_avg:62.49ms
+[2025-07-07 08:11:26] [Rank 0] step:181/10000 train_time:12066ms step_avg:66.66ms
+[2025-07-07 08:11:28] [Rank 0] step:201/10000 train_time:12780ms step_avg:63.58ms
+[2025-07-07 08:11:29] [Rank 0] step:221/10000 train_time:14108ms step_avg:63.84ms
+[2025-07-07 08:11:30] [Rank 0] step:241/10000 train_time:15435ms step_avg:64.05ms
+[2025-07-07 08:11:32] [Rank 0] step:261/10000 train_time:16765ms step_avg:64.24ms
+[2025-07-07 08:11:33] [Rank 0] step:281/10000 train_time:18096ms step_avg:64.40ms
+[2025-07-07 08:11:34] [Rank 0] step:301/10000 train_time:19427ms step_avg:64.54ms
+[2025-07-07 08:11:36] [Rank 0] step:321/10000 train_time:20757ms step_avg:64.66ms
+[2025-07-07 08:11:37] [Rank 0] step:341/10000 train_time:22087ms step_avg:64.77ms
+[2025-07-07 08:11:38] [Rank 0] step:361/10000 train_time:23470ms step_avg:65.02ms
+[2025-07-07 08:11:40] [Rank 0] step:381/10000 train_time:24816ms step_avg:65.13ms
+[2025-07-07 08:11:41] [Rank 0] step:401/10000 train_time:26147ms step_avg:65.21ms
+[2025-07-07 08:11:42] [Rank 0] step:421/10000 train_time:27480ms step_avg:65.27ms
+[2025-07-07 08:11:44] [Rank 0] step:441/10000 train_time:28815ms step_avg:65.34ms
+[2025-07-07 08:11:45] [Rank 0] step:461/10000 train_time:30149ms step_avg:65.40ms
+[2025-07-07 08:11:46] [Rank 0] step:481/10000 train_time:31484ms step_avg:65.46ms
+[2025-07-07 08:11:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:11:48] [Rank 0] PRINT: step:500/10000 train_loss:5.2613 val_loss:2.4559 train_time:33426ms step_avg:66.85ms
+[2025-07-07 08:11:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:11:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:11:49] [Rank 0] Evaluation set size after sampling: 5633
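The stratified sampler itself is outside this excerpt; the sketch below is one plausible per-class draw that, like the logged one, lands slightly above the ~5000 target (5633 here) because each class's share is rounded and floored at one:

# Hypothetical sketch of a stratified draw consistent with the log above;
# the real sampler lives in the unshown evaluation code.
import random
from collections import defaultdict

def stratified_sample(examples, get_class, target=5000, seed=42):
    rng = random.Random(seed)
    by_class = defaultdict(list)
    for ex in examples:
        by_class[get_class(ex)].append(ex)
    total = sum(len(v) for v in by_class.values())
    picked = []
    for cls, items in by_class.items():
        # proportional share, rounded, at least one per class -> can overshoot
        k = max(1, round(len(items) * target / total))
        picked.extend(rng.sample(items, min(k, len(items))))
    return picked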
+[2025-07-07 08:17:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:17:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:17:05] [Rank 0] Total Loss: 4.3330
+[2025-07-07 08:17:05] [Rank 0] Total FTA: 0.0847
+[2025-07-07 08:17:05] [Rank 0] Group 0 Loss: 4.4560
+[2025-07-07 08:17:05] [Rank 0] Group 1 Loss: 4.3773
+[2025-07-07 08:17:05] [Rank 0] Group 2 Loss: 4.2669
+[2025-07-07 08:17:05] [Rank 0] Group 3 Loss: 4.2933
+[2025-07-07 08:17:05] [Rank 0] Group 4 Loss: 4.3524
+[2025-07-07 08:17:05] [Rank 0] Group 5 Loss: 4.2797
+[2025-07-07 08:17:05] [Rank 0] Group 6 Loss: 4.2759
+[2025-07-07 08:17:05] [Rank 0] Group 7 Loss: 4.3270
+[2025-07-07 08:17:05] [Rank 0] Group 8 Loss: 4.3008
+[2025-07-07 08:17:05] [Rank 0] Group 9 Loss: 4.2719
+[2025-07-07 08:17:05] [Rank 0] Group 10 Loss: 4.2970
+[2025-07-07 08:17:05] [Rank 0] Group 11 Loss: 4.3453
+[2025-07-07 08:17:05] [Rank 0] Group 0 FTA: 0.1417
+[2025-07-07 08:17:05] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:17:05] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-07 08:17:05] [Rank 0] Group 3 FTA: 0.0547
+[2025-07-07 08:17:05] [Rank 0] Group 4 FTA: 0.0182
+[2025-07-07 08:17:05] [Rank 0] Group 5 FTA: 0.0729
+[2025-07-07 08:17:05] [Rank 0] Group 6 FTA: 0.0807
+[2025-07-07 08:17:05] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-07 08:17:05] [Rank 0] Group 8 FTA: 0.1146
+[2025-07-07 08:17:05] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-07 08:17:05] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-07 08:17:05] [Rank 0] Group 11 FTA: 0.0850
+[2025-07-07 08:17:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 08:17:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 08:17:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 08:17:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
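For reading the FTA columns above and in the later evaluation blocks: first-token accuracy plausibly scores whether the model's argmax prediction for the first answer token matches the reference. The real metric lives in the unshown evaluation code, so every name in this sketch is illustrative:

import torch

# Illustrative sketch of a first-token accuracy (FTA) metric; the actual
# implementation is in the part of the script not shown in this excerpt.
@torch.no_grad()
def first_token_accuracy(model, batches):
    hits, total = 0, 0
    for prompt_ids, first_answer_ids in batches:   # [B, T] prompts, [B] targets
        logits = model(prompt_ids)                 # [B, T, vocab]
        pred = logits[:, -1, :].argmax(dim=-1)     # next-token prediction
        hits += (pred == first_answer_ids).sum().item()
        total += first_answer_ids.numel()
    return hits / max(total, 1)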
+[2025-07-07 08:17:07] [Rank 0] step:501/10000 train_time:33437ms step_avg:66.74ms
+[2025-07-07 08:17:08] [Rank 0] step:521/10000 train_time:34172ms step_avg:65.59ms
+[2025-07-07 08:17:09] [Rank 0] step:541/10000 train_time:36188ms step_avg:66.89ms
+[2025-07-07 08:17:11] [Rank 0] step:561/10000 train_time:36902ms step_avg:65.78ms
+[2025-07-07 08:17:12] [Rank 0] step:581/10000 train_time:38231ms step_avg:65.80ms
+[2025-07-07 08:17:13] [Rank 0] step:601/10000 train_time:39558ms step_avg:65.82ms
+[2025-07-07 08:17:15] [Rank 0] step:621/10000 train_time:40884ms step_avg:65.84ms
+[2025-07-07 08:17:16] [Rank 0] step:641/10000 train_time:42213ms step_avg:65.85ms
+[2025-07-07 08:17:17] [Rank 0] step:661/10000 train_time:43540ms step_avg:65.87ms
+[2025-07-07 08:17:19] [Rank 0] step:681/10000 train_time:44870ms step_avg:65.89ms
+[2025-07-07 08:17:20] [Rank 0] step:701/10000 train_time:46201ms step_avg:65.91ms
+[2025-07-07 08:17:21] [Rank 0] step:721/10000 train_time:47783ms step_avg:66.27ms
+[2025-07-07 08:17:23] [Rank 0] step:741/10000 train_time:48905ms step_avg:66.00ms
+[2025-07-07 08:17:24] [Rank 0] step:761/10000 train_time:50240ms step_avg:66.02ms
+[2025-07-07 08:17:25] [Rank 0] step:781/10000 train_time:51581ms step_avg:66.04ms
+[2025-07-07 08:17:27] [Rank 0] step:801/10000 train_time:52924ms step_avg:66.07ms
+[2025-07-07 08:17:28] [Rank 0] step:821/10000 train_time:54266ms step_avg:66.10ms
+[2025-07-07 08:17:29] [Rank 0] step:841/10000 train_time:55609ms step_avg:66.12ms
+[2025-07-07 08:17:31] [Rank 0] step:861/10000 train_time:56953ms step_avg:66.15ms
+[2025-07-07 08:17:32] [Rank 0] step:881/10000 train_time:58296ms step_avg:66.17ms
+[2025-07-07 08:17:33] [Rank 0] step:901/10000 train_time:60316ms step_avg:66.94ms
+[2025-07-07 08:17:35] [Rank 0] step:921/10000 train_time:61039ms step_avg:66.27ms
+[2025-07-07 08:17:36] [Rank 0] step:941/10000 train_time:62383ms step_avg:66.29ms
+[2025-07-07 08:17:38] [Rank 0] step:961/10000 train_time:63727ms step_avg:66.31ms
+[2025-07-07 08:17:39] [Rank 0] step:981/10000 train_time:65073ms step_avg:66.33ms
+[2025-07-07 08:17:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:17:41] [Rank 0] PRINT: step:1000/10000 train_loss:1.9524 val_loss:1.7276 train_time:67030ms step_avg:67.03ms
+[2025-07-07 08:17:41] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:17:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:17:41] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:22:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:22:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:22:59] [Rank 0] Total Loss: 4.3560
+[2025-07-07 08:22:59] [Rank 0] Total FTA: 0.0941
+[2025-07-07 08:22:59] [Rank 0] Group 0 Loss: 4.6721
+[2025-07-07 08:22:59] [Rank 0] Group 1 Loss: 4.2841
+[2025-07-07 08:22:59] [Rank 0] Group 2 Loss: 4.2129
+[2025-07-07 08:22:59] [Rank 0] Group 3 Loss: 4.2509
+[2025-07-07 08:22:59] [Rank 0] Group 4 Loss: 4.3193
+[2025-07-07 08:22:59] [Rank 0] Group 5 Loss: 4.2842
+[2025-07-07 08:22:59] [Rank 0] Group 6 Loss: 4.2827
+[2025-07-07 08:22:59] [Rank 0] Group 7 Loss: 4.3702
+[2025-07-07 08:22:59] [Rank 0] Group 8 Loss: 4.3425
+[2025-07-07 08:22:59] [Rank 0] Group 9 Loss: 4.2558
+[2025-07-07 08:22:59] [Rank 0] Group 10 Loss: 4.3078
+[2025-07-07 08:22:59] [Rank 0] Group 11 Loss: 4.3559
+[2025-07-07 08:22:59] [Rank 0] Group 0 FTA: 0.1547
+[2025-07-07 08:22:59] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:22:59] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-07 08:22:59] [Rank 0] Group 3 FTA: 0.0729
+[2025-07-07 08:22:59] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 08:22:59] [Rank 0] Group 5 FTA: 0.0625
+[2025-07-07 08:22:59] [Rank 0] Group 6 FTA: 0.0990
+[2025-07-07 08:22:59] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-07 08:22:59] [Rank 0] Group 8 FTA: 0.1016
+[2025-07-07 08:22:59] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-07 08:22:59] [Rank 0] Group 10 FTA: 0.0840
+[2025-07-07 08:22:59] [Rank 0] Group 11 FTA: 0.1025
+[2025-07-07 08:22:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 08:23:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 08:23:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 08:23:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
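The [✓] lines record only where the figures were written; a sketch of how such per-class curves could be redrawn from an accumulated history, assuming a {step: {group: value}} layout (the real plotting code is not in this excerpt):

# Sketch for redrawing the per-class curves saved above; the history
# structure {step: {group: value}} is an assumed layout.
import matplotlib
matplotlib.use("Agg")                     # headless: save PNGs without a display
import matplotlib.pyplot as plt

def plot_per_class(history, out_path, ylabel="Loss"):
    steps = sorted(history)
    groups = sorted(history[steps[0]])
    fig, ax = plt.subplots(figsize=(8, 5))
    for g in groups:
        ax.plot(steps, [history[s][g] for s in steps], label=f"Group {g}")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    ax.legend(fontsize=7, ncol=2)
    fig.savefig(out_path, dpi=150)
    plt.close(fig)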
+[2025-07-07 08:23:00] [Rank 0] step:1001/10000 train_time:67039ms step_avg:66.97ms
+[2025-07-07 08:23:02] [Rank 0] step:1021/10000 train_time:67777ms step_avg:66.38ms
+[2025-07-07 08:23:03] [Rank 0] step:1041/10000 train_time:69114ms step_avg:66.39ms
+[2025-07-07 08:23:04] [Rank 0] step:1061/10000 train_time:70451ms step_avg:66.40ms
+[2025-07-07 08:23:06] [Rank 0] step:1081/10000 train_time:71841ms step_avg:66.46ms
+[2025-07-07 08:23:07] [Rank 0] step:1101/10000 train_time:73192ms step_avg:66.48ms
+[2025-07-07 08:23:08] [Rank 0] step:1121/10000 train_time:74532ms step_avg:66.49ms
+[2025-07-07 08:23:10] [Rank 0] step:1141/10000 train_time:75871ms step_avg:66.49ms
+[2025-07-07 08:23:11] [Rank 0] step:1161/10000 train_time:77212ms step_avg:66.50ms
+[2025-07-07 08:23:12] [Rank 0] step:1181/10000 train_time:78553ms step_avg:66.51ms
+[2025-07-07 08:23:14] [Rank 0] step:1201/10000 train_time:79895ms step_avg:66.52ms
+[2025-07-07 08:23:15] [Rank 0] step:1221/10000 train_time:81238ms step_avg:66.53ms
+[2025-07-07 08:23:16] [Rank 0] step:1241/10000 train_time:82581ms step_avg:66.54ms
+[2025-07-07 08:23:18] [Rank 0] step:1261/10000 train_time:84598ms step_avg:67.09ms
+[2025-07-07 08:23:19] [Rank 0] step:1281/10000 train_time:85321ms step_avg:66.61ms
+[2025-07-07 08:23:20] [Rank 0] step:1301/10000 train_time:86663ms step_avg:66.61ms
+[2025-07-07 08:23:22] [Rank 0] step:1321/10000 train_time:88006ms step_avg:66.62ms
+[2025-07-07 08:23:23] [Rank 0] step:1341/10000 train_time:89384ms step_avg:66.65ms
+[2025-07-07 08:23:24] [Rank 0] step:1361/10000 train_time:90726ms step_avg:66.66ms
+[2025-07-07 08:23:26] [Rank 0] step:1381/10000 train_time:92069ms step_avg:66.67ms
+[2025-07-07 08:23:27] [Rank 0] step:1401/10000 train_time:93411ms step_avg:66.67ms
+[2025-07-07 08:23:28] [Rank 0] step:1421/10000 train_time:94754ms step_avg:66.68ms
+[2025-07-07 08:23:30] [Rank 0] step:1441/10000 train_time:96145ms step_avg:66.72ms
+[2025-07-07 08:23:31] [Rank 0] step:1461/10000 train_time:97482ms step_avg:66.72ms
+[2025-07-07 08:23:33] [Rank 0] step:1481/10000 train_time:98826ms step_avg:66.73ms
+[2025-07-07 08:23:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:23:35] [Rank 0] PRINT: step:1500/10000 train_loss:1.6600 val_loss:1.5973 train_time:100780ms step_avg:67.19ms
+[2025-07-07 08:23:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:23:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:23:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:28:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:28:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:28:53] [Rank 0] Total Loss: 4.7136
+[2025-07-07 08:28:53] [Rank 0] Total FTA: 0.1102
+[2025-07-07 08:28:53] [Rank 0] Group 0 Loss: 5.0742
+[2025-07-07 08:28:53] [Rank 0] Group 1 Loss: 4.5780
+[2025-07-07 08:28:53] [Rank 0] Group 2 Loss: 4.3556
+[2025-07-07 08:28:53] [Rank 0] Group 3 Loss: 4.7399
+[2025-07-07 08:28:53] [Rank 0] Group 4 Loss: 4.6767
+[2025-07-07 08:28:53] [Rank 0] Group 5 Loss: 4.6407
+[2025-07-07 08:28:53] [Rank 0] Group 6 Loss: 4.5932
+[2025-07-07 08:28:53] [Rank 0] Group 7 Loss: 4.7358
+[2025-07-07 08:28:53] [Rank 0] Group 8 Loss: 4.6806
+[2025-07-07 08:28:53] [Rank 0] Group 9 Loss: 4.6492
+[2025-07-07 08:28:53] [Rank 0] Group 10 Loss: 4.7181
+[2025-07-07 08:28:53] [Rank 0] Group 11 Loss: 4.7221
+[2025-07-07 08:28:53] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 08:28:53] [Rank 0] Group 1 FTA: 0.2005
+[2025-07-07 08:28:53] [Rank 0] Group 2 FTA: 0.1797
+[2025-07-07 08:28:53] [Rank 0] Group 3 FTA: 0.0521
+[2025-07-07 08:28:53] [Rank 0] Group 4 FTA: 0.0599
+[2025-07-07 08:28:53] [Rank 0] Group 5 FTA: 0.0443
+[2025-07-07 08:28:53] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-07 08:28:53] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-07 08:28:53] [Rank 0] Group 8 FTA: 0.1276
+[2025-07-07 08:28:53] [Rank 0] Group 9 FTA: 0.0742
+[2025-07-07 08:28:53] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-07 08:28:53] [Rank 0] Group 11 FTA: 0.0986
+[2025-07-07 08:28:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 08:28:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 08:28:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 08:28:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 08:28:55] [Rank 0] step:1501/10000 train_time:100789ms step_avg:67.15ms
+[2025-07-07 08:28:56] [Rank 0] step:1521/10000 train_time:101547ms step_avg:66.76ms
+[2025-07-07 08:28:57] [Rank 0] step:1541/10000 train_time:102885ms step_avg:66.77ms
+[2025-07-07 08:28:59] [Rank 0] step:1561/10000 train_time:104226ms step_avg:66.77ms
+[2025-07-07 08:29:00] [Rank 0] step:1581/10000 train_time:105564ms step_avg:66.77ms
+[2025-07-07 08:29:02] [Rank 0] step:1601/10000 train_time:106904ms step_avg:66.77ms
+[2025-07-07 08:29:03] [Rank 0] step:1621/10000 train_time:108495ms step_avg:66.93ms
+[2025-07-07 08:29:04] [Rank 0] step:1641/10000 train_time:109583ms step_avg:66.78ms
+[2025-07-07 08:29:06] [Rank 0] step:1661/10000 train_time:110926ms step_avg:66.78ms
+[2025-07-07 08:29:07] [Rank 0] step:1681/10000 train_time:112266ms step_avg:66.79ms
+[2025-07-07 08:29:08] [Rank 0] step:1701/10000 train_time:113607ms step_avg:66.79ms
+[2025-07-07 08:29:10] [Rank 0] step:1721/10000 train_time:114948ms step_avg:66.79ms
+[2025-07-07 08:29:11] [Rank 0] step:1741/10000 train_time:116291ms step_avg:66.80ms
+[2025-07-07 08:29:12] [Rank 0] step:1761/10000 train_time:117634ms step_avg:66.80ms
+[2025-07-07 08:29:14] [Rank 0] step:1781/10000 train_time:118977ms step_avg:66.80ms
+[2025-07-07 08:29:15] [Rank 0] step:1801/10000 train_time:120320ms step_avg:66.81ms
+[2025-07-07 08:29:16] [Rank 0] step:1821/10000 train_time:121726ms step_avg:66.85ms
+[2025-07-07 08:29:18] [Rank 0] step:1841/10000 train_time:123070ms step_avg:66.85ms
+[2025-07-07 08:29:19] [Rank 0] step:1861/10000 train_time:124414ms step_avg:66.85ms
+[2025-07-07 08:29:20] [Rank 0] step:1881/10000 train_time:125759ms step_avg:66.86ms
+[2025-07-07 08:29:22] [Rank 0] step:1901/10000 train_time:127103ms step_avg:66.86ms
+[2025-07-07 08:29:23] [Rank 0] step:1921/10000 train_time:128448ms step_avg:66.87ms
+[2025-07-07 08:29:24] [Rank 0] step:1941/10000 train_time:129794ms step_avg:66.87ms
+[2025-07-07 08:29:26] [Rank 0] step:1961/10000 train_time:131140ms step_avg:66.87ms
+[2025-07-07 08:29:27] [Rank 0] step:1981/10000 train_time:132637ms step_avg:66.95ms
+[2025-07-07 08:29:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:29:29] [Rank 0] PRINT: step:2000/10000 train_loss:1.5355 val_loss:1.4624 train_time:134446ms step_avg:67.22ms
+[2025-07-07 08:29:29] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:29:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:29:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:34:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:34:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:34:51] [Rank 0] Total Loss: 4.6662
+[2025-07-07 08:34:51] [Rank 0] Total FTA: 0.1007
+[2025-07-07 08:34:51] [Rank 0] Group 0 Loss: 4.9357
+[2025-07-07 08:34:51] [Rank 0] Group 1 Loss: 4.4901
+[2025-07-07 08:34:51] [Rank 0] Group 2 Loss: 4.6000
+[2025-07-07 08:34:51] [Rank 0] Group 3 Loss: 4.5883
+[2025-07-07 08:34:51] [Rank 0] Group 4 Loss: 4.5822
+[2025-07-07 08:34:51] [Rank 0] Group 5 Loss: 4.5455
+[2025-07-07 08:34:51] [Rank 0] Group 6 Loss: 4.5856
+[2025-07-07 08:34:51] [Rank 0] Group 7 Loss: 4.7488
+[2025-07-07 08:34:51] [Rank 0] Group 8 Loss: 4.6578
+[2025-07-07 08:34:51] [Rank 0] Group 9 Loss: 4.6339
+[2025-07-07 08:34:51] [Rank 0] Group 10 Loss: 4.6501
+[2025-07-07 08:34:51] [Rank 0] Group 11 Loss: 4.6791
+[2025-07-07 08:34:51] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 08:34:51] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 08:34:51] [Rank 0] Group 2 FTA: 0.0964
+[2025-07-07 08:34:51] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 08:34:51] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 08:34:51] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 08:34:51] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-07 08:34:51] [Rank 0] Group 7 FTA: 0.0807
+[2025-07-07 08:34:51] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 08:34:51] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-07 08:34:51] [Rank 0] Group 10 FTA: 0.1035
+[2025-07-07 08:34:51] [Rank 0] Group 11 FTA: 0.1084
+[2025-07-07 08:34:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 08:34:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 08:34:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 08:34:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 08:34:52] [Rank 0] step:2001/10000 train_time:134457ms step_avg:67.19ms
+[2025-07-07 08:34:54] [Rank 0] step:2021/10000 train_time:135194ms step_avg:66.89ms
+[2025-07-07 08:34:55] [Rank 0] step:2041/10000 train_time:136531ms step_avg:66.89ms
+[2025-07-07 08:34:56] [Rank 0] step:2061/10000 train_time:137869ms step_avg:66.89ms
+[2025-07-07 08:34:58] [Rank 0] step:2081/10000 train_time:139207ms step_avg:66.89ms
+[2025-07-07 08:34:59] [Rank 0] step:2101/10000 train_time:140545ms step_avg:66.89ms
+[2025-07-07 08:35:01] [Rank 0] step:2121/10000 train_time:141884ms step_avg:66.90ms
+[2025-07-07 08:35:02] [Rank 0] step:2141/10000 train_time:143225ms step_avg:66.90ms
+[2025-07-07 08:35:03] [Rank 0] step:2161/10000 train_time:144565ms step_avg:66.90ms
+[2025-07-07 08:35:05] [Rank 0] step:2181/10000 train_time:145977ms step_avg:66.93ms
+[2025-07-07 08:35:06] [Rank 0] step:2201/10000 train_time:147317ms step_avg:66.93ms
+[2025-07-07 08:35:07] [Rank 0] step:2221/10000 train_time:148659ms step_avg:66.93ms
+[2025-07-07 08:35:09] [Rank 0] step:2241/10000 train_time:150010ms step_avg:66.94ms
+[2025-07-07 08:35:10] [Rank 0] step:2261/10000 train_time:151377ms step_avg:66.95ms
+[2025-07-07 08:35:11] [Rank 0] step:2281/10000 train_time:152744ms step_avg:66.96ms
+[2025-07-07 08:35:13] [Rank 0] step:2301/10000 train_time:154115ms step_avg:66.98ms
+[2025-07-07 08:35:14] [Rank 0] step:2321/10000 train_time:155487ms step_avg:66.99ms
+[2025-07-07 08:35:16] [Rank 0] step:2341/10000 train_time:157534ms step_avg:67.29ms
+[2025-07-07 08:35:17] [Rank 0] step:2361/10000 train_time:158271ms step_avg:67.04ms
+[2025-07-07 08:35:18] [Rank 0] step:2381/10000 train_time:159641ms step_avg:67.05ms
+[2025-07-07 08:35:20] [Rank 0] step:2401/10000 train_time:161010ms step_avg:67.06ms
+[2025-07-07 08:35:21] [Rank 0] step:2421/10000 train_time:162379ms step_avg:67.07ms
+[2025-07-07 08:35:22] [Rank 0] step:2441/10000 train_time:163749ms step_avg:67.08ms
+[2025-07-07 08:35:24] [Rank 0] step:2461/10000 train_time:165120ms step_avg:67.09ms
+[2025-07-07 08:35:25] [Rank 0] step:2481/10000 train_time:166488ms step_avg:67.11ms
+[2025-07-07 08:35:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:35:27] [Rank 0] PRINT: step:2500/10000 train_loss:1.3845 val_loss:1.3200 train_time:168480ms step_avg:67.39ms
+[2025-07-07 08:35:27] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:35:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:35:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:40:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:40:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:40:46] [Rank 0] Total Loss: 4.8857
+[2025-07-07 08:40:46] [Rank 0] Total FTA: 0.1669
+[2025-07-07 08:40:46] [Rank 0] Group 0 Loss: 5.3158
+[2025-07-07 08:40:46] [Rank 0] Group 1 Loss: 4.4936
+[2025-07-07 08:40:46] [Rank 0] Group 2 Loss: 4.5473
+[2025-07-07 08:40:46] [Rank 0] Group 3 Loss: 4.8186
+[2025-07-07 08:40:46] [Rank 0] Group 4 Loss: 4.8564
+[2025-07-07 08:40:46] [Rank 0] Group 5 Loss: 4.7802
+[2025-07-07 08:40:46] [Rank 0] Group 6 Loss: 4.7980
+[2025-07-07 08:40:46] [Rank 0] Group 7 Loss: 4.9091
+[2025-07-07 08:40:46] [Rank 0] Group 8 Loss: 4.9385
+[2025-07-07 08:40:46] [Rank 0] Group 9 Loss: 4.9050
+[2025-07-07 08:40:46] [Rank 0] Group 10 Loss: 4.8959
+[2025-07-07 08:40:46] [Rank 0] Group 11 Loss: 4.9065
+[2025-07-07 08:40:46] [Rank 0] Group 0 FTA: 0.3251
+[2025-07-07 08:40:46] [Rank 0] Group 1 FTA: 0.3359
+[2025-07-07 08:40:46] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-07 08:40:46] [Rank 0] Group 3 FTA: 0.0729
+[2025-07-07 08:40:46] [Rank 0] Group 4 FTA: 0.0677
+[2025-07-07 08:40:46] [Rank 0] Group 5 FTA: 0.1224
+[2025-07-07 08:40:46] [Rank 0] Group 6 FTA: 0.1354
+[2025-07-07 08:40:46] [Rank 0] Group 7 FTA: 0.1458
+[2025-07-07 08:40:46] [Rank 0] Group 8 FTA: 0.1172
+[2025-07-07 08:40:46] [Rank 0] Group 9 FTA: 0.1367
+[2025-07-07 08:40:46] [Rank 0] Group 10 FTA: 0.1719
+[2025-07-07 08:40:46] [Rank 0] Group 11 FTA: 0.1426
+[2025-07-07 08:40:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 08:40:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 08:40:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 08:40:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 08:40:48] [Rank 0] step:2501/10000 train_time:168493ms step_avg:67.37ms
+[2025-07-07 08:40:49] [Rank 0] step:2521/10000 train_time:169510ms step_avg:67.24ms
+[2025-07-07 08:40:51] [Rank 0] step:2541/10000 train_time:170666ms step_avg:67.16ms
+[2025-07-07 08:40:52] [Rank 0] step:2561/10000 train_time:172030ms step_avg:67.17ms
+[2025-07-07 08:40:53] [Rank 0] step:2581/10000 train_time:173395ms step_avg:67.18ms
+[2025-07-07 08:40:55] [Rank 0] step:2601/10000 train_time:174761ms step_avg:67.19ms
+[2025-07-07 08:40:56] [Rank 0] step:2621/10000 train_time:176128ms step_avg:67.20ms
+[2025-07-07 08:40:57] [Rank 0] step:2641/10000 train_time:177496ms step_avg:67.21ms
+[2025-07-07 08:40:59] [Rank 0] step:2661/10000 train_time:178863ms step_avg:67.22ms
+[2025-07-07 08:41:00] [Rank 0] step:2681/10000 train_time:180231ms step_avg:67.23ms
+[2025-07-07 08:41:02] [Rank 0] step:2701/10000 train_time:181849ms step_avg:67.33ms
+[2025-07-07 08:41:03] [Rank 0] step:2721/10000 train_time:183019ms step_avg:67.26ms
+[2025-07-07 08:41:04] [Rank 0] step:2741/10000 train_time:184387ms step_avg:67.27ms
+[2025-07-07 08:41:06] [Rank 0] step:2761/10000 train_time:185756ms step_avg:67.28ms
+[2025-07-07 08:41:07] [Rank 0] step:2781/10000 train_time:187126ms step_avg:67.29ms
+[2025-07-07 08:41:08] [Rank 0] step:2801/10000 train_time:188495ms step_avg:67.30ms
+[2025-07-07 08:41:10] [Rank 0] step:2821/10000 train_time:189864ms step_avg:67.30ms
+[2025-07-07 08:41:11] [Rank 0] step:2841/10000 train_time:191234ms step_avg:67.31ms
+[2025-07-07 08:41:12] [Rank 0] step:2861/10000 train_time:192605ms step_avg:67.32ms
+[2025-07-07 08:41:14] [Rank 0] step:2881/10000 train_time:194022ms step_avg:67.35ms
+[2025-07-07 08:41:15] [Rank 0] step:2901/10000 train_time:195373ms step_avg:67.35ms
+[2025-07-07 08:41:17] [Rank 0] step:2921/10000 train_time:196743ms step_avg:67.35ms
+[2025-07-07 08:41:18] [Rank 0] step:2941/10000 train_time:198114ms step_avg:67.36ms
+[2025-07-07 08:41:19] [Rank 0] step:2961/10000 train_time:199486ms step_avg:67.37ms
+[2025-07-07 08:41:21] [Rank 0] step:2981/10000 train_time:200857ms step_avg:67.38ms
+[2025-07-07 08:41:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:41:23] [Rank 0] PRINT: step:3000/10000 train_loss:1.2773 val_loss:1.2494 train_time:202855ms step_avg:67.62ms
+[2025-07-07 08:41:23] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:41:23] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:41:23] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:46:45] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:46:45] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:46:45] [Rank 0] Total Loss: 4.9155
+[2025-07-07 08:46:45] [Rank 0] Total FTA: 0.1825
+[2025-07-07 08:46:45] [Rank 0] Group 0 Loss: 5.1735
+[2025-07-07 08:46:45] [Rank 0] Group 1 Loss: 4.6490
+[2025-07-07 08:46:45] [Rank 0] Group 2 Loss: 4.5994
+[2025-07-07 08:46:45] [Rank 0] Group 3 Loss: 4.9613
+[2025-07-07 08:46:45] [Rank 0] Group 4 Loss: 4.8664
+[2025-07-07 08:46:45] [Rank 0] Group 5 Loss: 4.8861
+[2025-07-07 08:46:45] [Rank 0] Group 6 Loss: 4.8429
+[2025-07-07 08:46:45] [Rank 0] Group 7 Loss: 4.9761
+[2025-07-07 08:46:45] [Rank 0] Group 8 Loss: 4.9466
+[2025-07-07 08:46:45] [Rank 0] Group 9 Loss: 4.9245
+[2025-07-07 08:46:45] [Rank 0] Group 10 Loss: 4.9135
+[2025-07-07 08:46:45] [Rank 0] Group 11 Loss: 4.9438
+[2025-07-07 08:46:45] [Rank 0] Group 0 FTA: 0.1756
+[2025-07-07 08:46:45] [Rank 0] Group 1 FTA: 0.1328
+[2025-07-07 08:46:45] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 08:46:45] [Rank 0] Group 3 FTA: 0.1667
+[2025-07-07 08:46:45] [Rank 0] Group 4 FTA: 0.2188
+[2025-07-07 08:46:45] [Rank 0] Group 5 FTA: 0.2604
+[2025-07-07 08:46:45] [Rank 0] Group 6 FTA: 0.1719
+[2025-07-07 08:46:45] [Rank 0] Group 7 FTA: 0.2057
+[2025-07-07 08:46:45] [Rank 0] Group 8 FTA: 0.1901
+[2025-07-07 08:46:45] [Rank 0] Group 9 FTA: 0.2148
+[2025-07-07 08:46:45] [Rank 0] Group 10 FTA: 0.1836
+[2025-07-07 08:46:45] [Rank 0] Group 11 FTA: 0.1875
+[2025-07-07 08:46:46] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 08:46:46] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 08:46:46] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 08:46:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 08:46:47] [Rank 0] step:3001/10000 train_time:202864ms step_avg:67.60ms
+[2025-07-07 08:46:48] [Rank 0] step:3021/10000 train_time:203629ms step_avg:67.40ms
+[2025-07-07 08:46:49] [Rank 0] step:3041/10000 train_time:204992ms step_avg:67.41ms
+[2025-07-07 08:46:51] [Rank 0] step:3061/10000 train_time:206610ms step_avg:67.50ms
+[2025-07-07 08:46:52] [Rank 0] step:3081/10000 train_time:207767ms step_avg:67.43ms
+[2025-07-07 08:46:53] [Rank 0] step:3101/10000 train_time:209132ms step_avg:67.44ms
+[2025-07-07 08:46:55] [Rank 0] step:3121/10000 train_time:210498ms step_avg:67.45ms
+[2025-07-07 08:46:56] [Rank 0] step:3141/10000 train_time:211865ms step_avg:67.45ms
+[2025-07-07 08:46:58] [Rank 0] step:3161/10000 train_time:213233ms step_avg:67.46ms
+[2025-07-07 08:46:59] [Rank 0] step:3181/10000 train_time:214601ms step_avg:67.46ms
+[2025-07-07 08:47:00] [Rank 0] step:3201/10000 train_time:215970ms step_avg:67.47ms
+[2025-07-07 08:47:02] [Rank 0] step:3221/10000 train_time:217341ms step_avg:67.48ms
+[2025-07-07 08:47:03] [Rank 0] step:3241/10000 train_time:218962ms step_avg:67.56ms
+[2025-07-07 08:47:04] [Rank 0] step:3261/10000 train_time:220120ms step_avg:67.50ms
+[2025-07-07 08:47:06] [Rank 0] step:3281/10000 train_time:221489ms step_avg:67.51ms
+[2025-07-07 08:47:07] [Rank 0] step:3301/10000 train_time:222859ms step_avg:67.51ms
step:3301/10000 train_time:222859ms step_avg:67.51ms +[2025-07-07 08:47:09] [Rank 0] step:3321/10000 train_time:224229ms step_avg:67.52ms +[2025-07-07 08:47:09] [Rank 0] step:3321/10000 train_time:224229ms step_avg:67.52ms +[2025-07-07 08:47:10] [Rank 0] step:3341/10000 train_time:225599ms step_avg:67.52ms +[2025-07-07 08:47:10] [Rank 0] step:3341/10000 train_time:225599ms step_avg:67.52ms +[2025-07-07 08:47:11] [Rank 0] step:3361/10000 train_time:226970ms step_avg:67.53ms +[2025-07-07 08:47:11] [Rank 0] step:3361/10000 train_time:226970ms step_avg:67.53ms +[2025-07-07 08:47:13] [Rank 0] step:3381/10000 train_time:228340ms step_avg:67.54ms +[2025-07-07 08:47:13] [Rank 0] step:3381/10000 train_time:228340ms step_avg:67.54ms +[2025-07-07 08:47:14] [Rank 0] step:3401/10000 train_time:229710ms step_avg:67.54ms +[2025-07-07 08:47:14] [Rank 0] step:3401/10000 train_time:229710ms step_avg:67.54ms +[2025-07-07 08:47:15] [Rank 0] step:3421/10000 train_time:231739ms step_avg:67.74ms +[2025-07-07 08:47:15] [Rank 0] step:3421/10000 train_time:231739ms step_avg:67.74ms +[2025-07-07 08:47:17] [Rank 0] step:3441/10000 train_time:232477ms step_avg:67.56ms +[2025-07-07 08:47:17] [Rank 0] step:3441/10000 train_time:232477ms step_avg:67.56ms +[2025-07-07 08:47:18] [Rank 0] step:3461/10000 train_time:233847ms step_avg:67.57ms +[2025-07-07 08:47:18] [Rank 0] step:3461/10000 train_time:233847ms step_avg:67.57ms +[2025-07-07 08:47:19] [Rank 0] step:3481/10000 train_time:235217ms step_avg:67.57ms +[2025-07-07 08:47:19] [Rank 0] step:3481/10000 train_time:235217ms step_avg:67.57ms +[2025-07-07 08:47:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:47:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:47:22] [Rank 0] PRINT: step:3500/10000 train_loss:1.2136 val_loss:1.2083 train_time:237210ms step_avg:67.77ms +[2025-07-07 08:47:22] [Rank 0] PRINT: step:3500/10000 train_loss:1.2136 val_loss:1.2083 train_time:237210ms step_avg:67.77ms +[2025-07-07 08:47:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 08:47:22] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 08:47:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 08:47:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
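The divisibility warning above is plain integer arithmetic on the two numbers it names: 1966080 / 262144 = 7.5, so only 7 full validation batches fit and 131072 tokens are never scored. A minimal sketch of the check (variable names taken from the warning text, not from the training script):

    val_tokens = 1966080
    val_batch_size = 262144
    full_batches = val_tokens // val_batch_size          # 7
    missed = val_tokens - full_batches * val_batch_size  # 131072
    if val_tokens % val_batch_size != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")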
+[2025-07-07 08:47:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:52:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:52:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:52:44] [Rank 0] Total Loss: 5.0178
+[2025-07-07 08:52:44] [Rank 0] Total FTA: 0.3137
+[2025-07-07 08:52:44] [Rank 0] Group 0 Loss: 5.3292
+[2025-07-07 08:52:44] [Rank 0] Group 1 Loss: 4.7538
+[2025-07-07 08:52:44] [Rank 0] Group 2 Loss: 4.6625
+[2025-07-07 08:52:44] [Rank 0] Group 3 Loss: 5.0847
+[2025-07-07 08:52:44] [Rank 0] Group 4 Loss: 4.9748
+[2025-07-07 08:52:44] [Rank 0] Group 5 Loss: 5.0542
+[2025-07-07 08:52:44] [Rank 0] Group 6 Loss: 4.9068
+[2025-07-07 08:52:44] [Rank 0] Group 7 Loss: 5.0688
+[2025-07-07 08:52:44] [Rank 0] Group 8 Loss: 5.0667
+[2025-07-07 08:52:44] [Rank 0] Group 9 Loss: 4.9717
+[2025-07-07 08:52:44] [Rank 0] Group 10 Loss: 5.0141
+[2025-07-07 08:52:44] [Rank 0] Group 11 Loss: 5.0113
+[2025-07-07 08:52:44] [Rank 0] Group 0 FTA: 0.5098
+[2025-07-07 08:52:44] [Rank 0] Group 1 FTA: 0.2031
+[2025-07-07 08:52:44] [Rank 0] Group 2 FTA: 0.3984
+[2025-07-07 08:52:44] [Rank 0] Group 3 FTA: 0.3516
+[2025-07-07 08:52:44] [Rank 0] Group 4 FTA: 0.2057
+[2025-07-07 08:52:44] [Rank 0] Group 5 FTA: 0.3073
+[2025-07-07 08:52:44] [Rank 0] Group 6 FTA: 0.2708
+[2025-07-07 08:52:44] [Rank 0] Group 7 FTA: 0.2917
+[2025-07-07 08:52:44] [Rank 0] Group 8 FTA: 0.2734
+[2025-07-07 08:52:44] [Rank 0] Group 9 FTA: 0.2617
+[2025-07-07 08:52:44] [Rank 0] Group 10 FTA: 0.2852
+[2025-07-07 08:52:44] [Rank 0] Group 11 FTA: 0.2715
+[2025-07-07 08:52:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 08:52:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 08:52:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 08:52:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 08:52:46] [Rank 0] step:3501/10000 train_time:237220ms step_avg:67.76ms
+[2025-07-07 08:52:47] [Rank 0] step:3521/10000 train_time:237980ms step_avg:67.59ms
+[2025-07-07 08:52:48] [Rank 0] step:3541/10000 train_time:239344ms step_avg:67.59ms
+[2025-07-07 08:52:50] [Rank 0] step:3561/10000 train_time:240709ms step_avg:67.60ms
+[2025-07-07 08:52:51] [Rank 0] step:3581/10000 train_time:242075ms step_avg:67.60ms
+[2025-07-07 08:52:52] [Rank 0] step:3601/10000 train_time:243441ms step_avg:67.60ms
+[2025-07-07 08:52:54] [Rank 0] step:3621/10000 train_time:244808ms step_avg:67.61ms
+[2025-07-07 08:52:55] [Rank 0] step:3641/10000 train_time:246247ms step_avg:67.63ms
+[2025-07-07 08:52:57] [Rank 0] step:3661/10000 train_time:247614ms step_avg:67.64ms
+[2025-07-07 08:52:58] [Rank 0] step:3681/10000 train_time:248981ms step_avg:67.64ms
+[2025-07-07 08:52:59] [Rank 0] step:3701/10000 train_time:250350ms step_avg:67.64ms
+[2025-07-07 08:53:01] [Rank 0] step:3721/10000 train_time:251720ms step_avg:67.65ms
+[2025-07-07 08:53:02] [Rank 0] step:3741/10000 train_time:253089ms step_avg:67.65ms
+[2025-07-07 08:53:03] [Rank 0] step:3761/10000 train_time:254459ms step_avg:67.66ms
+[2025-07-07 08:53:05] [Rank 0] step:3781/10000 train_time:256493ms step_avg:67.84ms
+[2025-07-07 08:53:06] [Rank 0] step:3801/10000 train_time:257230ms step_avg:67.67ms
+[2025-07-07 08:53:08] [Rank 0] step:3821/10000 train_time:258600ms step_avg:67.68ms
+[2025-07-07 08:53:09] [Rank 0] step:3841/10000 train_time:259970ms step_avg:67.68ms
+[2025-07-07 08:53:10] [Rank 0] step:3861/10000 train_time:261342ms step_avg:67.69ms
+[2025-07-07 08:53:12] [Rank 0] step:3881/10000 train_time:262714ms step_avg:67.69ms
+[2025-07-07 08:53:13] [Rank 0] step:3901/10000 train_time:264087ms step_avg:67.70ms
+[2025-07-07 08:53:14] [Rank 0] step:3921/10000 train_time:265459ms step_avg:67.70ms
+[2025-07-07 08:53:16] [Rank 0] step:3941/10000 train_time:266831ms step_avg:67.71ms
+[2025-07-07 08:53:17] [Rank 0] step:3961/10000 train_time:268202ms step_avg:67.71ms
+[2025-07-07 08:53:19] [Rank 0] step:3981/10000 train_time:269574ms step_avg:67.72ms
+[2025-07-07 08:53:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:53:21] [Rank 0] PRINT: step:4000/10000 train_loss:1.1668 val_loss:1.1722 train_time:271571ms step_avg:67.89ms
+[2025-07-07 08:53:21] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:53:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
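The sampler behind the "Using stratified sampling" lines is not shown in this log. A minimal sketch of what per-group stratified sampling typically looks like (the grouping key, quota rounding, and seed are assumptions; the realized size, 5633 here, depends on the true group sizes, and a fixed seed is one way the count stays identical at every evaluation):

    import random
    from collections import defaultdict

    def stratified_sample(examples, target=5000, seed=42):
        # Bucket examples by group label, then draw a quota from each bucket
        # so all twelve groups are represented in the detailed evaluation.
        rng = random.Random(seed)
        by_group = defaultdict(list)
        for ex in examples:
            by_group[ex["group"]].append(ex)
        quota = -(-target // len(by_group))  # ceil(target / num_groups)
        picked = []
        for items in by_group.values():
            rng.shuffle(items)
            picked.extend(items[:quota])
        return picked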
+[2025-07-07 08:53:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:58:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:58:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:58:43] [Rank 0] Total Loss: 5.2083
+[2025-07-07 08:58:43] [Rank 0] Total FTA: 0.3519
+[2025-07-07 08:58:43] [Rank 0] Group 0 Loss: 5.5211
+[2025-07-07 08:58:43] [Rank 0] Group 1 Loss: 5.0270
+[2025-07-07 08:58:43] [Rank 0] Group 2 Loss: 4.8001
+[2025-07-07 08:58:43] [Rank 0] Group 3 Loss: 5.2765
+[2025-07-07 08:58:43] [Rank 0] Group 4 Loss: 5.1918
+[2025-07-07 08:58:43] [Rank 0] Group 5 Loss: 5.1886
+[2025-07-07 08:58:43] [Rank 0] Group 6 Loss: 5.0612
+[2025-07-07 08:58:43] [Rank 0] Group 7 Loss: 5.2633
+[2025-07-07 08:58:43] [Rank 0] Group 8 Loss: 5.2176
+[2025-07-07 08:58:43] [Rank 0] Group 9 Loss: 5.1863
+[2025-07-07 08:58:43] [Rank 0] Group 10 Loss: 5.2395
+[2025-07-07 08:58:43] [Rank 0] Group 11 Loss: 5.2035
+[2025-07-07 08:58:43] [Rank 0] Group 0 FTA: 0.3173
+[2025-07-07 08:58:43] [Rank 0] Group 1 FTA: 0.3411
+[2025-07-07 08:58:43] [Rank 0] Group 2 FTA: 0.2500
+[2025-07-07 08:58:43] [Rank 0] Group 3 FTA: 0.4740
+[2025-07-07 08:58:43] [Rank 0] Group 4 FTA: 0.2917
+[2025-07-07 08:58:43] [Rank 0] Group 5 FTA: 0.4010
+[2025-07-07 08:58:43] [Rank 0] Group 6 FTA: 0.3958
+[2025-07-07 08:58:43] [Rank 0] Group 7 FTA: 0.3438
+[2025-07-07 08:58:43] [Rank 0] Group 8 FTA: 0.3750
+[2025-07-07 08:58:43] [Rank 0] Group 9 FTA: 0.3555
+[2025-07-07 08:58:43] [Rank 0] Group 10 FTA: 0.3301
+[2025-07-07 08:58:43] [Rank 0] Group 11 FTA: 0.3662
+[2025-07-07 08:58:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 08:58:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 08:58:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 08:58:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 08:58:45] [Rank 0] step:4001/10000 train_time:271580ms step_avg:67.88ms
+[2025-07-07 08:58:46] [Rank 0] step:4021/10000 train_time:272350ms step_avg:67.73ms
+[2025-07-07 08:58:48] [Rank 0] step:4041/10000 train_time:273713ms step_avg:67.73ms
+[2025-07-07 08:58:49] [Rank 0] step:4061/10000 train_time:275078ms step_avg:67.74ms
+[2025-07-07 08:58:50] [Rank 0] step:4081/10000 train_time:276443ms step_avg:67.74ms
+[2025-07-07 08:58:52] [Rank 0] step:4101/10000 train_time:277810ms step_avg:67.74ms
+[2025-07-07 08:58:53] [Rank 0] step:4121/10000 train_time:279177ms step_avg:67.74ms
+[2025-07-07 08:58:54] [Rank 0] step:4141/10000 train_time:281205ms step_avg:67.91ms
+[2025-07-07 08:58:56] [Rank 0] step:4161/10000 train_time:281942ms step_avg:67.76ms
+[2025-07-07 08:58:57] [Rank 0] step:4181/10000 train_time:283311ms step_avg:67.76ms
+[2025-07-07 08:58:59] [Rank 0] step:4201/10000 train_time:284678ms step_avg:67.76ms
+[2025-07-07 08:59:00] [Rank 0] step:4221/10000 train_time:286048ms step_avg:67.77ms
+[2025-07-07 08:59:01] [Rank 0] step:4241/10000 train_time:287417ms step_avg:67.77ms
+[2025-07-07 08:59:03] [Rank 0] step:4261/10000 train_time:288787ms step_avg:67.77ms
+[2025-07-07 08:59:04] [Rank 0] step:4281/10000 train_time:290156ms step_avg:67.78ms
+[2025-07-07 08:59:05] [Rank 0] step:4301/10000 train_time:291527ms step_avg:67.78ms
+[2025-07-07 08:59:07] [Rank 0] step:4321/10000 train_time:292898ms step_avg:67.78ms
+[2025-07-07 08:59:08] [Rank 0] step:4341/10000 train_time:294296ms step_avg:67.79ms
+[2025-07-07 08:59:10] [Rank 0] step:4361/10000 train_time:295666ms step_avg:67.80ms
+[2025-07-07 08:59:11] [Rank 0] step:4381/10000 train_time:297038ms step_avg:67.80ms
+[2025-07-07 08:59:12] [Rank 0] step:4401/10000 train_time:298444ms step_avg:67.81ms
+[2025-07-07 08:59:14] [Rank 0] step:4421/10000 train_time:299815ms step_avg:67.82ms
+[2025-07-07 08:59:15] [Rank 0] step:4441/10000 train_time:301186ms step_avg:67.82ms
+[2025-07-07 08:59:16] [Rank 0] step:4461/10000 train_time:302557ms step_avg:67.82ms
+[2025-07-07 08:59:18] [Rank 0] step:4481/10000 train_time:303927ms step_avg:67.83ms
+[2025-07-07 08:59:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:59:20] [Rank 0] PRINT: step:4500/10000 train_loss:1.1241 val_loss:1.1373 train_time:305921ms step_avg:67.98ms
+[2025-07-07 08:59:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:59:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
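The step_avg field in these lines is consistent with cumulative train_time divided by the step index, e.g. at step 4301 above: 291527 ms / 4301 = 67.78 ms. A one-line check:

    train_time_ms, step = 291527, 4301
    print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:67.78ms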
+[2025-07-07 08:59:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:04:41] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:04:41] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:04:41] [Rank 0] Total Loss: 5.2085
+[2025-07-07 09:04:41] [Rank 0] Total FTA: 0.4511
+[2025-07-07 09:04:41] [Rank 0] Group 0 Loss: 5.5311
+[2025-07-07 09:04:41] [Rank 0] Group 1 Loss: 4.8936
+[2025-07-07 09:04:41] [Rank 0] Group 2 Loss: 4.8684
+[2025-07-07 09:04:41] [Rank 0] Group 3 Loss: 5.3456
+[2025-07-07 09:04:41] [Rank 0] Group 4 Loss: 5.1554
+[2025-07-07 09:04:41] [Rank 0] Group 5 Loss: 5.1632
+[2025-07-07 09:04:41] [Rank 0] Group 6 Loss: 5.1094
+[2025-07-07 09:04:41] [Rank 0] Group 7 Loss: 5.2390
+[2025-07-07 09:04:41] [Rank 0] Group 8 Loss: 5.2167
+[2025-07-07 09:04:41] [Rank 0] Group 9 Loss: 5.2049
+[2025-07-07 09:04:41] [Rank 0] Group 10 Loss: 5.1972
+[2025-07-07 09:04:41] [Rank 0] Group 11 Loss: 5.2265
+[2025-07-07 09:04:41] [Rank 0] Group 0 FTA: 0.6710
+[2025-07-07 09:04:41] [Rank 0] Group 1 FTA: 0.3724
+[2025-07-07 09:04:41] [Rank 0] Group 2 FTA: 0.3984
+[2025-07-07 09:04:41] [Rank 0] Group 3 FTA: 0.4453
+[2025-07-07 09:04:41] [Rank 0] Group 4 FTA: 0.3281
+[2025-07-07 09:04:41] [Rank 0] Group 5 FTA: 0.4635
+[2025-07-07 09:04:41] [Rank 0] Group 6 FTA: 0.3906
+[2025-07-07 09:04:41] [Rank 0] Group 7 FTA: 0.4479
+[2025-07-07 09:04:41] [Rank 0] Group 8 FTA: 0.4583
+[2025-07-07 09:04:41] [Rank 0] Group 9 FTA: 0.4414
+[2025-07-07 09:04:41] [Rank 0] Group 10 FTA: 0.4258
+[2025-07-07 09:04:41] [Rank 0] Group 11 FTA: 0.4150
+[2025-07-07 09:04:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:04:42] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:04:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:04:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:04:44] [Rank 0] step:4501/10000 train_time:305938ms step_avg:67.97ms
+[2025-07-07 09:04:45] [Rank 0] step:4521/10000 train_time:307382ms step_avg:67.99ms
+[2025-07-07 09:04:46] [Rank 0] step:4541/10000 train_time:308746ms step_avg:67.99ms
+[2025-07-07 09:04:48] [Rank 0] step:4561/10000 train_time:310112ms step_avg:67.99ms
+[2025-07-07 09:04:49] [Rank 0] step:4581/10000 train_time:311476ms step_avg:67.99ms
+[2025-07-07 09:04:50] [Rank 0] step:4601/10000 train_time:312842ms step_avg:67.99ms
+[2025-07-07 09:04:52] [Rank 0] step:4621/10000 train_time:314210ms step_avg:68.00ms
+[2025-07-07 09:04:53] [Rank 0] step:4641/10000 train_time:315577ms step_avg:68.00ms
+[2025-07-07 09:04:55] [Rank 0] step:4661/10000 train_time:316946ms step_avg:68.00ms
+[2025-07-07 09:04:56] [Rank 0] step:4681/10000 train_time:318998ms step_avg:68.15ms
+[2025-07-07 09:04:57] [Rank 0] step:4701/10000 train_time:319736ms step_avg:68.01ms
+[2025-07-07 09:04:59] [Rank 0] step:4721/10000 train_time:321105ms step_avg:68.02ms
+[2025-07-07 09:05:00] [Rank 0] step:4741/10000 train_time:322473ms step_avg:68.02ms
+[2025-07-07 09:05:01] [Rank 0] step:4761/10000 train_time:323843ms step_avg:68.02ms
+[2025-07-07 09:05:03] [Rank 0] step:4781/10000 train_time:325212ms step_avg:68.02ms
+[2025-07-07 09:05:04] [Rank 0] step:4801/10000 train_time:326582ms step_avg:68.02ms
+[2025-07-07 09:05:06] [Rank 0] step:4821/10000 train_time:327951ms step_avg:68.03ms
+[2025-07-07 09:05:07] [Rank 0] step:4841/10000 train_time:329321ms step_avg:68.03ms
+[2025-07-07 09:05:08] [Rank 0] step:4861/10000 train_time:331350ms step_avg:68.16ms
+[2025-07-07 09:05:10] [Rank 0] step:4881/10000 train_time:332088ms step_avg:68.04ms
+[2025-07-07 09:05:11] [Rank 0] step:4901/10000 train_time:333459ms step_avg:68.04ms
+[2025-07-07 09:05:12] [Rank 0] step:4921/10000 train_time:334830ms step_avg:68.04ms
+[2025-07-07 09:05:14] [Rank 0] step:4941/10000 train_time:336202ms step_avg:68.04ms
+[2025-07-07 09:05:15] [Rank 0] step:4961/10000 train_time:337573ms step_avg:68.05ms
+[2025-07-07 09:05:17] [Rank 0] step:4981/10000 train_time:338944ms step_avg:68.05ms
+[2025-07-07 09:05:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:05:19] [Rank 0] PRINT: step:5000/10000 train_loss:1.0853 val_loss:1.1031 train_time:340938ms step_avg:68.19ms
+[2025-07-07 09:05:19] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:05:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
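The "[✓] ... curve updated and saved" lines show every figure being re-rendered and overwritten in place at each 500-step evaluation. A minimal matplotlib sketch of that pattern (the history containers and axis labels are assumptions, not the script's actual plotting code):

    import matplotlib
    matplotlib.use("Agg")  # headless backend for a training node
    import matplotlib.pyplot as plt

    def save_per_class_curves(steps, per_group_loss, out_path):
        # per_group_loss maps group id -> one loss value per evaluation step.
        fig, ax = plt.subplots()
        for group in sorted(per_group_loss):
            ax.plot(steps, per_group_loss[group], label=f"Group {group}")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval loss")
        ax.legend(fontsize="small")
        fig.savefig(out_path)  # overwrites the previous PNG at the same path
        plt.close(fig)         # avoid leaking figures across repeated calls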
+[2025-07-07 09:05:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:10:39] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:10:39] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:10:39] [Rank 0] Total Loss: 5.1293
+[2025-07-07 09:10:39] [Rank 0] Total FTA: 0.4575
+[2025-07-07 09:10:39] [Rank 0] Group 0 Loss: 5.3412
+[2025-07-07 09:10:39] [Rank 0] Group 1 Loss: 5.0180
+[2025-07-07 09:10:39] [Rank 0] Group 2 Loss: 4.7644
+[2025-07-07 09:10:39] [Rank 0] Group 3 Loss: 5.1751
+[2025-07-07 09:10:39] [Rank 0] Group 4 Loss: 5.0936
+[2025-07-07 09:10:39] [Rank 0] Group 5 Loss: 5.0976
+[2025-07-07 09:10:39] [Rank 0] Group 6 Loss: 5.0500
+[2025-07-07 09:10:39] [Rank 0] Group 7 Loss: 5.1571
+[2025-07-07 09:10:39] [Rank 0] Group 8 Loss: 5.1469
+[2025-07-07 09:10:39] [Rank 0] Group 9 Loss: 5.1469
+[2025-07-07 09:10:39] [Rank 0] Group 10 Loss: 5.1794
+[2025-07-07 09:10:39] [Rank 0] Group 11 Loss: 5.1402
+[2025-07-07 09:10:39] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 09:10:39] [Rank 0] Group 1 FTA: 0.3047
+[2025-07-07 09:10:39] [Rank 0] Group 2 FTA: 0.3568
+[2025-07-07 09:10:39] [Rank 0] Group 3 FTA: 0.6719
+[2025-07-07 09:10:39] [Rank 0] Group 4 FTA: 0.5521
+[2025-07-07 09:10:39] [Rank 0] Group 5 FTA: 0.5026
+[2025-07-07 09:10:39] [Rank 0] Group 6 FTA: 0.5391
+[2025-07-07 09:10:39] [Rank 0] Group 7 FTA: 0.5547
+[2025-07-07 09:10:39] [Rank 0] Group 8 FTA: 0.4948
+[2025-07-07 09:10:39] [Rank 0] Group 9 FTA: 0.5234
+[2025-07-07 09:10:39] [Rank 0] Group 10 FTA: 0.5195
+[2025-07-07 09:10:39] [Rank 0] Group 11 FTA: 0.5107
+[2025-07-07 09:10:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:10:40] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:10:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:10:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:10:40] [Rank 0] step:5001/10000 train_time:340949ms step_avg:68.18ms
+[2025-07-07 09:10:42] [Rank 0] step:5021/10000 train_time:341706ms step_avg:68.06ms
+[2025-07-07 09:10:43] [Rank 0] step:5041/10000 train_time:343120ms step_avg:68.07ms
+[2025-07-07 09:10:44] [Rank 0] step:5061/10000 train_time:344469ms step_avg:68.06ms
+[2025-07-07 09:10:46] [Rank 0] step:5081/10000 train_time:345836ms step_avg:68.06ms
+[2025-07-07 09:10:47] [Rank 0] step:5101/10000 train_time:347202ms step_avg:68.07ms
+[2025-07-07 09:10:48] [Rank 0] step:5121/10000 train_time:348570ms step_avg:68.07ms
+[2025-07-07 09:10:50] [Rank 0] step:5141/10000 train_time:349937ms step_avg:68.07ms
+[2025-07-07 09:10:51] [Rank 0] step:5161/10000 train_time:351306ms step_avg:68.07ms
+[2025-07-07 09:10:53] [Rank 0] step:5181/10000 train_time:352675ms step_avg:68.07ms
+[2025-07-07 09:10:54] [Rank 0] step:5201/10000 train_time:354044ms step_avg:68.07ms
+[2025-07-07 09:10:55] [Rank 0] step:5221/10000 train_time:355461ms step_avg:68.08ms
+[2025-07-07 09:10:57] [Rank 0] step:5241/10000 train_time:356822ms step_avg:68.08ms
+[2025-07-07 09:10:58] [Rank 0] step:5261/10000 train_time:358191ms step_avg:68.08ms
+[2025-07-07 09:10:59] [Rank 0] step:5281/10000 train_time:359559ms step_avg:68.09ms
+[2025-07-07 09:11:01] [Rank 0] step:5301/10000 train_time:360931ms step_avg:68.09ms
+[2025-07-07 09:11:02] [Rank 0] step:5321/10000 train_time:362302ms step_avg:68.09ms
+[2025-07-07 09:11:04] [Rank 0] step:5341/10000 train_time:363673ms step_avg:68.09ms
+[2025-07-07 09:11:05] [Rank 0] step:5361/10000 train_time:365044ms step_avg:68.09ms
+[2025-07-07 09:11:06] [Rank 0] step:5381/10000 train_time:366415ms step_avg:68.09ms
+[2025-07-07 09:11:08] [Rank 0] step:5401/10000 train_time:368036ms step_avg:68.14ms
+[2025-07-07 09:11:09] [Rank 0] step:5421/10000 train_time:369207ms step_avg:68.11ms
+[2025-07-07 09:11:11] [Rank 0] step:5441/10000 train_time:370578ms step_avg:68.11ms
+[2025-07-07 09:11:12] [Rank 0] step:5461/10000 train_time:371949ms step_avg:68.11ms
+[2025-07-07 09:11:13] [Rank 0] step:5481/10000 train_time:373320ms step_avg:68.11ms
+[2025-07-07 09:11:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:11:16] [Rank 0] PRINT: step:5500/10000 train_loss:1.0439 val_loss:1.0844 train_time:375315ms step_avg:68.24ms
+[2025-07-07 09:11:16] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:11:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
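FTA is never expanded in this excerpt; assuming it is a per-sample score in [0, 1] (for example, first-token accuracy), the figures above behave like means over sample-level scores. At step 5000 the unweighted mean of the twelve group FTAs is about 0.475 while the logged total is 0.4575, which suggests the total is weighted by per-group sample counts, as in this sketch (all names hypothetical):

    from collections import defaultdict

    def aggregate_fta(scores):
        # scores: list of (group_id, fta) pairs from one evaluation pass.
        per_group = defaultdict(list)
        for group, fta in scores:
            per_group[group].append(fta)
        group_means = {g: sum(v) / len(v) for g, v in per_group.items()}
        total = sum(f for _, f in scores) / len(scores)  # sample-weighted
        return total, group_means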
+[2025-07-07 09:11:16] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:16:35] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:16:35] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:16:35] [Rank 0] Total Loss: 5.2262
+[2025-07-07 09:16:35] [Rank 0] Total FTA: 0.5846
+[2025-07-07 09:16:35] [Rank 0] Group 0 Loss: 5.5381
+[2025-07-07 09:16:35] [Rank 0] Group 1 Loss: 4.8753
+[2025-07-07 09:16:35] [Rank 0] Group 2 Loss: 4.8969
+[2025-07-07 09:16:35] [Rank 0] Group 3 Loss: 5.3304
+[2025-07-07 09:16:35] [Rank 0] Group 4 Loss: 5.2288
+[2025-07-07 09:16:35] [Rank 0] Group 5 Loss: 5.1782
+[2025-07-07 09:16:35] [Rank 0] Group 6 Loss: 5.1273
+[2025-07-07 09:16:35] [Rank 0] Group 7 Loss: 5.2831
+[2025-07-07 09:16:35] [Rank 0] Group 8 Loss: 5.2650
+[2025-07-07 09:16:35] [Rank 0] Group 9 Loss: 5.1873
+[2025-07-07 09:16:35] [Rank 0] Group 10 Loss: 5.1958
+[2025-07-07 09:16:35] [Rank 0] Group 11 Loss: 5.2508
+[2025-07-07 09:16:35] [Rank 0] Group 0 FTA: 0.4889
+[2025-07-07 09:16:35] [Rank 0] Group 1 FTA: 0.7005
+[2025-07-07 09:16:35] [Rank 0] Group 2 FTA: 0.5000
+[2025-07-07 09:16:35] [Rank 0] Group 3 FTA: 0.6042
+[2025-07-07 09:16:35] [Rank 0] Group 4 FTA: 0.6224
+[2025-07-07 09:16:35] [Rank 0] Group 5 FTA: 0.6589
+[2025-07-07 09:16:35] [Rank 0] Group 6 FTA: 0.5443
+[2025-07-07 09:16:35] [Rank 0] Group 7 FTA: 0.6615
+[2025-07-07 09:16:35] [Rank 0] Group 8 FTA: 0.5703
+[2025-07-07 09:16:35] [Rank 0] Group 9 FTA: 0.5664
+[2025-07-07 09:16:35] [Rank 0] Group 10 FTA: 0.5781
+[2025-07-07 09:16:35] [Rank 0] Group 11 FTA: 0.5947
+[2025-07-07 09:16:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:16:36] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:16:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:16:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:16:36] [Rank 0] step:5501/10000 train_time:375325ms step_avg:68.23ms
+[2025-07-07 09:16:38] [Rank 0] step:5521/10000 train_time:376080ms step_avg:68.12ms
+[2025-07-07 09:16:39] [Rank 0] step:5541/10000 train_time:377443ms step_avg:68.12ms
+[2025-07-07 09:16:40] [Rank 0] step:5561/10000 train_time:378810ms step_avg:68.12ms
+[2025-07-07 09:16:42] [Rank 0] step:5581/10000 train_time:380853ms step_avg:68.24ms
+[2025-07-07 09:16:43] [Rank 0] step:5601/10000 train_time:381588ms step_avg:68.13ms
+[2025-07-07 09:16:44] [Rank 0] step:5621/10000 train_time:382955ms step_avg:68.13ms
+[2025-07-07 09:16:46] [Rank 0] step:5641/10000 train_time:384322ms step_avg:68.13ms
+[2025-07-07 09:16:47] [Rank 0] step:5661/10000 train_time:385689ms step_avg:68.13ms
+[2025-07-07 09:16:49] [Rank 0] step:5681/10000 train_time:387058ms step_avg:68.13ms
+[2025-07-07 09:16:50] [Rank 0] step:5701/10000 train_time:388429ms step_avg:68.13ms
+[2025-07-07 09:16:51] [Rank 0] step:5721/10000 train_time:389798ms step_avg:68.13ms
+[2025-07-07 09:16:53] [Rank 0] step:5741/10000 train_time:391167ms step_avg:68.14ms
+[2025-07-07 09:16:54] [Rank 0] step:5761/10000 train_time:392582ms step_avg:68.14ms
+[2025-07-07 09:16:55] [Rank 0] step:5781/10000 train_time:393906ms step_avg:68.14ms
+[2025-07-07 09:16:57] [Rank 0] step:5801/10000 train_time:395277ms step_avg:68.14ms
+[2025-07-07 09:16:58] [Rank 0] step:5821/10000 train_time:396647ms step_avg:68.14ms
+[2025-07-07 09:17:00] [Rank 0] step:5841/10000 train_time:398017ms step_avg:68.14ms
+[2025-07-07 09:17:01] [Rank 0] step:5861/10000 train_time:399389ms step_avg:68.14ms
+[2025-07-07 09:17:02] [Rank 0] step:5881/10000 train_time:400761ms step_avg:68.14ms
+[2025-07-07 09:17:04] [Rank 0] step:5901/10000 train_time:402133ms step_avg:68.15ms
+[2025-07-07 09:17:05] [Rank 0] step:5921/10000 train_time:403505ms step_avg:68.15ms
+[2025-07-07 09:17:06] [Rank 0] step:5941/10000 train_time:404923ms step_avg:68.16ms
+[2025-07-07 09:17:08] [Rank 0] step:5961/10000 train_time:406250ms step_avg:68.15ms
+[2025-07-07 09:17:09] [Rank 0] step:5981/10000 train_time:407622ms step_avg:68.15ms
+[2025-07-07 09:17:10] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:17:11] [Rank 0] PRINT: step:6000/10000 train_loss:1.0064 val_loss:1.0617 train_time:409618ms step_avg:68.27ms
+[2025-07-07 09:17:11] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:17:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
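Because each PNG is overwritten in place, the log text itself is the durable record of these metrics. A small parser over the line formats visible above (a hypothetical helper, not part of the training script) is enough to rebuild the curves:

    import re

    GROUP_RE = re.compile(r"Group (\d+) (Loss|FTA): ([\d.]+)")
    VAL_RE = re.compile(r"step:(\d+)/10000 train_loss:([\d.]+) val_loss:([\d.]+)")

    def parse_log(path):
        group_rows, val_rows = [], []
        with open(path, encoding="utf-8") as f:
            for line in f:
                if (m := GROUP_RE.search(line)):
                    group_rows.append((int(m[1]), m[2], float(m[3])))
                elif (m := VAL_RE.search(line)):
                    val_rows.append((int(m[1]), float(m[2]), float(m[3])))
        return group_rows, val_rows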
+[2025-07-07 09:17:12] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:22:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:22:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:22:31] [Rank 0] Total Loss: 5.3936
+[2025-07-07 09:22:31] [Rank 0] Total FTA: 0.6446
+[2025-07-07 09:22:31] [Rank 0] Group 0 Loss: 5.7199
+[2025-07-07 09:22:31] [Rank 0] Group 1 Loss: 5.1109
+[2025-07-07 09:22:31] [Rank 0] Group 2 Loss: 5.1069
+[2025-07-07 09:22:31] [Rank 0] Group 3 Loss: 5.3922
+[2025-07-07 09:22:32] [Rank 0] Group 4 Loss: 5.3248
+[2025-07-07 09:22:32] [Rank 0] Group 5 Loss: 5.3572
+[2025-07-07 09:22:32] [Rank 0] Group 6 Loss: 5.2609
+[2025-07-07 09:22:32] [Rank 0] Group 7 Loss: 5.4335
+[2025-07-07 09:22:32] [Rank 0] Group 8 Loss: 5.4279
+[2025-07-07 09:22:32] [Rank 0] Group 9 Loss: 5.3649
+[2025-07-07 09:22:32] [Rank 0] Group 10 Loss: 5.3892
+[2025-07-07 09:22:32] [Rank 0] Group 11 Loss: 5.4333
+[2025-07-07 09:22:32] [Rank 0] Group 0 FTA: 0.4954
+[2025-07-07 09:22:32] [Rank 0] Group 1 FTA: 0.6354
+[2025-07-07 09:22:32] [Rank 0] Group 2 FTA: 0.6823
+[2025-07-07 09:22:32] [Rank 0] Group 3 FTA: 0.7057
+[2025-07-07 09:22:32] [Rank 0] Group 4 FTA: 0.7396
+[2025-07-07 09:22:32] [Rank 0] Group 5 FTA: 0.6354
+[2025-07-07 09:22:32] [Rank 0] Group 6 FTA: 0.6354
+[2025-07-07 09:22:32] [Rank 0] Group 7 FTA: 0.6719
+[2025-07-07 09:22:32] [Rank 0] Group 8 FTA: 0.6615
+[2025-07-07 09:22:32] [Rank 0] Group 9 FTA: 0.6641
+[2025-07-07 09:22:32] [Rank 0] Group 10 FTA: 0.6348
+[2025-07-07 09:22:32] [Rank 0] Group 11 FTA: 0.6777
+[2025-07-07 09:22:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:22:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:22:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:22:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:22:34] [Rank 0] step:6001/10000 train_time:409628ms step_avg:68.26ms
+[2025-07-07 09:22:36] [Rank 0] step:6021/10000 train_time:410384ms step_avg:68.16ms
+[2025-07-07 09:22:37] [Rank 0] step:6041/10000 train_time:411749ms step_avg:68.16ms
+[2025-07-07 09:22:38] [Rank 0] step:6061/10000 train_time:413115ms step_avg:68.16ms
+[2025-07-07 09:22:40] [Rank 0] step:6081/10000 train_time:414481ms step_avg:68.16ms
+[2025-07-07 09:22:41] [Rank 0] step:6101/10000 train_time:415849ms step_avg:68.16ms
+[2025-07-07 09:22:42] [Rank 0] step:6121/10000 train_time:417261ms step_avg:68.17ms
+[2025-07-07 09:22:44] [Rank 0] step:6141/10000 train_time:418584ms step_avg:68.16ms
+[2025-07-07 09:22:45] [Rank 0] step:6161/10000 train_time:419958ms step_avg:68.16ms
+[2025-07-07 09:22:47] [Rank 0] step:6181/10000 train_time:421329ms step_avg:68.17ms
+[2025-07-07 09:22:48] [Rank 0] step:6201/10000 train_time:422698ms step_avg:68.17ms
+[2025-07-07 09:22:49] [Rank 0] step:6221/10000 train_time:424067ms step_avg:68.17ms
+[2025-07-07 09:22:51] [Rank 0] step:6241/10000 train_time:425436ms step_avg:68.17ms
+[2025-07-07 09:22:52] [Rank 0] step:6261/10000 train_time:426806ms step_avg:68.17ms
+[2025-07-07 09:22:53] [Rank 0] step:6281/10000 train_time:428174ms step_avg:68.17ms
+[2025-07-07 09:22:55] [Rank 0] step:6301/10000 train_time:429545ms step_avg:68.17ms
+[2025-07-07 09:22:56] [Rank 0] step:6321/10000 train_time:430915ms step_avg:68.17ms
+[2025-07-07 09:22:57] [Rank 0] step:6341/10000 train_time:432287ms step_avg:68.17ms
+[2025-07-07 09:22:59] [Rank 0] step:6361/10000 train_time:433657ms step_avg:68.17ms
+[2025-07-07 09:23:00] [Rank 0] step:6381/10000 train_time:435029ms step_avg:68.18ms
+[2025-07-07 09:23:02] [Rank 0] step:6401/10000 train_time:436400ms step_avg:68.18ms
+[2025-07-07 09:23:03] [Rank 0] step:6421/10000 train_time:437771ms step_avg:68.18ms
+[2025-07-07 09:23:04] [Rank 0] step:6441/10000 train_time:439141ms step_avg:68.18ms
+[2025-07-07 09:23:06] [Rank 0] step:6461/10000 train_time:440512ms step_avg:68.18ms
+[2025-07-07 09:23:07] [Rank 0] step:6481/10000 train_time:442558ms step_avg:68.29ms
+[2025-07-07 09:23:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:23:09] [Rank 0] PRINT: step:6500/10000 train_loss:0.9751 val_loss:1.0454 train_time:443920ms step_avg:68.30ms
+[2025-07-07 09:23:09] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:23:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
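Each of these evaluation blocks requests roughly 5,000 samples, yet the sampled set always comes out at 5,633. A minimal sketch of one plausible mechanism is below, assuming per-class quotas that are rounded up and floored at a minimum; both effects only push the total above the nominal target. All names here are hypothetical, and the exact figure of 5633 depends on class sizes the log does not show.

```python
import math
import random

def stratified_sample(samples_by_class, target=5000, min_per_class=1, seed=42):
    """Hypothetical sketch of the 'stratified sampling' the log mentions.

    Each class gets a quota proportional to its size, rounded up with
    math.ceil and floored at min_per_class. Both the ceil and the floor
    can only increase the total, which is one way a ~5000-sample request
    ends up larger (e.g. 5633); the real logic lives in the training
    script and is not recoverable from the log alone.
    """
    rng = random.Random(seed)
    total = sum(len(v) for v in samples_by_class.values())
    picked = []
    for cls, samples in samples_by_class.items():
        quota = max(min_per_class, math.ceil(target * len(samples) / total))
        picked.extend(rng.sample(samples, min(quota, len(samples))))
    return picked
```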
+[2025-07-07 09:23:10] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:28:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:28:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:28:29] [Rank 0] Total Loss: 5.4932
+[2025-07-07 09:28:29] [Rank 0] Total FTA: 0.7463
+[2025-07-07 09:28:29] [Rank 0] Group 0 Loss: 5.8901
+[2025-07-07 09:28:29] [Rank 0] Group 1 Loss: 5.1594
+[2025-07-07 09:28:29] [Rank 0] Group 2 Loss: 5.1357
+[2025-07-07 09:28:29] [Rank 0] Group 3 Loss: 5.6551
+[2025-07-07 09:28:29] [Rank 0] Group 4 Loss: 5.4156
+[2025-07-07 09:28:29] [Rank 0] Group 5 Loss: 5.4718
+[2025-07-07 09:28:29] [Rank 0] Group 6 Loss: 5.3713
+[2025-07-07 09:28:29] [Rank 0] Group 7 Loss: 5.5384
+[2025-07-07 09:28:29] [Rank 0] Group 8 Loss: 5.4551
+[2025-07-07 09:28:29] [Rank 0] Group 9 Loss: 5.4543
+[2025-07-07 09:28:29] [Rank 0] Group 10 Loss: 5.4894
+[2025-07-07 09:28:29] [Rank 0] Group 11 Loss: 5.4857
+[2025-07-07 09:28:29] [Rank 0] Group 0 FTA: 0.8401
+[2025-07-07 09:28:29] [Rank 0] Group 1 FTA: 0.6562
+[2025-07-07 09:28:29] [Rank 0] Group 2 FTA: 0.9089
+[2025-07-07 09:28:29] [Rank 0] Group 3 FTA: 0.6615
+[2025-07-07 09:28:29] [Rank 0] Group 4 FTA: 0.7734
+[2025-07-07 09:28:29] [Rank 0] Group 5 FTA: 0.7370
+[2025-07-07 09:28:29] [Rank 0] Group 6 FTA: 0.7474
+[2025-07-07 09:28:29] [Rank 0] Group 7 FTA: 0.7083
+[2025-07-07 09:28:29] [Rank 0] Group 8 FTA: 0.6823
+[2025-07-07 09:28:29] [Rank 0] Group 9 FTA: 0.6797
+[2025-07-07 09:28:29] [Rank 0] Group 10 FTA: 0.7129
+[2025-07-07 09:28:29] [Rank 0] Group 11 FTA: 0.7451
+[2025-07-07 09:28:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:28:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:28:31] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:28:31] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:28:31] [Rank 0] step:6501/10000 train_time:443931ms step_avg:68.29ms
+[2025-07-07 09:28:32] [Rank 0] step:6521/10000 train_time:444690ms step_avg:68.19ms
+[2025-07-07 09:28:34] [Rank 0] step:6541/10000 train_time:446053ms step_avg:68.19ms
+[2025-07-07 09:28:35] [Rank 0] step:6561/10000 train_time:447417ms step_avg:68.19ms
+[2025-07-07 09:28:36] [Rank 0] step:6581/10000 train_time:448783ms step_avg:68.19ms
+[2025-07-07 09:28:38] [Rank 0] step:6601/10000 train_time:450150ms step_avg:68.19ms
+[2025-07-07 09:28:39] [Rank 0] step:6621/10000 train_time:451516ms step_avg:68.19ms
+[2025-07-07 09:28:40] [Rank 0] step:6641/10000 train_time:452883ms step_avg:68.20ms
+[2025-07-07 09:28:42] [Rank 0] step:6661/10000 train_time:454251ms step_avg:68.20ms
+[2025-07-07 09:28:43] [Rank 0] step:6681/10000 train_time:455669ms step_avg:68.20ms
+[2025-07-07 09:28:45] [Rank 0] step:6701/10000 train_time:457087ms step_avg:68.21ms
+[2025-07-07 09:28:46] [Rank 0] step:6721/10000 train_time:458455ms step_avg:68.21ms
+[2025-07-07 09:28:47] [Rank 0] step:6741/10000 train_time:459822ms step_avg:68.21ms
+[2025-07-07 09:28:49] [Rank 0] step:6761/10000 train_time:461190ms step_avg:68.21ms
+[2025-07-07 09:28:50] [Rank 0] step:6781/10000 train_time:462560ms step_avg:68.21ms
+[2025-07-07 09:28:52] [Rank 0] step:6801/10000 train_time:463930ms step_avg:68.21ms
+[2025-07-07 09:28:53] [Rank 0] step:6821/10000 train_time:465298ms step_avg:68.22ms
+[2025-07-07 09:28:54] [Rank 0] step:6841/10000 train_time:467346ms step_avg:68.32ms
+[2025-07-07 09:28:56] [Rank 0] step:6861/10000 train_time:468084ms step_avg:68.22ms
+[2025-07-07 09:28:57] [Rank 0] step:6881/10000 train_time:469453ms step_avg:68.22ms
+[2025-07-07 09:28:58] [Rank 0] step:6901/10000 train_time:470822ms step_avg:68.23ms
+[2025-07-07 09:29:00] [Rank 0] step:6921/10000 train_time:472192ms step_avg:68.23ms
+[2025-07-07 09:29:01] [Rank 0] step:6941/10000 train_time:473561ms step_avg:68.23ms
+[2025-07-07 09:29:03] [Rank 0] step:6961/10000 train_time:474930ms step_avg:68.23ms
+[2025-07-07 09:29:04] [Rank 0] step:6981/10000 train_time:476300ms step_avg:68.23ms
+[2025-07-07 09:29:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:29:06] [Rank 0] PRINT: step:7000/10000 train_loss:0.9478 val_loss:1.0052 train_time:478293ms step_avg:68.33ms
+[2025-07-07 09:29:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:29:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 09:29:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:34:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:34:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:34:29] [Rank 0] Total Loss: 5.5003
+[2025-07-07 09:34:29] [Rank 0] Total FTA: 0.8353
+[2025-07-07 09:34:29] [Rank 0] Group 0 Loss: 5.9119
+[2025-07-07 09:34:29] [Rank 0] Group 1 Loss: 5.2921
+[2025-07-07 09:34:29] [Rank 0] Group 2 Loss: 5.2229
+[2025-07-07 09:34:29] [Rank 0] Group 3 Loss: 5.5550
+[2025-07-07 09:34:29] [Rank 0] Group 4 Loss: 5.4536
+[2025-07-07 09:34:29] [Rank 0] Group 5 Loss: 5.3768
+[2025-07-07 09:34:29] [Rank 0] Group 6 Loss: 5.3347
+[2025-07-07 09:34:29] [Rank 0] Group 7 Loss: 5.5896
+[2025-07-07 09:34:29] [Rank 0] Group 8 Loss: 5.4775
+[2025-07-07 09:34:29] [Rank 0] Group 9 Loss: 5.4759
+[2025-07-07 09:34:29] [Rank 0] Group 10 Loss: 5.4761
+[2025-07-07 09:34:29] [Rank 0] Group 11 Loss: 5.4720
+[2025-07-07 09:34:29] [Rank 0] Group 0 FTA: 0.8296
+[2025-07-07 09:34:29] [Rank 0] Group 1 FTA: 0.6615
+[2025-07-07 09:34:29] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 09:34:29] [Rank 0] Group 3 FTA: 0.9089
+[2025-07-07 09:34:29] [Rank 0] Group 4 FTA: 0.7708
+[2025-07-07 09:34:29] [Rank 0] Group 5 FTA: 0.8438
+[2025-07-07 09:34:29] [Rank 0] Group 6 FTA: 0.8880
+[2025-07-07 09:34:29] [Rank 0] Group 7 FTA: 0.8073
+[2025-07-07 09:34:29] [Rank 0] Group 8 FTA: 0.8411
+[2025-07-07 09:34:29] [Rank 0] Group 9 FTA: 0.8359
+[2025-07-07 09:34:29] [Rank 0] Group 10 FTA: 0.8301
+[2025-07-07 09:34:29] [Rank 0] Group 11 FTA: 0.8271
+[2025-07-07 09:34:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:34:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:34:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:34:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:34:30] [Rank 0] step:7001/10000 train_time:478306ms step_avg:68.32ms
+[2025-07-07 09:34:32] [Rank 0] step:7021/10000 train_time:479318ms step_avg:68.27ms
+[2025-07-07 09:34:33] [Rank 0] step:7041/10000 train_time:480466ms step_avg:68.24ms
+[2025-07-07 09:34:34] [Rank 0] step:7061/10000 train_time:481831ms step_avg:68.24ms
+[2025-07-07 09:34:36] [Rank 0] step:7081/10000 train_time:483197ms step_avg:68.24ms
+[2025-07-07 09:34:37] [Rank 0] step:7101/10000 train_time:484563ms step_avg:68.24ms
+[2025-07-07 09:34:38] [Rank 0] step:7121/10000 train_time:485930ms step_avg:68.24ms
+[2025-07-07 09:34:40] [Rank 0] step:7141/10000 train_time:487297ms step_avg:68.24ms
+[2025-07-07 09:34:41] [Rank 0] step:7161/10000 train_time:488664ms step_avg:68.24ms
+[2025-07-07 09:34:43] [Rank 0] step:7181/10000 train_time:490031ms step_avg:68.24ms
+[2025-07-07 09:34:44] [Rank 0] step:7201/10000 train_time:492082ms step_avg:68.34ms
+[2025-07-07 09:34:45] [Rank 0] step:7221/10000 train_time:492819ms step_avg:68.25ms
+[2025-07-07 09:34:47] [Rank 0] step:7241/10000 train_time:494189ms step_avg:68.25ms
+[2025-07-07 09:34:48] [Rank 0] step:7261/10000 train_time:495558ms step_avg:68.25ms
+[2025-07-07 09:34:49] [Rank 0] step:7281/10000 train_time:496928ms step_avg:68.25ms
+[2025-07-07 09:34:51] [Rank 0] step:7301/10000 train_time:498297ms step_avg:68.25ms
+[2025-07-07 09:34:52] [Rank 0] step:7321/10000 train_time:499668ms step_avg:68.25ms
+[2025-07-07 09:34:54] [Rank 0] step:7341/10000 train_time:501039ms step_avg:68.25ms
+[2025-07-07 09:34:55] [Rank 0] step:7361/10000 train_time:502410ms step_avg:68.25ms
+[2025-07-07 09:34:56] [Rank 0] step:7381/10000 train_time:503780ms step_avg:68.25ms
+[2025-07-07 09:34:58] [Rank 0] step:7401/10000 train_time:505172ms step_avg:68.26ms
+[2025-07-07 09:34:59] [Rank 0] step:7421/10000 train_time:506544ms step_avg:68.26ms
+[2025-07-07 09:35:00] [Rank 0] step:7441/10000 train_time:507915ms step_avg:68.26ms
+[2025-07-07 09:35:02] [Rank 0] step:7461/10000 train_time:509286ms step_avg:68.26ms
+[2025-07-07 09:35:03] [Rank 0] step:7481/10000 train_time:510658ms step_avg:68.26ms
+[2025-07-07 09:35:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:35:05] [Rank 0] PRINT: step:7500/10000 train_loss:0.9204 val_loss:0.9885 train_time:512653ms step_avg:68.35ms
+[2025-07-07 09:35:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:35:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
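The divisibility warning that precedes every 500-step validation pass above is plain integer arithmetic: 1,966,080 validation tokens do not split evenly into 262,144-token batches, so the trailing half batch is dropped. A quick check of the log's own numbers (the variable names are illustrative, not taken from the script):

```python
val_tokens = 1_966_080
val_batch_size = 262_144

full_batches = val_tokens // val_batch_size        # 7 full batches per pass
missed = val_tokens - full_batches * val_batch_size
print(full_batches, missed)                        # 7 131072 tokens skipped
```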
+[2025-07-07 09:35:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:40:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:40:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:40:28] [Rank 0] Total Loss: 5.6314
+[2025-07-07 09:40:28] [Rank 0] Total FTA: 0.8310
+[2025-07-07 09:40:28] [Rank 0] Group 0 Loss: 6.1385
+[2025-07-07 09:40:28] [Rank 0] Group 1 Loss: 5.2750
+[2025-07-07 09:40:28] [Rank 0] Group 2 Loss: 5.5033
+[2025-07-07 09:40:28] [Rank 0] Group 3 Loss: 5.7807
+[2025-07-07 09:40:28] [Rank 0] Group 4 Loss: 5.5428
+[2025-07-07 09:40:28] [Rank 0] Group 5 Loss: 5.5303
+[2025-07-07 09:40:28] [Rank 0] Group 6 Loss: 5.4396
+[2025-07-07 09:40:28] [Rank 0] Group 7 Loss: 5.6129
+[2025-07-07 09:40:28] [Rank 0] Group 8 Loss: 5.5481
+[2025-07-07 09:40:28] [Rank 0] Group 9 Loss: 5.6126
+[2025-07-07 09:40:28] [Rank 0] Group 10 Loss: 5.6234
+[2025-07-07 09:40:28] [Rank 0] Group 11 Loss: 5.5661
+[2025-07-07 09:40:28] [Rank 0] Group 0 FTA: 0.6541
+[2025-07-07 09:40:28] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 09:40:28] [Rank 0] Group 2 FTA: 0.8516
+[2025-07-07 09:40:28] [Rank 0] Group 3 FTA: 0.7422
+[2025-07-07 09:40:28] [Rank 0] Group 4 FTA: 0.9062
+[2025-07-07 09:40:28] [Rank 0] Group 5 FTA: 0.8802
+[2025-07-07 09:40:28] [Rank 0] Group 6 FTA: 0.8516
+[2025-07-07 09:40:28] [Rank 0] Group 7 FTA: 0.8594
+[2025-07-07 09:40:28] [Rank 0] Group 8 FTA: 0.8255
+[2025-07-07 09:40:28] [Rank 0] Group 9 FTA: 0.8398
+[2025-07-07 09:40:28] [Rank 0] Group 10 FTA: 0.8672
+[2025-07-07 09:40:28] [Rank 0] Group 11 FTA: 0.8428
+[2025-07-07 09:40:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:40:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:40:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:40:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:40:29] [Rank 0] step:7501/10000 train_time:512664ms step_avg:68.35ms
+[2025-07-07 09:40:31] [Rank 0] step:7521/10000 train_time:513431ms step_avg:68.27ms
+[2025-07-07 09:40:32] [Rank 0] step:7541/10000 train_time:514794ms step_avg:68.27ms
+[2025-07-07 09:40:34] [Rank 0] step:7561/10000 train_time:516832ms step_avg:68.35ms
+[2025-07-07 09:40:35] [Rank 0] step:7581/10000 train_time:517566ms step_avg:68.27ms
+[2025-07-07 09:40:36] [Rank 0] step:7601/10000 train_time:518933ms step_avg:68.27ms
+[2025-07-07 09:40:38] [Rank 0] step:7621/10000 train_time:520298ms step_avg:68.27ms
+[2025-07-07 09:40:39] [Rank 0] step:7641/10000 train_time:521666ms step_avg:68.27ms
+[2025-07-07 09:40:40] [Rank 0] step:7661/10000 train_time:523033ms step_avg:68.27ms
+[2025-07-07 09:40:42] [Rank 0] step:7681/10000 train_time:524401ms step_avg:68.27ms
+[2025-07-07 09:40:43] [Rank 0] step:7701/10000 train_time:525769ms step_avg:68.27ms
+[2025-07-07 09:40:44] [Rank 0] step:7721/10000 train_time:527137ms step_avg:68.27ms
+[2025-07-07 09:40:46] [Rank 0] step:7741/10000 train_time:528551ms step_avg:68.28ms
+[2025-07-07 09:40:47] [Rank 0] step:7761/10000 train_time:529920ms step_avg:68.28ms
+[2025-07-07 09:40:49] [Rank 0] step:7781/10000 train_time:531289ms step_avg:68.28ms
+[2025-07-07 09:40:50] [Rank 0] step:7801/10000 train_time:532659ms step_avg:68.28ms
+[2025-07-07 09:40:51] [Rank 0] step:7821/10000 train_time:534028ms step_avg:68.28ms
+[2025-07-07 09:40:53] [Rank 0] step:7841/10000 train_time:535398ms step_avg:68.28ms
+[2025-07-07 09:40:54] [Rank 0] step:7861/10000 train_time:536767ms step_avg:68.28ms
+[2025-07-07 09:40:55] [Rank 0] step:7881/10000 train_time:538136ms step_avg:68.28ms
+[2025-07-07 09:40:57] [Rank 0] step:7901/10000 train_time:539507ms step_avg:68.28ms
+[2025-07-07 09:40:58] [Rank 0] step:7921/10000 train_time:540880ms step_avg:68.28ms
+[2025-07-07 09:41:00] [Rank 0] step:7941/10000 train_time:542274ms step_avg:68.29ms
+[2025-07-07 09:41:01] [Rank 0] step:7961/10000 train_time:543645ms step_avg:68.29ms
+[2025-07-07 09:41:02] [Rank 0] step:7981/10000 train_time:545017ms step_avg:68.29ms
+[2025-07-07 09:41:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:41:04] [Rank 0] PRINT: step:8000/10000 train_loss:0.8988 val_loss:0.9941 train_time:547013ms step_avg:68.38ms
+[2025-07-07 09:41:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:41:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 09:41:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:46:26] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:46:26] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:46:26] [Rank 0] Total Loss: 5.6733
+[2025-07-07 09:46:27] [Rank 0] Total FTA: 0.8294
+[2025-07-07 09:46:27] [Rank 0] Group 0 Loss: 6.0934
+[2025-07-07 09:46:27] [Rank 0] Group 1 Loss: 5.3587
+[2025-07-07 09:46:27] [Rank 0] Group 2 Loss: 5.4062
+[2025-07-07 09:46:27] [Rank 0] Group 3 Loss: 5.7727
+[2025-07-07 09:46:27] [Rank 0] Group 4 Loss: 5.5962
+[2025-07-07 09:46:27] [Rank 0] Group 5 Loss: 5.6342
+[2025-07-07 09:46:27] [Rank 0] Group 6 Loss: 5.4986
+[2025-07-07 09:46:27] [Rank 0] Group 7 Loss: 5.7298
+[2025-07-07 09:46:27] [Rank 0] Group 8 Loss: 5.6491
+[2025-07-07 09:46:27] [Rank 0] Group 9 Loss: 5.6281
+[2025-07-07 09:46:27] [Rank 0] Group 10 Loss: 5.6519
+[2025-07-07 09:46:27] [Rank 0] Group 11 Loss: 5.6575
+[2025-07-07 09:46:27] [Rank 0] Group 0 FTA: 0.6554
+[2025-07-07 09:46:27] [Rank 0] Group 1 FTA: 0.8516
+[2025-07-07 09:46:27] [Rank 0] Group 2 FTA: 0.9193
+[2025-07-07 09:46:27] [Rank 0] Group 3 FTA: 0.8880
+[2025-07-07 09:46:27] [Rank 0] Group 4 FTA: 0.8542
+[2025-07-07 09:46:27] [Rank 0] Group 5 FTA: 0.8672
+[2025-07-07 09:46:27] [Rank 0] Group 6 FTA: 0.8724
+[2025-07-07 09:46:27] [Rank 0] Group 7 FTA: 0.8229
+[2025-07-07 09:46:27] [Rank 0] Group 8 FTA: 0.8411
+[2025-07-07 09:46:27] [Rank 0] Group 9 FTA: 0.8594
+[2025-07-07 09:46:27] [Rank 0] Group 10 FTA: 0.8398
+[2025-07-07 09:46:27] [Rank 0] Group 11 FTA: 0.8418
+[2025-07-07 09:46:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:46:27] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:46:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:46:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:46:28] [Rank 0] step:8001/10000 train_time:547023ms step_avg:68.37ms
+[2025-07-07 09:46:29] [Rank 0] step:8021/10000 train_time:547771ms step_avg:68.29ms
+[2025-07-07 09:46:31] [Rank 0] step:8041/10000 train_time:549135ms step_avg:68.29ms
+[2025-07-07 09:46:32] [Rank 0] step:8061/10000 train_time:550501ms step_avg:68.29ms
+[2025-07-07 09:46:34] [Rank 0] step:8081/10000 train_time:551867ms step_avg:68.29ms
+[2025-07-07 09:46:35] [Rank 0] step:8101/10000 train_time:553234ms step_avg:68.29ms
+[2025-07-07 09:46:36] [Rank 0] step:8121/10000 train_time:554626ms step_avg:68.30ms
+[2025-07-07 09:46:38] [Rank 0] step:8141/10000 train_time:555993ms step_avg:68.30ms
+[2025-07-07 09:46:39] [Rank 0] step:8161/10000 train_time:557362ms step_avg:68.30ms
+[2025-07-07 09:46:40] [Rank 0] step:8181/10000 train_time:558731ms step_avg:68.30ms
+[2025-07-07 09:46:42] [Rank 0] step:8201/10000 train_time:560099ms step_avg:68.30ms
+[2025-07-07 09:46:43] [Rank 0] step:8221/10000 train_time:561468ms step_avg:68.30ms
+[2025-07-07 09:46:45] [Rank 0] step:8241/10000 train_time:562837ms step_avg:68.30ms
+[2025-07-07 09:46:46] [Rank 0] step:8261/10000 train_time:564207ms step_avg:68.30ms
+[2025-07-07 09:46:47] [Rank 0] step:8281/10000 train_time:565829ms step_avg:68.33ms
+[2025-07-07 09:46:49] [Rank 0] step:8301/10000 train_time:566980ms step_avg:68.30ms
+[2025-07-07 09:46:50] [Rank 0] step:8321/10000 train_time:568351ms step_avg:68.30ms
+[2025-07-07 09:46:51] [Rank 0] step:8341/10000 train_time:569722ms step_avg:68.30ms
+[2025-07-07 09:46:53] [Rank 0] step:8361/10000 train_time:571092ms step_avg:68.30ms
+[2025-07-07 09:46:54] [Rank 0] step:8381/10000 train_time:572462ms step_avg:68.30ms
+[2025-07-07 09:46:56] [Rank 0] step:8401/10000 train_time:573833ms step_avg:68.31ms
+[2025-07-07 09:46:57] [Rank 0] step:8421/10000 train_time:575206ms step_avg:68.31ms
+[2025-07-07 09:46:58] [Rank 0] step:8441/10000 train_time:576579ms step_avg:68.31ms
+[2025-07-07 09:47:00] [Rank 0] step:8461/10000 train_time:577997ms step_avg:68.31ms
+[2025-07-07 09:47:01] [Rank 0] step:8481/10000 train_time:579365ms step_avg:68.31ms
+[2025-07-07 09:47:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:47:03] [Rank 0] PRINT: step:8500/10000 train_loss:0.8812 val_loss:0.9582 train_time:581362ms step_avg:68.40ms
+[2025-07-07 09:47:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:47:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
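In all of these lines, step_avg is simply the cumulative train_time divided by the number of completed steps, which is why it barely moves between adjacent entries and drifts up slightly after each evaluation pause. A minimal check against two values taken from the block above:

```python
def step_avg_ms(train_time_ms: int, step: int) -> float:
    # Cumulative training wall-clock divided by completed steps.
    return train_time_ms / step

assert round(step_avg_ms(581362, 8500), 2) == 68.40  # the step:8500 PRINT line
assert round(step_avg_ms(579365, 8481), 2) == 68.31  # the step:8481 line
```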
+[2025-07-07 09:47:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:52:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:52:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:52:27] [Rank 0] Total Loss: 5.6517
+[2025-07-07 09:52:27] [Rank 0] Total FTA: 0.8922
+[2025-07-07 09:52:27] [Rank 0] Group 0 Loss: 6.1573
+[2025-07-07 09:52:27] [Rank 0] Group 1 Loss: 5.2165
+[2025-07-07 09:52:27] [Rank 0] Group 2 Loss: 5.4315
+[2025-07-07 09:52:27] [Rank 0] Group 3 Loss: 5.7224
+[2025-07-07 09:52:27] [Rank 0] Group 4 Loss: 5.5747
+[2025-07-07 09:52:27] [Rank 0] Group 5 Loss: 5.5895
+[2025-07-07 09:52:27] [Rank 0] Group 6 Loss: 5.4393
+[2025-07-07 09:52:27] [Rank 0] Group 7 Loss: 5.6928
+[2025-07-07 09:52:27] [Rank 0] Group 8 Loss: 5.6216
+[2025-07-07 09:52:27] [Rank 0] Group 9 Loss: 5.6138
+[2025-07-07 09:52:27] [Rank 0] Group 10 Loss: 5.5946
+[2025-07-07 09:52:27] [Rank 0] Group 11 Loss: 5.6570
+[2025-07-07 09:52:27] [Rank 0] Group 0 FTA: 0.8283
+[2025-07-07 09:52:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 09:52:27] [Rank 0] Group 2 FTA: 0.9323
+[2025-07-07 09:52:27] [Rank 0] Group 3 FTA: 0.8802
+[2025-07-07 09:52:27] [Rank 0] Group 4 FTA: 0.9062
+[2025-07-07 09:52:27] [Rank 0] Group 5 FTA: 0.9141
+[2025-07-07 09:52:27] [Rank 0] Group 6 FTA: 0.9089
+[2025-07-07 09:52:27] [Rank 0] Group 7 FTA: 0.8594
+[2025-07-07 09:52:27] [Rank 0] Group 8 FTA: 0.9115
+[2025-07-07 09:52:27] [Rank 0] Group 9 FTA: 0.8945
+[2025-07-07 09:52:27] [Rank 0] Group 10 FTA: 0.8535
+[2025-07-07 09:52:27] [Rank 0] Group 11 FTA: 0.8936
+[2025-07-07 09:52:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 09:52:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
+[2025-07-07 09:52:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 09:52:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 09:52:28] [Rank 0] step:8501/10000 train_time:581374ms step_avg:68.39ms
+[2025-07-07 09:52:30] [Rank 0] step:8521/10000 train_time:582132ms step_avg:68.32ms
+[2025-07-07 09:52:31] [Rank 0] step:8541/10000 train_time:583496ms step_avg:68.32ms
+[2025-07-07 09:52:33] [Rank 0] step:8561/10000 train_time:584862ms step_avg:68.32ms
+[2025-07-07 09:52:34] [Rank 0] step:8581/10000 train_time:586229ms step_avg:68.32ms
+[2025-07-07 09:52:35] [Rank 0] step:8601/10000 train_time:587596ms step_avg:68.32ms
+[2025-07-07 09:52:37] [Rank 0] step:8621/10000 train_time:588964ms step_avg:68.32ms
+[2025-07-07 09:52:38] [Rank 0] step:8641/10000 train_time:591009ms step_avg:68.40ms
+[2025-07-07 09:52:39] [Rank 0] step:8661/10000 train_time:591746ms step_avg:68.32ms
+[2025-07-07 09:52:41] [Rank 0] step:8681/10000 train_time:593114ms step_avg:68.32ms
+[2025-07-07 09:52:42] [Rank 0] step:8701/10000 train_time:594482ms step_avg:68.32ms
+[2025-07-07 09:52:44] [Rank 0] step:8721/10000 train_time:595852ms step_avg:68.32ms
+[2025-07-07 09:52:45] [Rank 0] step:8741/10000 train_time:597221ms step_avg:68.32ms
+[2025-07-07 09:52:46] [Rank 0] step:8761/10000 train_time:598591ms step_avg:68.32ms
+[2025-07-07 09:52:48] [Rank 0] step:8781/10000 train_time:599960ms step_avg:68.32ms
+[2025-07-07 09:52:49] [Rank 0] step:8801/10000 train_time:601330ms step_avg:68.33ms
+[2025-07-07 09:52:50] [Rank 0] step:8821/10000 train_time:602700ms step_avg:68.33ms
+[2025-07-07 09:52:52] [Rank 0] step:8841/10000 train_time:604100ms step_avg:68.33ms
+[2025-07-07 09:52:53] [Rank 0] step:8861/10000 train_time:605472ms step_avg:68.33ms
+[2025-07-07 09:52:55] [Rank 0] step:8881/10000 train_time:606843ms step_avg:68.33ms
+[2025-07-07 09:52:56] [Rank 0] step:8901/10000 train_time:608237ms step_avg:68.33ms
+[2025-07-07 09:52:57] [Rank 0] step:8921/10000 train_time:609609ms step_avg:68.33ms
+[2025-07-07 09:52:59] [Rank 0] step:8941/10000 train_time:611012ms step_avg:68.34ms
+[2025-07-07 09:53:00] [Rank 0] step:8961/10000 train_time:612383ms step_avg:68.34ms
+[2025-07-07 09:53:01] [Rank 0] step:8981/10000 train_time:613756ms step_avg:68.34ms
+[2025-07-07 09:53:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:53:04] [Rank 0] PRINT: step:9000/10000 train_loss:0.8691 val_loss:0.9384 train_time:615753ms step_avg:68.42ms
+[2025-07-07 09:53:04] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:53:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 09:53:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 09:53:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 09:58:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 09:58:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 09:58:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 09:58:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 09:58:25] [Rank 0] Total Loss: 5.6767 +[2025-07-07 09:58:25] [Rank 0] Total Loss: 5.6767 +[2025-07-07 09:58:25] [Rank 0] Total FTA: 0.9279 +[2025-07-07 09:58:25] [Rank 0] Total FTA: 0.9279 +[2025-07-07 09:58:25] [Rank 0] Group 0 Loss: 5.9692 +[2025-07-07 09:58:25] [Rank 0] Group 0 Loss: 5.9692 +[2025-07-07 09:58:25] [Rank 0] Group 1 Loss: 5.4888 +[2025-07-07 09:58:25] [Rank 0] Group 1 Loss: 5.4888 +[2025-07-07 09:58:25] [Rank 0] Group 2 Loss: 5.3880 +[2025-07-07 09:58:25] [Rank 0] Group 2 Loss: 5.3880 +[2025-07-07 09:58:25] [Rank 0] Group 3 Loss: 5.7695 +[2025-07-07 09:58:25] [Rank 0] Group 3 Loss: 5.7695 +[2025-07-07 09:58:25] [Rank 0] Group 4 Loss: 5.6335 +[2025-07-07 09:58:25] [Rank 0] Group 4 Loss: 5.6335 +[2025-07-07 09:58:25] [Rank 0] Group 5 Loss: 5.6410 +[2025-07-07 09:58:25] [Rank 0] Group 5 Loss: 5.6410 +[2025-07-07 09:58:25] [Rank 0] Group 6 Loss: 5.4952 +[2025-07-07 09:58:25] [Rank 0] Group 6 Loss: 5.4952 +[2025-07-07 09:58:25] [Rank 0] Group 7 Loss: 5.6221 +[2025-07-07 09:58:25] [Rank 0] Group 7 Loss: 5.6221 +[2025-07-07 09:58:25] [Rank 0] Group 8 Loss: 5.7131 +[2025-07-07 09:58:25] [Rank 0] Group 8 Loss: 5.7131 +[2025-07-07 09:58:25] [Rank 0] Group 9 Loss: 5.6303 +[2025-07-07 09:58:25] [Rank 0] Group 9 Loss: 5.6303 +[2025-07-07 09:58:25] [Rank 0] Group 10 Loss: 5.7039 +[2025-07-07 09:58:25] [Rank 0] Group 10 Loss: 5.7039 +[2025-07-07 09:58:25] [Rank 0] Group 11 Loss: 5.7032 +[2025-07-07 09:58:25] [Rank 0] Group 11 Loss: 5.7032 +[2025-07-07 09:58:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-07 09:58:26] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-07 09:58:26] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 09:58:26] [Rank 0] Group 1 FTA: 1.0000 +[2025-07-07 09:58:26] [Rank 0] Group 2 FTA: 0.8151 +[2025-07-07 09:58:26] [Rank 0] Group 2 FTA: 0.8151 +[2025-07-07 09:58:26] [Rank 0] Group 3 FTA: 0.9089 +[2025-07-07 09:58:26] [Rank 0] Group 3 FTA: 0.9089 +[2025-07-07 09:58:26] [Rank 0] Group 4 FTA: 0.9167 +[2025-07-07 09:58:26] [Rank 0] Group 4 FTA: 0.9167 +[2025-07-07 09:58:26] [Rank 0] Group 5 FTA: 0.9115 +[2025-07-07 09:58:26] [Rank 0] Group 5 FTA: 0.9115 +[2025-07-07 09:58:26] [Rank 0] Group 6 FTA: 0.9193 +[2025-07-07 09:58:26] [Rank 0] Group 6 FTA: 0.9193 +[2025-07-07 09:58:26] [Rank 0] Group 7 FTA: 0.9089 +[2025-07-07 09:58:26] [Rank 0] Group 7 FTA: 0.9089 +[2025-07-07 09:58:26] [Rank 0] Group 8 FTA: 0.9193 +[2025-07-07 09:58:26] [Rank 0] Group 8 FTA: 0.9193 +[2025-07-07 09:58:26] [Rank 0] Group 9 FTA: 0.9219 +[2025-07-07 09:58:26] [Rank 0] Group 9 FTA: 0.9219 +[2025-07-07 09:58:26] [Rank 0] Group 10 FTA: 0.9219 +[2025-07-07 09:58:26] [Rank 0] Group 10 FTA: 0.9219 +[2025-07-07 09:58:26] [Rank 0] Group 11 FTA: 0.9248 +[2025-07-07 09:58:26] [Rank 0] Group 11 FTA: 0.9248 +[2025-07-07 09:58:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png +[2025-07-07 09:58:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png +[2025-07-07 09:58:26] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png +[2025-07-07 09:58:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png +[2025-07-07 09:58:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png +[2025-07-07 09:58:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png +[2025-07-07 09:58:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png +[2025-07-07 09:58:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png +[2025-07-07 09:58:28] [Rank 0] step:9001/10000 train_time:615875ms step_avg:68.42ms +[2025-07-07 09:58:28] [Rank 0] step:9001/10000 train_time:615875ms step_avg:68.42ms +[2025-07-07 09:58:29] [Rank 0] step:9021/10000 train_time:617238ms step_avg:68.42ms +[2025-07-07 09:58:29] [Rank 0] step:9021/10000 train_time:617238ms step_avg:68.42ms +[2025-07-07 09:58:30] [Rank 0] step:9041/10000 train_time:618604ms step_avg:68.42ms +[2025-07-07 09:58:30] [Rank 0] step:9041/10000 train_time:618604ms step_avg:68.42ms +[2025-07-07 09:58:32] [Rank 0] step:9061/10000 train_time:619969ms step_avg:68.42ms +[2025-07-07 09:58:32] [Rank 0] step:9061/10000 train_time:619969ms step_avg:68.42ms +[2025-07-07 09:58:33] [Rank 0] step:9081/10000 train_time:621336ms step_avg:68.42ms +[2025-07-07 09:58:33] [Rank 0] step:9081/10000 train_time:621336ms step_avg:68.42ms +[2025-07-07 09:58:35] [Rank 0] step:9101/10000 train_time:622702ms step_avg:68.42ms +[2025-07-07 09:58:35] [Rank 0] step:9101/10000 train_time:622702ms step_avg:68.42ms +[2025-07-07 09:58:36] [Rank 0] step:9121/10000 train_time:624070ms step_avg:68.42ms +[2025-07-07 09:58:36] [Rank 0] step:9121/10000 train_time:624070ms step_avg:68.42ms +[2025-07-07 09:58:37] [Rank 0] step:9141/10000 train_time:625437ms step_avg:68.42ms +[2025-07-07 09:58:37] [Rank 0] step:9141/10000 train_time:625437ms step_avg:68.42ms +[2025-07-07 09:58:39] [Rank 0] step:9161/10000 train_time:626805ms step_avg:68.42ms +[2025-07-07 09:58:39] [Rank 0] step:9161/10000 train_time:626805ms step_avg:68.42ms +[2025-07-07 09:58:40] [Rank 0] step:9181/10000 train_time:628424ms step_avg:68.45ms +[2025-07-07 09:58:40] [Rank 0] step:9181/10000 train_time:628424ms step_avg:68.45ms +[2025-07-07 09:58:41] [Rank 0] step:9201/10000 train_time:629589ms step_avg:68.43ms +[2025-07-07 09:58:41] [Rank 0] step:9201/10000 train_time:629589ms step_avg:68.43ms +[2025-07-07 09:58:43] [Rank 0] step:9221/10000 train_time:630957ms step_avg:68.43ms +[2025-07-07 09:58:43] [Rank 0] step:9221/10000 train_time:630957ms step_avg:68.43ms +[2025-07-07 09:58:44] [Rank 0] step:9241/10000 train_time:632325ms step_avg:68.43ms +[2025-07-07 09:58:44] [Rank 0] step:9241/10000 train_time:632325ms step_avg:68.43ms +[2025-07-07 09:58:46] [Rank 0] step:9261/10000 train_time:633694ms step_avg:68.43ms +[2025-07-07 09:58:46] [Rank 0] step:9261/10000 train_time:633694ms step_avg:68.43ms +[2025-07-07 09:58:47] [Rank 0] step:9281/10000 train_time:635063ms step_avg:68.43ms +[2025-07-07 09:58:47] [Rank 0] step:9281/10000 train_time:635063ms step_avg:68.43ms +[2025-07-07 09:58:48] [Rank 0] step:9301/10000 train_time:636434ms step_avg:68.43ms +[2025-07-07 09:58:48] [Rank 0] 
+[2025-07-07 09:58:50] [Rank 0] step:9321/10000 train_time:637804ms step_avg:68.43ms
+[2025-07-07 09:58:51] [Rank 0] step:9341/10000 train_time:639175ms step_avg:68.43ms
+[2025-07-07 09:58:52] [Rank 0] step:9361/10000 train_time:641204ms step_avg:68.50ms
+[2025-07-07 09:58:54] [Rank 0] step:9381/10000 train_time:641943ms step_avg:68.43ms
+[2025-07-07 09:58:55] [Rank 0] step:9401/10000 train_time:643314ms step_avg:68.43ms
+[2025-07-07 09:58:57] [Rank 0] step:9421/10000 train_time:644686ms step_avg:68.43ms
+[2025-07-07 09:58:58] [Rank 0] step:9441/10000 train_time:646059ms step_avg:68.43ms
+[2025-07-07 09:58:59] [Rank 0] step:9461/10000 train_time:647432ms step_avg:68.43ms
+[2025-07-07 09:59:01] [Rank 0] step:9481/10000 train_time:648805ms step_avg:68.43ms
+[2025-07-07 09:59:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:59:03] [Rank 0] PRINT: step:9500/10000 train_loss:0.8567 val_loss:0.9391 train_time:650802ms step_avg:68.51ms
+[2025-07-07 09:59:03] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:59:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
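
(Editor's note: the sampler asks for ~5000 items but keeps 5633 because the
per-class quota is floored at one item, so every class stays represented. The
sketch below mirrors the stratified-sampling loop in run_detailed_evaluation()
further down; class sizes would come from the real dataset:)

def stratified_size(class_sizes, num_samples):
    # max(1, ...) keeps at least one sample per class, which can push the
    # total above the requested num_samples when many classes are small.
    ratio = num_samples / sum(class_sizes)
    return sum(max(1, int(n * ratio)) for n in class_sizes)
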
+[2025-07-07 09:59:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 10:04:27] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 10:04:27] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 10:04:27] [Rank 0] Total Loss: 5.7092
+[2025-07-07 10:04:27] [Rank 0] Total FTA: 0.9526
+[2025-07-07 10:04:27] [Rank 0] Group 0 Loss: 6.1968
+[2025-07-07 10:04:27] [Rank 0] Group 1 Loss: 5.5046
+[2025-07-07 10:04:27] [Rank 0] Group 2 Loss: 5.3058
+[2025-07-07 10:04:27] [Rank 0] Group 3 Loss: 5.7901
+[2025-07-07 10:04:27] [Rank 0] Group 4 Loss: 5.6557
+[2025-07-07 10:04:27] [Rank 0] Group 5 Loss: 5.7081
+[2025-07-07 10:04:27] [Rank 0] Group 6 Loss: 5.4973
+[2025-07-07 10:04:27] [Rank 0] Group 7 Loss: 5.6851
+[2025-07-07 10:04:27] [Rank 0] Group 8 Loss: 5.7038
+[2025-07-07 10:04:27] [Rank 0] Group 9 Loss: 5.7036
+[2025-07-07 10:04:27] [Rank 0] Group 10 Loss: 5.6288
+[2025-07-07 10:04:27] [Rank 0] Group 11 Loss: 5.6932
+[2025-07-07 10:04:27] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 10:04:27] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 10:04:27] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 10:04:27] [Rank 0] Group 3 FTA: 0.9297
+[2025-07-07 10:04:27] [Rank 0] Group 4 FTA: 0.9896
+[2025-07-07 10:04:27] [Rank 0] Group 5 FTA: 0.9427
+[2025-07-07 10:04:27] [Rank 0] Group 6 FTA: 0.9089
+[2025-07-07 10:04:27] [Rank 0] Group 7 FTA: 0.9349
+[2025-07-07 10:04:27] [Rank 0] Group 8 FTA: 0.8958
+[2025-07-07 10:04:27] [Rank 0] Group 9 FTA: 0.9414
+[2025-07-07 10:04:27] [Rank 0] Group 10 FTA: 0.9395
+[2025-07-07 10:04:27] [Rank 0] Group 11 FTA: 0.9336
+[2025-07-07 10:04:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 10:04:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
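
(Editor's note: the twelve groups reported above come from
generate_powerlaw_selection_counts() in the script below: group 0 holds one
class, and each later group g holds 2**(g-1) classes with 2**(m-g) samples per
class. Groups 0..11 therefore suggest m = 11, an inference not stated in the
log. A sketch of the implied layout:)

def group_layout(m: int) -> None:
    # Prints the class/sample layout implied by generate_powerlaw_selection_counts(m).
    for g in range(m + 1):
        num_classes = 1 if g == 0 else 2 ** (g - 1)
        print(f"group {g}: {num_classes} classes x {2 ** (m - g)} samples/class")
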
+[2025-07-07 10:04:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 10:04:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 10:04:28] [Rank 0] step:9501/10000 train_time:650812ms step_avg:68.50ms
+[2025-07-07 10:04:30] [Rank 0] step:9521/10000 train_time:651575ms step_avg:68.44ms
+[2025-07-07 10:04:31] [Rank 0] step:9541/10000 train_time:653194ms step_avg:68.46ms
+[2025-07-07 10:04:33] [Rank 0] step:9561/10000 train_time:654342ms step_avg:68.44ms
+[2025-07-07 10:04:34] [Rank 0] step:9581/10000 train_time:655709ms step_avg:68.44ms
+[2025-07-07 10:04:35] [Rank 0] step:9601/10000 train_time:657076ms step_avg:68.44ms
+[2025-07-07 10:04:37] [Rank 0] step:9621/10000 train_time:658444ms step_avg:68.44ms
+[2025-07-07 10:04:38] [Rank 0] step:9641/10000 train_time:659811ms step_avg:68.44ms
+[2025-07-07 10:04:39] [Rank 0] step:9661/10000 train_time:661180ms step_avg:68.44ms
+[2025-07-07 10:04:41] [Rank 0] step:9681/10000 train_time:662549ms step_avg:68.44ms
+[2025-07-07 10:04:42] [Rank 0] step:9701/10000 train_time:663918ms step_avg:68.44ms
+[2025-07-07 10:04:44] [Rank 0] step:9721/10000 train_time:665334ms step_avg:68.44ms
+[2025-07-07 10:04:45] [Rank 0] step:9741/10000 train_time:666696ms step_avg:68.44ms
+[2025-07-07 10:04:46] [Rank 0] step:9761/10000 train_time:668065ms step_avg:68.44ms
+[2025-07-07 10:04:48] [Rank 0] step:9781/10000 train_time:669436ms step_avg:68.44ms
+[2025-07-07 10:04:49] [Rank 0] step:9801/10000 train_time:670806ms step_avg:68.44ms
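
(Editor's note: the detailed evaluation appears to be excluded from the
train_time counter; between the step-9500 validation print at 09:59:03 and the
step:9501 line at 10:04:28 roughly 325 s of wall clock elapse while the timer
advances only 10 ms. The arithmetic, from the timestamps above:)

wall_s = (10 * 3600 + 4 * 60 + 28) - (9 * 3600 + 59 * 60 + 3)  # = 325 s of evaluation
timer_ms = 650812 - 650802                                     # = 10 ms of train_time
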
+[2025-07-07 10:04:50] [Rank 0] step:9821/10000 train_time:672176ms step_avg:68.44ms
+[2025-07-07 10:04:52] [Rank 0] step:9841/10000 train_time:673548ms step_avg:68.44ms
+[2025-07-07 10:04:53] [Rank 0] step:9861/10000 train_time:674920ms step_avg:68.44ms
+[2025-07-07 10:04:55] [Rank 0] step:9881/10000 train_time:676291ms step_avg:68.44ms
+[2025-07-07 10:04:56] [Rank 0] step:9901/10000 train_time:678342ms step_avg:68.51ms
+[2025-07-07 10:04:57] [Rank 0] step:9921/10000 train_time:679081ms step_avg:68.45ms
+[2025-07-07 10:04:59] [Rank 0] step:9941/10000 train_time:680454ms step_avg:68.45ms
+[2025-07-07 10:05:00] [Rank 0] step:9961/10000 train_time:681827ms step_avg:68.45ms
+[2025-07-07 10:05:01] [Rank 0] step:9981/10000 train_time:683202ms step_avg:68.45ms
+[2025-07-07 10:05:03] [Rank 0] step:10000/10000 train_time:684508ms step_avg:68.45ms
+[2025-07-07 10:05:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 10:05:04] [Rank 0] PRINT: step:10000/10000 train_loss:0.8518 val_loss:0.9305 train_time:685208ms step_avg:68.52ms
+[2025-07-07 10:05:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 10:05:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
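
(Editor's note: the arithmetic behind the recurring divisibility warning:
1966080 / 262144 = 7.5, so only seven full validation batches fit and the
remainder is skipped. If val_batch_size is world_size * val_seq_len with
val_seq_len = 65536 from the config, this run used 4 ranks; that part is an
inference, as the batch construction is outside this excerpt:)

val_tokens, val_batch_size = 1966080, 262144
full_batches = val_tokens // val_batch_size           # 7
missed = val_tokens - full_batches * val_batch_size   # 131072 tokens skipped
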
+[2025-07-07 10:05:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 10:10:24] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 10:10:24] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 10:10:24] [Rank 0] Total Loss: 5.6940
+[2025-07-07 10:10:24] [Rank 0] Total FTA: 0.9434
+[2025-07-07 10:10:24] [Rank 0] Group 0 Loss: 6.0699
+[2025-07-07 10:10:24] [Rank 0] Group 1 Loss: 5.4922
+[2025-07-07 10:10:24] [Rank 0] Group 2 Loss: 5.2974
+[2025-07-07 10:10:24] [Rank 0] Group 3 Loss: 5.8481
+[2025-07-07 10:10:24] [Rank 0] Group 4 Loss: 5.6702
+[2025-07-07 10:10:24] [Rank 0] Group 5 Loss: 5.5913
+[2025-07-07 10:10:24] [Rank 0] Group 6 Loss: 5.5078
+[2025-07-07 10:10:24] [Rank 0] Group 7 Loss: 5.7748
+[2025-07-07 10:10:24] [Rank 0] Group 8 Loss: 5.7010
+[2025-07-07 10:10:24] [Rank 0] Group 9 Loss: 5.6454
+[2025-07-07 10:10:24] [Rank 0] Group 10 Loss: 5.6582
+[2025-07-07 10:10:24] [Rank 0] Group 11 Loss: 5.6928
+[2025-07-07 10:10:24] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 10:10:24] [Rank 0] Group 1 FTA: 0.8203
+[2025-07-07 10:10:24] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 10:10:24] [Rank 0] Group 3 FTA: 0.8464
+[2025-07-07 10:10:24] [Rank 0] Group 4 FTA: 0.9792
+[2025-07-07 10:10:24] [Rank 0] Group 5 FTA: 0.9401
+[2025-07-07 10:10:24] [Rank 0] Group 6 FTA: 0.9505
+[2025-07-07 10:10:24] [Rank 0] Group 7 FTA: 0.9583
+[2025-07-07 10:10:24] [Rank 0] Group 8 FTA: 0.9427
+[2025-07-07 10:10:24] [Rank 0] Group 9 FTA: 0.9531
+[2025-07-07 10:10:24] [Rank 0] Group 10 FTA: 0.9395
+[2025-07-07 10:10:24] [Rank 0] Group 11 FTA: 0.9414
+[2025-07-07 10:10:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_loss_curves.png
+[2025-07-07 10:10:25] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/per_class_acc_curves.png
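
(Editor's note: Total FTA (here 0.9434) is a sample-weighted aggregate, not the
unweighted mean of the twelve group FTAs (about 0.939 for this block). A sketch
of the aggregation as implemented in run_detailed_evaluation() below, with the
zero-denominator guard simplified to max(1, ...):)

def total_fta(group_correct: dict, group_total: dict) -> float:
    # Sum of correct predictions over sum of evaluated samples across groups.
    return sum(group_correct.values()) / max(1, sum(group_total.values()))
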
+[2025-07-07 10:10:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_loss_curve.png
+[2025-07-07 10:10:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_42/total_acc_curve.png
+[2025-07-07 10:10:25] [Rank 0] step:10001/10000 train_time:685219ms step_avg:68.52ms
+[2025-07-07 10:10:25] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 10:10:25 2025 ---
+[2025-07-07 10:10:25] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10316 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/config.json b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..3a59f2fdaf21b81a2a772a7da3f37c0f312b157f
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 45,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "480de1ac-7a28-4eaf-9c7c-5f9902193512",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..901bca9bcdcf424e6ddd4a140fb7da0b126c0cf4
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8d19085403456e9c58c146c587965659cc45bf9c934fd7c78fe3430c6f4b52c
+size 480600
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..7ac555581855f649996bb5f5fea20ff7d671967c
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b7dc8ce780d5b2753d7e56c11ff9e9f95353618f4edb9f98b213338a863cad1
+size 459845
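
(Editor's note: the three-line PNG bodies in this diff are Git LFS pointer
files, not image data; the actual plots live in LFS storage keyed by the
sha256 oid. A minimal parser for the pointer format shown above, for
illustration only:)

def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    assert fields["version"].startswith("https://git-lfs.github.com/spec/v1")
    return {"oid": fields["oid"], "size": int(fields["size"])}
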
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..fa6ea441d116d069e0b207efd11f23cb8197f4f7
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f363ad28f1de7c638fd53a6780d2433f25d6bd106b07a626a3cfb257706dbf0b
+size 111172
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..73a4f85df3a99c8395cd0a1ae4e862b526d6e365
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:973c9b28c15094136ea54ed180cfe58a1b4971440392010ddaa9e38f9a739a3d
+size 132268
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/training_log_480de1ac-7a28-4eaf-9c7c-5f9902193512.txt b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/training_log_480de1ac-7a28-4eaf-9c7c-5f9902193512.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4f45d9e30d572af7e8890880378a2820478e2c17
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/training_log_480de1ac-7a28-4eaf-9c7c-5f9902193512.txt
@@ -0,0 +1,5144 @@
+[2025-07-07 16:20:44] [Rank 0] PRINT: --- Script Start: Mon Jul 7 16:20:44 2025 ---
+[2025-07-07 16:20:44] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001)
+[2025-07-07 16:20:44] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 16:20:44] [Rank 0] PRINT: Using fixed seed: 45
+[2025-07-07 16:20:44] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45
+[2025-07-07 16:20:44] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle over the shard files so the loader supports multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
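
(Editor's sketch: the multiplier that Hyperparameters(num_iterations=10000,
cooldown_frac=0.8) implies for the stable-then-decay get_lr() defined further
down in this script: flat for the first 2000 steps, then a linear ramp from
1.0x down to 0.1x of each param group's base lr. The helper name is
illustrative:)

def _lr_multiplier_sketch(step: int, num_iterations: int = 10000, cooldown_frac: float = 0.8) -> float:
    x = min(max(step / num_iterations, 0.0), 1.0)
    if x < 1 - cooldown_frac:
        return 1.0
    w = (1 - x) / max(cooldown_frac, 1e-9)
    return w * 1.0 + (1 - w) * 0.1
# e.g. _lr_multiplier_sketch(2000) == 1.0, _lr_multiplier_sketch(10000) == 0.1
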
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file exactly once, and only when a
+        # log file exists (logfile stays None when no run directory was created)
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
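            # (Editor's note, kept as comments so the logged script stays intact:
            # these smoke tests pin down the two call conventions the evaluation
            # helpers above rely on, namely
            #   model(input_seq, target_seq, window_blocks) -> scalar loss
            #   model(input_seq, None, window_blocks)       -> logits, or a tuple
            # whose elements are unpacked below; run_detailed_evaluation() argmaxes
            # those logits at the last prompt position to score FTA.)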
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 16:20:44] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so training can run for multiple epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (no Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
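+# A minimal reference sketch (illustrative only: this dict is not part of the
+# original script and is not read anywhere below). It summarizes how each
+# --optimizer_mode value partitions the weight matrices between Muon and Adam,
+# mirroring the if/elif chain in the optimizer setup further down. Group names
+# refer to the lists built there (attn_qk_group, attn_vo_group, mlp_w1_group,
+# mlp_w2_group); embedding, lm_head, and scalar parameters always go to Adam.
+OPTIMIZER_MODE_TO_MUON_GROUPS = {  # illustrative only, see comment above
+    0: ["attn_qk_group", "attn_vo_group", "mlp_w1_group", "mlp_w2_group"],
+    1: ["attn_qk_group"],
+    2: ["attn_vo_group"],
+    3: ["attn_qk_group", "attn_vo_group"],
+    4: ["mlp_w1_group", "mlp_w2_group"],
+    5: [],  # all matrices handled by Adam
+    6: ["mlp_w2_group"],
+    7: ["attn_vo_group", "mlp_w1_group", "mlp_w2_group"],
+    8: ["attn_vo_group", "mlp_w2_group"],
+}
+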
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)  # only log/train when this run directory does not already exist
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append to this run's log file; logfile stays None when the run
+        # directory already existed, so guard the write with a single check.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ...
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation; 'model_compiled' does not exist before this line
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+# Snapshot model/optimizer state so the warmup steps leave no trace.
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+for _ in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+# ------------------------------------
+QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+M_FOR_POWERLAW = 11
+NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
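+        # Note on coverage (added for illustration, using the values this run logs):
+        # val_tokens=1966080 and val_batch_size=262144 give val_num_steps = 7, so
+        # 7 * 262144 = 1835008 tokens are scored and the remaining 131072 are
+        # skipped -- which is why the divisibility warning above fires at every
+        # evaluation in the log below.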
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        # First-token accuracy during validation is currently disabled:
+        # first_token_acc = 0.0
+        # ft_correct = 0
+        # ft_total = 0
+        # if master_process and ft_tokenizer is not None:
+        #     try:
+        #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #             model_for_inference, ft_tokenizer, device, num_samples=1000
+        #         )
+        #     except Exception as e:
+        #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+        # if world_size > 1:
+        #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #     ft_total_tensor = torch.tensor(ft_total, device=device)
+        #     dist.broadcast(ft_acc_tensor, 0)
+        #     dist.broadcast(ft_correct_tensor, 0)
+        #     dist.broadcast(ft_total_tensor, 0)
+        #     first_token_acc = ft_acc_tensor.item()
+        #     ft_correct = int(ft_correct_tensor.item())
+        #     ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            # Sync weights into the separate inference model before the detailed eval.
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
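+            # Sketch of what `history` holds after the first few evals (keys are
+            # step strings; values copied from the training log below for
+            # illustration):
+            #   history['total_loss'] == {'500': 4.3136, '1000': 4.5653, ...}
+            #   history['total_acc']  == {'500': 0.0893, '1000': 0.0975, ...}
+            #   history['per_class_loss'][group_id][step_str] -> that group's loss
+            #   history['per_class_acc'][group_id][step_str]  -> that group's FTA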
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 16:20:44] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 16:20:44] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 16:20:44] [Rank 0] PRINT: Constructing model... +[2025-07-07 16:20:44] [Rank 0] PRINT: Constructing model... +[2025-07-07 16:20:46] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 16:20:46] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 16:20:46] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 16:20:46] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 16:20:46] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 16:20:46] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 16:20:47] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 16:20:47] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 16:20:47] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 16:20:47] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 16:20:47] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 16:20:47] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 16:20:47] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 16:20:47] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 16:20:47] [Rank 0] PRINT: Model returns: +[2025-07-07 16:20:47] [Rank 0] PRINT: Model returns: +[2025-07-07 16:20:47] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 16:20:47] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 16:20:47] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 16:20:47] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 16:20:47] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-07 16:20:47] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001). +[2025-07-07 16:20:47] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 16:20:47] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 16:20:47] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 16:20:47] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 16:20:47] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 16:20:47] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 16:20:47] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 16:20:47] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 16:20:47] [Rank 0] PRINT: Starting warmup... +[2025-07-07 16:20:47] [Rank 0] PRINT: Starting warmup... +[2025-07-07 16:31:37] [Rank 0] PRINT: Warmup complete. +[2025-07-07 16:31:37] [Rank 0] PRINT: Warmup complete. +[2025-07-07 16:31:37] [Rank 0] PRINT: Starting training... +[2025-07-07 16:31:37] [Rank 0] PRINT: Starting training... +[2025-07-07 16:31:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 16:31:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 16:35:54] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 16:35:54] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 16:35:56] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.67ms +[2025-07-07 16:35:56] [Rank 0] step:21/10000 train_time:1022ms step_avg:48.67ms +[2025-07-07 16:35:57] [Rank 0] step:41/10000 train_time:2345ms step_avg:57.20ms +[2025-07-07 16:35:57] [Rank 0] step:41/10000 train_time:2345ms step_avg:57.20ms +[2025-07-07 16:35:58] [Rank 0] step:61/10000 train_time:3670ms step_avg:60.16ms +[2025-07-07 16:35:58] [Rank 0] step:61/10000 train_time:3670ms step_avg:60.16ms +[2025-07-07 16:36:00] [Rank 0] step:81/10000 train_time:4996ms step_avg:61.68ms +[2025-07-07 16:36:00] [Rank 0] step:81/10000 train_time:4996ms step_avg:61.68ms +[2025-07-07 16:36:01] [Rank 0] step:101/10000 train_time:6321ms step_avg:62.59ms +[2025-07-07 16:36:01] [Rank 0] step:101/10000 train_time:6321ms step_avg:62.59ms +[2025-07-07 16:36:02] [Rank 0] step:121/10000 train_time:7647ms step_avg:63.20ms +[2025-07-07 16:36:02] [Rank 0] step:121/10000 train_time:7647ms step_avg:63.20ms +[2025-07-07 16:36:04] [Rank 0] step:141/10000 train_time:8974ms step_avg:63.64ms +[2025-07-07 16:36:04] [Rank 0] step:141/10000 train_time:8974ms step_avg:63.64ms +[2025-07-07 16:36:05] [Rank 0] step:161/10000 train_time:10301ms step_avg:63.98ms +[2025-07-07 16:36:05] [Rank 0] step:161/10000 train_time:10301ms step_avg:63.98ms +[2025-07-07 16:36:06] [Rank 0] step:181/10000 train_time:11683ms step_avg:64.55ms +[2025-07-07 16:36:06] [Rank 0] step:181/10000 train_time:11683ms step_avg:64.55ms +[2025-07-07 16:36:08] [Rank 0] step:201/10000 train_time:13039ms step_avg:64.87ms +[2025-07-07 16:36:08] [Rank 0] step:201/10000 train_time:13039ms step_avg:64.87ms +[2025-07-07 16:36:09] [Rank 0] step:221/10000 train_time:14372ms step_avg:65.03ms +[2025-07-07 16:36:09] [Rank 0] step:221/10000 train_time:14372ms step_avg:65.03ms +[2025-07-07 16:36:10] [Rank 0] step:241/10000 train_time:15705ms step_avg:65.17ms +[2025-07-07 16:36:10] [Rank 0] step:241/10000 train_time:15705ms step_avg:65.17ms +[2025-07-07 16:36:12] [Rank 0] step:261/10000 train_time:17039ms step_avg:65.28ms +[2025-07-07 16:36:12] [Rank 0] step:261/10000 train_time:17039ms step_avg:65.28ms +[2025-07-07 16:36:13] [Rank 0] step:281/10000 train_time:18375ms step_avg:65.39ms +[2025-07-07 16:36:13] [Rank 0] step:281/10000 train_time:18375ms step_avg:65.39ms +[2025-07-07 16:36:14] [Rank 0] step:301/10000 train_time:19711ms step_avg:65.49ms +[2025-07-07 16:36:14] [Rank 0] step:301/10000 train_time:19711ms step_avg:65.49ms +[2025-07-07 16:36:16] [Rank 0] step:321/10000 train_time:21047ms step_avg:65.57ms +[2025-07-07 16:36:16] [Rank 0] step:321/10000 train_time:21047ms step_avg:65.57ms +[2025-07-07 16:36:17] [Rank 0] step:341/10000 train_time:22383ms step_avg:65.64ms +[2025-07-07 16:36:17] [Rank 0] step:341/10000 train_time:22383ms step_avg:65.64ms +[2025-07-07 16:36:18] [Rank 0] step:361/10000 train_time:23773ms step_avg:65.85ms +[2025-07-07 16:36:18] [Rank 0] step:361/10000 train_time:23773ms step_avg:65.85ms +[2025-07-07 16:36:20] [Rank 0] step:381/10000 train_time:25058ms step_avg:65.77ms +[2025-07-07 16:36:20] [Rank 0] step:381/10000 train_time:25058ms step_avg:65.77ms +[2025-07-07 16:36:21] [Rank 0] step:401/10000 train_time:26395ms step_avg:65.82ms +[2025-07-07 16:36:21] [Rank 0] step:401/10000 train_time:26395ms step_avg:65.82ms +[2025-07-07 16:36:22] [Rank 0] step:421/10000 train_time:27733ms step_avg:65.87ms 
+[2025-07-07 16:36:22] [Rank 0] step:421/10000 train_time:27733ms step_avg:65.87ms +[2025-07-07 16:36:24] [Rank 0] step:441/10000 train_time:29069ms step_avg:65.92ms +[2025-07-07 16:36:24] [Rank 0] step:441/10000 train_time:29069ms step_avg:65.92ms +[2025-07-07 16:36:25] [Rank 0] step:461/10000 train_time:30407ms step_avg:65.96ms +[2025-07-07 16:36:25] [Rank 0] step:461/10000 train_time:30407ms step_avg:65.96ms +[2025-07-07 16:36:26] [Rank 0] step:481/10000 train_time:31745ms step_avg:66.00ms +[2025-07-07 16:36:26] [Rank 0] step:481/10000 train_time:31745ms step_avg:66.00ms +[2025-07-07 16:36:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 16:36:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 16:36:28] [Rank 0] PRINT: step:500/10000 train_loss:5.2850 val_loss:2.4911 train_time:33690ms step_avg:67.38ms +[2025-07-07 16:36:28] [Rank 0] PRINT: step:500/10000 train_loss:5.2850 val_loss:2.4911 train_time:33690ms step_avg:67.38ms +[2025-07-07 16:36:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 16:36:28] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 16:36:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 16:36:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 16:36:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 16:36:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 16:41:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 16:41:57] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 16:41:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 16:41:57] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 16:41:57] [Rank 0] Total Loss: 4.3136 +[2025-07-07 16:41:57] [Rank 0] Total Loss: 4.3136 +[2025-07-07 16:41:57] [Rank 0] Total FTA: 0.0893 +[2025-07-07 16:41:57] [Rank 0] Total FTA: 0.0893 +[2025-07-07 16:41:57] [Rank 0] Group 0 Loss: 4.4398 +[2025-07-07 16:41:57] [Rank 0] Group 0 Loss: 4.4398 +[2025-07-07 16:41:57] [Rank 0] Group 1 Loss: 4.2883 +[2025-07-07 16:41:57] [Rank 0] Group 1 Loss: 4.2883 +[2025-07-07 16:41:57] [Rank 0] Group 2 Loss: 4.2508 +[2025-07-07 16:41:57] [Rank 0] Group 2 Loss: 4.2508 +[2025-07-07 16:41:57] [Rank 0] Group 3 Loss: 4.2114 +[2025-07-07 16:41:57] [Rank 0] Group 3 Loss: 4.2114 +[2025-07-07 16:41:57] [Rank 0] Group 4 Loss: 4.3799 +[2025-07-07 16:41:57] [Rank 0] Group 4 Loss: 4.3799 +[2025-07-07 16:41:57] [Rank 0] Group 5 Loss: 4.2799 +[2025-07-07 16:41:57] [Rank 0] Group 5 Loss: 4.2799 +[2025-07-07 16:41:57] [Rank 0] Group 6 Loss: 4.2820 +[2025-07-07 16:41:57] [Rank 0] Group 6 Loss: 4.2820 +[2025-07-07 16:41:57] [Rank 0] Group 7 Loss: 4.3100 +[2025-07-07 16:41:57] [Rank 0] Group 7 Loss: 4.3100 +[2025-07-07 16:41:57] [Rank 0] Group 8 Loss: 4.2812 +[2025-07-07 16:41:57] [Rank 0] Group 8 Loss: 4.2812 +[2025-07-07 16:41:57] [Rank 0] Group 9 Loss: 4.3079 +[2025-07-07 16:41:57] [Rank 0] Group 9 Loss: 4.3079 +[2025-07-07 16:41:57] [Rank 0] Group 10 Loss: 4.3135 +[2025-07-07 16:41:57] [Rank 0] Group 10 Loss: 4.3135 +[2025-07-07 16:41:57] [Rank 0] Group 11 Loss: 4.3047 +[2025-07-07 16:41:57] [Rank 0] Group 11 Loss: 4.3047 +[2025-07-07 16:41:57] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-07 16:41:57] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-07 
16:41:57] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 16:41:57] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 16:41:57] [Rank 0] Group 2 FTA: 0.0729 +[2025-07-07 16:41:57] [Rank 0] Group 2 FTA: 0.0729 +[2025-07-07 16:41:57] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 16:41:57] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 16:41:57] [Rank 0] Group 4 FTA: 0.0130 +[2025-07-07 16:41:57] [Rank 0] Group 4 FTA: 0.0130 +[2025-07-07 16:41:57] [Rank 0] Group 5 FTA: 0.0807 +[2025-07-07 16:41:57] [Rank 0] Group 5 FTA: 0.0807 +[2025-07-07 16:41:57] [Rank 0] Group 6 FTA: 0.0990 +[2025-07-07 16:41:57] [Rank 0] Group 6 FTA: 0.0990 +[2025-07-07 16:41:57] [Rank 0] Group 7 FTA: 0.1250 +[2025-07-07 16:41:57] [Rank 0] Group 7 FTA: 0.1250 +[2025-07-07 16:41:57] [Rank 0] Group 8 FTA: 0.0807 +[2025-07-07 16:41:57] [Rank 0] Group 8 FTA: 0.0807 +[2025-07-07 16:41:57] [Rank 0] Group 9 FTA: 0.0859 +[2025-07-07 16:41:57] [Rank 0] Group 9 FTA: 0.0859 +[2025-07-07 16:41:57] [Rank 0] Group 10 FTA: 0.0977 +[2025-07-07 16:41:57] [Rank 0] Group 10 FTA: 0.0977 +[2025-07-07 16:41:57] [Rank 0] Group 11 FTA: 0.0908 +[2025-07-07 16:41:57] [Rank 0] Group 11 FTA: 0.0908 +[2025-07-07 16:41:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 16:41:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 16:41:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-07 16:41:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-07 16:41:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-07 16:41:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-07 16:41:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-07 16:41:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-07 16:41:59] [Rank 0] step:501/10000 train_time:33701ms step_avg:67.27ms +[2025-07-07 16:41:59] [Rank 0] step:501/10000 train_time:33701ms step_avg:67.27ms +[2025-07-07 16:42:00] [Rank 0] step:521/10000 train_time:34444ms step_avg:66.11ms +[2025-07-07 16:42:00] [Rank 0] step:521/10000 train_time:34444ms step_avg:66.11ms +[2025-07-07 16:42:02] [Rank 0] step:541/10000 train_time:35825ms step_avg:66.22ms +[2025-07-07 16:42:02] [Rank 0] step:541/10000 train_time:35825ms step_avg:66.22ms +[2025-07-07 16:42:03] [Rank 0] step:561/10000 train_time:37179ms step_avg:66.27ms +[2025-07-07 16:42:03] [Rank 0] step:561/10000 train_time:37179ms step_avg:66.27ms +[2025-07-07 16:42:04] [Rank 0] step:581/10000 train_time:38511ms step_avg:66.28ms +[2025-07-07 16:42:04] [Rank 0] step:581/10000 train_time:38511ms step_avg:66.28ms +[2025-07-07 16:42:06] [Rank 0] step:601/10000 train_time:39841ms step_avg:66.29ms +[2025-07-07 16:42:06] [Rank 0] step:601/10000 train_time:39841ms step_avg:66.29ms +[2025-07-07 16:42:07] [Rank 0] step:621/10000 train_time:41173ms step_avg:66.30ms +[2025-07-07 16:42:07] [Rank 0] step:621/10000 train_time:41173ms step_avg:66.30ms 
+[2025-07-07 16:42:08] [Rank 0] step:641/10000 train_time:42505ms step_avg:66.31ms +[2025-07-07 16:42:08] [Rank 0] step:641/10000 train_time:42505ms step_avg:66.31ms +[2025-07-07 16:42:10] [Rank 0] step:661/10000 train_time:43838ms step_avg:66.32ms +[2025-07-07 16:42:10] [Rank 0] step:661/10000 train_time:43838ms step_avg:66.32ms +[2025-07-07 16:42:11] [Rank 0] step:681/10000 train_time:45170ms step_avg:66.33ms +[2025-07-07 16:42:11] [Rank 0] step:681/10000 train_time:45170ms step_avg:66.33ms +[2025-07-07 16:42:12] [Rank 0] step:701/10000 train_time:46505ms step_avg:66.34ms +[2025-07-07 16:42:12] [Rank 0] step:701/10000 train_time:46505ms step_avg:66.34ms +[2025-07-07 16:42:14] [Rank 0] step:721/10000 train_time:47841ms step_avg:66.35ms +[2025-07-07 16:42:14] [Rank 0] step:721/10000 train_time:47841ms step_avg:66.35ms +[2025-07-07 16:42:15] [Rank 0] step:741/10000 train_time:49245ms step_avg:66.46ms +[2025-07-07 16:42:15] [Rank 0] step:741/10000 train_time:49245ms step_avg:66.46ms +[2025-07-07 16:42:16] [Rank 0] step:761/10000 train_time:50583ms step_avg:66.47ms +[2025-07-07 16:42:16] [Rank 0] step:761/10000 train_time:50583ms step_avg:66.47ms +[2025-07-07 16:42:18] [Rank 0] step:781/10000 train_time:51927ms step_avg:66.49ms +[2025-07-07 16:42:18] [Rank 0] step:781/10000 train_time:51927ms step_avg:66.49ms +[2025-07-07 16:42:19] [Rank 0] step:801/10000 train_time:53271ms step_avg:66.51ms +[2025-07-07 16:42:19] [Rank 0] step:801/10000 train_time:53271ms step_avg:66.51ms +[2025-07-07 16:42:20] [Rank 0] step:821/10000 train_time:54616ms step_avg:66.52ms +[2025-07-07 16:42:20] [Rank 0] step:821/10000 train_time:54616ms step_avg:66.52ms +[2025-07-07 16:42:22] [Rank 0] step:841/10000 train_time:55962ms step_avg:66.54ms +[2025-07-07 16:42:22] [Rank 0] step:841/10000 train_time:55962ms step_avg:66.54ms +[2025-07-07 16:42:23] [Rank 0] step:861/10000 train_time:57308ms step_avg:66.56ms +[2025-07-07 16:42:23] [Rank 0] step:861/10000 train_time:57308ms step_avg:66.56ms +[2025-07-07 16:42:24] [Rank 0] step:881/10000 train_time:58653ms step_avg:66.57ms +[2025-07-07 16:42:24] [Rank 0] step:881/10000 train_time:58653ms step_avg:66.57ms +[2025-07-07 16:42:26] [Rank 0] step:901/10000 train_time:60686ms step_avg:67.35ms +[2025-07-07 16:42:26] [Rank 0] step:901/10000 train_time:60686ms step_avg:67.35ms +[2025-07-07 16:42:27] [Rank 0] step:921/10000 train_time:61411ms step_avg:66.68ms +[2025-07-07 16:42:27] [Rank 0] step:921/10000 train_time:61411ms step_avg:66.68ms +[2025-07-07 16:42:29] [Rank 0] step:941/10000 train_time:62760ms step_avg:66.70ms +[2025-07-07 16:42:29] [Rank 0] step:941/10000 train_time:62760ms step_avg:66.70ms +[2025-07-07 16:42:30] [Rank 0] step:961/10000 train_time:64109ms step_avg:66.71ms +[2025-07-07 16:42:30] [Rank 0] step:961/10000 train_time:64109ms step_avg:66.71ms +[2025-07-07 16:42:31] [Rank 0] step:981/10000 train_time:65459ms step_avg:66.73ms +[2025-07-07 16:42:31] [Rank 0] step:981/10000 train_time:65459ms step_avg:66.73ms +[2025-07-07 16:42:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 16:42:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 16:42:33] [Rank 0] PRINT: step:1000/10000 train_loss:1.9706 val_loss:1.7510 train_time:67423ms step_avg:67.42ms +[2025-07-07 16:42:33] [Rank 0] PRINT: step:1000/10000 train_loss:1.9706 val_loss:1.7510 train_time:67423ms step_avg:67.42ms +[2025-07-07 16:42:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 16:42:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 16:42:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 16:42:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 16:42:34] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 16:42:34] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 16:48:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 16:48:05] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 16:48:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 16:48:05] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 16:48:05] [Rank 0] Total Loss: 4.5653 +[2025-07-07 16:48:05] [Rank 0] Total Loss: 4.5653 +[2025-07-07 16:48:05] [Rank 0] Total FTA: 0.0975 +[2025-07-07 16:48:05] [Rank 0] Total FTA: 0.0975 +[2025-07-07 16:48:05] [Rank 0] Group 0 Loss: 4.7410 +[2025-07-07 16:48:05] [Rank 0] Group 0 Loss: 4.7410 +[2025-07-07 16:48:05] [Rank 0] Group 1 Loss: 4.4983 +[2025-07-07 16:48:05] [Rank 0] Group 1 Loss: 4.4983 +[2025-07-07 16:48:05] [Rank 0] Group 2 Loss: 4.3411 +[2025-07-07 16:48:05] [Rank 0] Group 2 Loss: 4.3411 +[2025-07-07 16:48:05] [Rank 0] Group 3 Loss: 4.5263 +[2025-07-07 16:48:05] [Rank 0] Group 3 Loss: 4.5263 +[2025-07-07 16:48:05] [Rank 0] Group 4 Loss: 4.5883 +[2025-07-07 16:48:05] [Rank 0] Group 4 Loss: 4.5883 +[2025-07-07 16:48:05] [Rank 0] Group 5 Loss: 4.4887 +[2025-07-07 16:48:05] [Rank 0] Group 5 Loss: 4.4887 +[2025-07-07 16:48:05] [Rank 0] Group 6 Loss: 4.5100 +[2025-07-07 16:48:05] [Rank 0] Group 6 Loss: 4.5100 +[2025-07-07 16:48:05] [Rank 0] Group 7 Loss: 4.6194 +[2025-07-07 16:48:05] [Rank 0] Group 7 Loss: 4.6194 +[2025-07-07 16:48:05] [Rank 0] Group 8 Loss: 4.5446 +[2025-07-07 16:48:05] [Rank 0] Group 8 Loss: 4.5446 +[2025-07-07 16:48:05] [Rank 0] Group 9 Loss: 4.5106 +[2025-07-07 16:48:05] [Rank 0] Group 9 Loss: 4.5106 +[2025-07-07 16:48:05] [Rank 0] Group 10 Loss: 4.5664 +[2025-07-07 16:48:05] [Rank 0] Group 10 Loss: 4.5664 +[2025-07-07 16:48:05] [Rank 0] Group 11 Loss: 4.5985 +[2025-07-07 16:48:05] [Rank 0] Group 11 Loss: 4.5985 +[2025-07-07 16:48:05] [Rank 0] Group 0 FTA: 0.1599 +[2025-07-07 16:48:05] [Rank 0] Group 0 FTA: 0.1599 +[2025-07-07 16:48:05] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 16:48:05] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 16:48:05] [Rank 0] Group 2 FTA: 0.1901 +[2025-07-07 16:48:05] [Rank 0] Group 2 FTA: 0.1901 +[2025-07-07 16:48:05] [Rank 0] Group 3 FTA: 0.0964 +[2025-07-07 16:48:05] [Rank 0] Group 3 FTA: 0.0964 +[2025-07-07 16:48:05] [Rank 0] Group 4 FTA: 0.0312 +[2025-07-07 16:48:05] [Rank 0] Group 4 FTA: 0.0312 +[2025-07-07 16:48:05] [Rank 0] Group 5 FTA: 0.0443 +[2025-07-07 16:48:05] [Rank 0] Group 5 FTA: 0.0443 +[2025-07-07 16:48:05] [Rank 0] Group 6 FTA: 0.0859 +[2025-07-07 16:48:05] [Rank 0] Group 6 FTA: 0.0859 +[2025-07-07 16:48:05] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-07 16:48:05] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-07 16:48:05] [Rank 0] Group 8 FTA: 0.0833 +[2025-07-07 16:48:05] [Rank 0] Group 8 FTA: 0.0833 +[2025-07-07 16:48:05] [Rank 0] Group 9 FTA: 0.1133 
+[2025-07-07 16:48:05] [Rank 0] Group 9 FTA: 0.1133 +[2025-07-07 16:48:05] [Rank 0] Group 10 FTA: 0.0840 +[2025-07-07 16:48:05] [Rank 0] Group 10 FTA: 0.0840 +[2025-07-07 16:48:05] [Rank 0] Group 11 FTA: 0.1123 +[2025-07-07 16:48:05] [Rank 0] Group 11 FTA: 0.1123 +[2025-07-07 16:48:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 16:48:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 16:48:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-07 16:48:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-07 16:48:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-07 16:48:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-07 16:48:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-07 16:48:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-07 16:48:06] [Rank 0] step:1001/10000 train_time:67434ms step_avg:67.37ms +[2025-07-07 16:48:06] [Rank 0] step:1001/10000 train_time:67434ms step_avg:67.37ms +[2025-07-07 16:48:08] [Rank 0] step:1021/10000 train_time:68178ms step_avg:66.78ms +[2025-07-07 16:48:08] [Rank 0] step:1021/10000 train_time:68178ms step_avg:66.78ms +[2025-07-07 16:48:09] [Rank 0] step:1041/10000 train_time:69521ms step_avg:66.78ms +[2025-07-07 16:48:09] [Rank 0] step:1041/10000 train_time:69521ms step_avg:66.78ms +[2025-07-07 16:48:10] [Rank 0] step:1061/10000 train_time:70875ms step_avg:66.80ms +[2025-07-07 16:48:10] [Rank 0] step:1061/10000 train_time:70875ms step_avg:66.80ms +[2025-07-07 16:48:12] [Rank 0] step:1081/10000 train_time:72217ms step_avg:66.81ms +[2025-07-07 16:48:12] [Rank 0] step:1081/10000 train_time:72217ms step_avg:66.81ms +[2025-07-07 16:48:13] [Rank 0] step:1101/10000 train_time:73619ms step_avg:66.87ms +[2025-07-07 16:48:13] [Rank 0] step:1101/10000 train_time:73619ms step_avg:66.87ms +[2025-07-07 16:48:14] [Rank 0] step:1121/10000 train_time:74961ms step_avg:66.87ms +[2025-07-07 16:48:14] [Rank 0] step:1121/10000 train_time:74961ms step_avg:66.87ms +[2025-07-07 16:48:16] [Rank 0] step:1141/10000 train_time:76306ms step_avg:66.88ms +[2025-07-07 16:48:16] [Rank 0] step:1141/10000 train_time:76306ms step_avg:66.88ms +[2025-07-07 16:48:17] [Rank 0] step:1161/10000 train_time:77649ms step_avg:66.88ms +[2025-07-07 16:48:17] [Rank 0] step:1161/10000 train_time:77649ms step_avg:66.88ms +[2025-07-07 16:48:18] [Rank 0] step:1181/10000 train_time:78992ms step_avg:66.89ms +[2025-07-07 16:48:18] [Rank 0] step:1181/10000 train_time:78992ms step_avg:66.89ms +[2025-07-07 16:48:20] [Rank 0] step:1201/10000 train_time:80337ms step_avg:66.89ms +[2025-07-07 16:48:20] [Rank 0] step:1201/10000 train_time:80337ms step_avg:66.89ms +[2025-07-07 16:48:21] [Rank 0] step:1221/10000 train_time:81682ms step_avg:66.90ms +[2025-07-07 16:48:21] [Rank 0] step:1221/10000 train_time:81682ms step_avg:66.90ms +[2025-07-07 
16:48:22] [Rank 0] step:1241/10000 train_time:83026ms step_avg:66.90ms +[2025-07-07 16:48:22] [Rank 0] step:1241/10000 train_time:83026ms step_avg:66.90ms +[2025-07-07 16:48:24] [Rank 0] step:1261/10000 train_time:85057ms step_avg:67.45ms +[2025-07-07 16:48:24] [Rank 0] step:1261/10000 train_time:85057ms step_avg:67.45ms +[2025-07-07 16:48:25] [Rank 0] step:1281/10000 train_time:85781ms step_avg:66.96ms +[2025-07-07 16:48:25] [Rank 0] step:1281/10000 train_time:85781ms step_avg:66.96ms +[2025-07-07 16:48:27] [Rank 0] step:1301/10000 train_time:87125ms step_avg:66.97ms +[2025-07-07 16:48:27] [Rank 0] step:1301/10000 train_time:87125ms step_avg:66.97ms +[2025-07-07 16:48:28] [Rank 0] step:1321/10000 train_time:88469ms step_avg:66.97ms +[2025-07-07 16:48:28] [Rank 0] step:1321/10000 train_time:88469ms step_avg:66.97ms +[2025-07-07 16:48:29] [Rank 0] step:1341/10000 train_time:89814ms step_avg:66.98ms +[2025-07-07 16:48:29] [Rank 0] step:1341/10000 train_time:89814ms step_avg:66.98ms +[2025-07-07 16:48:31] [Rank 0] step:1361/10000 train_time:91160ms step_avg:66.98ms +[2025-07-07 16:48:31] [Rank 0] step:1361/10000 train_time:91160ms step_avg:66.98ms +[2025-07-07 16:48:32] [Rank 0] step:1381/10000 train_time:92506ms step_avg:66.98ms +[2025-07-07 16:48:32] [Rank 0] step:1381/10000 train_time:92506ms step_avg:66.98ms +[2025-07-07 16:48:33] [Rank 0] step:1401/10000 train_time:93855ms step_avg:66.99ms +[2025-07-07 16:48:33] [Rank 0] step:1401/10000 train_time:93855ms step_avg:66.99ms +[2025-07-07 16:48:35] [Rank 0] step:1421/10000 train_time:95201ms step_avg:67.00ms +[2025-07-07 16:48:35] [Rank 0] step:1421/10000 train_time:95201ms step_avg:67.00ms +[2025-07-07 16:48:36] [Rank 0] step:1441/10000 train_time:96600ms step_avg:67.04ms +[2025-07-07 16:48:36] [Rank 0] step:1441/10000 train_time:96600ms step_avg:67.04ms +[2025-07-07 16:48:37] [Rank 0] step:1461/10000 train_time:97942ms step_avg:67.04ms +[2025-07-07 16:48:37] [Rank 0] step:1461/10000 train_time:97942ms step_avg:67.04ms +[2025-07-07 16:48:39] [Rank 0] step:1481/10000 train_time:99289ms step_avg:67.04ms +[2025-07-07 16:48:39] [Rank 0] step:1481/10000 train_time:99289ms step_avg:67.04ms +[2025-07-07 16:48:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 16:48:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 16:48:41] [Rank 0] PRINT: step:1500/10000 train_loss:1.6942 val_loss:1.6414 train_time:101248ms step_avg:67.50ms +[2025-07-07 16:48:41] [Rank 0] PRINT: step:1500/10000 train_loss:1.6942 val_loss:1.6414 train_time:101248ms step_avg:67.50ms +[2025-07-07 16:48:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 16:48:41] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 16:48:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 16:48:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 16:48:41] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 16:48:41] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 16:54:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 16:54:13] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 16:54:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 16:54:13] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 16:54:13] [Rank 0] Total Loss: 4.8048 +[2025-07-07 16:54:13] [Rank 0] Total Loss: 4.8048 +[2025-07-07 16:54:13] [Rank 0] Total FTA: 0.1060 +[2025-07-07 16:54:13] [Rank 0] Total FTA: 0.1060 +[2025-07-07 16:54:13] [Rank 0] Group 0 Loss: 5.0595 +[2025-07-07 16:54:13] [Rank 0] Group 0 Loss: 5.0595 +[2025-07-07 16:54:13] [Rank 0] Group 1 Loss: 4.7683 +[2025-07-07 16:54:13] [Rank 0] Group 1 Loss: 4.7683 +[2025-07-07 16:54:13] [Rank 0] Group 2 Loss: 4.4685 +[2025-07-07 16:54:13] [Rank 0] Group 2 Loss: 4.4685 +[2025-07-07 16:54:13] [Rank 0] Group 3 Loss: 4.8173 +[2025-07-07 16:54:13] [Rank 0] Group 3 Loss: 4.8173 +[2025-07-07 16:54:13] [Rank 0] Group 4 Loss: 4.7849 +[2025-07-07 16:54:13] [Rank 0] Group 4 Loss: 4.7849 +[2025-07-07 16:54:13] [Rank 0] Group 5 Loss: 4.7299 +[2025-07-07 16:54:13] [Rank 0] Group 5 Loss: 4.7299 +[2025-07-07 16:54:13] [Rank 0] Group 6 Loss: 4.7395 +[2025-07-07 16:54:13] [Rank 0] Group 6 Loss: 4.7395 +[2025-07-07 16:54:13] [Rank 0] Group 7 Loss: 4.8500 +[2025-07-07 16:54:13] [Rank 0] Group 7 Loss: 4.8500 +[2025-07-07 16:54:13] [Rank 0] Group 8 Loss: 4.7682 +[2025-07-07 16:54:13] [Rank 0] Group 8 Loss: 4.7682 +[2025-07-07 16:54:13] [Rank 0] Group 9 Loss: 4.7262 +[2025-07-07 16:54:13] [Rank 0] Group 9 Loss: 4.7262 +[2025-07-07 16:54:13] [Rank 0] Group 10 Loss: 4.8066 +[2025-07-07 16:54:13] [Rank 0] Group 10 Loss: 4.8066 +[2025-07-07 16:54:13] [Rank 0] Group 11 Loss: 4.8241 +[2025-07-07 16:54:13] [Rank 0] Group 11 Loss: 4.8241 +[2025-07-07 16:54:13] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 16:54:13] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 16:54:13] [Rank 0] Group 1 FTA: 0.1536 +[2025-07-07 16:54:13] [Rank 0] Group 1 FTA: 0.1536 +[2025-07-07 16:54:13] [Rank 0] Group 2 FTA: 0.1667 +[2025-07-07 16:54:13] [Rank 0] Group 2 FTA: 0.1667 +[2025-07-07 16:54:13] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 16:54:13] [Rank 0] Group 3 FTA: 0.0495 +[2025-07-07 16:54:13] [Rank 0] Group 4 FTA: 0.0391 +[2025-07-07 16:54:13] [Rank 0] Group 4 FTA: 0.0391 +[2025-07-07 16:54:13] [Rank 0] Group 5 FTA: 0.0755 +[2025-07-07 16:54:13] [Rank 0] Group 5 FTA: 0.0755 +[2025-07-07 16:54:13] [Rank 0] Group 6 FTA: 0.0729 +[2025-07-07 16:54:13] [Rank 0] Group 6 FTA: 0.0729 +[2025-07-07 16:54:13] [Rank 0] Group 7 FTA: 0.1146 +[2025-07-07 16:54:13] [Rank 0] Group 7 FTA: 0.1146 +[2025-07-07 16:54:13] [Rank 0] Group 8 FTA: 0.0964 +[2025-07-07 16:54:13] [Rank 0] Group 8 FTA: 0.0964 +[2025-07-07 16:54:13] [Rank 0] Group 9 FTA: 0.1172 +[2025-07-07 16:54:13] [Rank 0] Group 9 FTA: 0.1172 +[2025-07-07 16:54:13] [Rank 0] Group 10 FTA: 0.0898 +[2025-07-07 16:54:13] [Rank 0] Group 10 FTA: 0.0898 +[2025-07-07 16:54:13] [Rank 0] Group 11 FTA: 0.0898 +[2025-07-07 16:54:13] [Rank 0] Group 11 FTA: 0.0898 +[2025-07-07 16:54:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 16:54:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 16:54:14] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-07 16:54:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-07 16:54:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-07 16:54:14] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-07 16:54:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-07 16:54:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-07 16:54:15] [Rank 0] step:1501/10000 train_time:101259ms step_avg:67.46ms +[2025-07-07 16:54:15] [Rank 0] step:1501/10000 train_time:101259ms step_avg:67.46ms +[2025-07-07 16:54:16] [Rank 0] step:1521/10000 train_time:101993ms step_avg:67.06ms +[2025-07-07 16:54:16] [Rank 0] step:1521/10000 train_time:101993ms step_avg:67.06ms +[2025-07-07 16:54:17] [Rank 0] step:1541/10000 train_time:103332ms step_avg:67.06ms +[2025-07-07 16:54:17] [Rank 0] step:1541/10000 train_time:103332ms step_avg:67.06ms +[2025-07-07 16:54:19] [Rank 0] step:1561/10000 train_time:104672ms step_avg:67.05ms +[2025-07-07 16:54:19] [Rank 0] step:1561/10000 train_time:104672ms step_avg:67.05ms +[2025-07-07 16:54:20] [Rank 0] step:1581/10000 train_time:106012ms step_avg:67.05ms +[2025-07-07 16:54:20] [Rank 0] step:1581/10000 train_time:106012ms step_avg:67.05ms +[2025-07-07 16:54:21] [Rank 0] step:1601/10000 train_time:107352ms step_avg:67.05ms +[2025-07-07 16:54:21] [Rank 0] step:1601/10000 train_time:107352ms step_avg:67.05ms +[2025-07-07 16:54:23] [Rank 0] step:1621/10000 train_time:108691ms step_avg:67.05ms +[2025-07-07 16:54:23] [Rank 0] step:1621/10000 train_time:108691ms step_avg:67.05ms +[2025-07-07 16:54:24] [Rank 0] step:1641/10000 train_time:110078ms step_avg:67.08ms +[2025-07-07 16:54:24] [Rank 0] step:1641/10000 train_time:110078ms step_avg:67.08ms +[2025-07-07 16:54:25] [Rank 0] step:1661/10000 train_time:111418ms step_avg:67.08ms +[2025-07-07 16:54:25] [Rank 0] step:1661/10000 train_time:111418ms step_avg:67.08ms +[2025-07-07 16:54:27] [Rank 0] step:1681/10000 train_time:112760ms step_avg:67.08ms +[2025-07-07 16:54:27] [Rank 0] step:1681/10000 train_time:112760ms step_avg:67.08ms +[2025-07-07 16:54:28] [Rank 0] step:1701/10000 train_time:114103ms step_avg:67.08ms +[2025-07-07 16:54:28] [Rank 0] step:1701/10000 train_time:114103ms step_avg:67.08ms +[2025-07-07 16:54:30] [Rank 0] step:1721/10000 train_time:115447ms step_avg:67.08ms +[2025-07-07 16:54:30] [Rank 0] step:1721/10000 train_time:115447ms step_avg:67.08ms +[2025-07-07 16:54:31] [Rank 0] step:1741/10000 train_time:116792ms step_avg:67.08ms +[2025-07-07 16:54:31] [Rank 0] step:1741/10000 train_time:116792ms step_avg:67.08ms +[2025-07-07 16:54:32] [Rank 0] step:1761/10000 train_time:118135ms step_avg:67.08ms +[2025-07-07 16:54:32] [Rank 0] step:1761/10000 train_time:118135ms step_avg:67.08ms +[2025-07-07 16:54:34] [Rank 0] step:1781/10000 train_time:119479ms step_avg:67.09ms +[2025-07-07 16:54:34] [Rank 0] step:1781/10000 train_time:119479ms step_avg:67.09ms +[2025-07-07 16:54:35] [Rank 0] step:1801/10000 train_time:120871ms step_avg:67.11ms +[2025-07-07 16:54:35] [Rank 0] 
step:1801/10000 train_time:120871ms step_avg:67.11ms +[2025-07-07 16:54:36] [Rank 0] step:1821/10000 train_time:122207ms step_avg:67.11ms +[2025-07-07 16:54:36] [Rank 0] step:1821/10000 train_time:122207ms step_avg:67.11ms +[2025-07-07 16:54:38] [Rank 0] step:1841/10000 train_time:123551ms step_avg:67.11ms +[2025-07-07 16:54:38] [Rank 0] step:1841/10000 train_time:123551ms step_avg:67.11ms +[2025-07-07 16:54:39] [Rank 0] step:1861/10000 train_time:124896ms step_avg:67.11ms +[2025-07-07 16:54:39] [Rank 0] step:1861/10000 train_time:124896ms step_avg:67.11ms +[2025-07-07 16:54:40] [Rank 0] step:1881/10000 train_time:126241ms step_avg:67.11ms +[2025-07-07 16:54:40] [Rank 0] step:1881/10000 train_time:126241ms step_avg:67.11ms +[2025-07-07 16:54:42] [Rank 0] step:1901/10000 train_time:127586ms step_avg:67.12ms +[2025-07-07 16:54:42] [Rank 0] step:1901/10000 train_time:127586ms step_avg:67.12ms +[2025-07-07 16:54:43] [Rank 0] step:1921/10000 train_time:128932ms step_avg:67.12ms +[2025-07-07 16:54:43] [Rank 0] step:1921/10000 train_time:128932ms step_avg:67.12ms +[2025-07-07 16:54:44] [Rank 0] step:1941/10000 train_time:130277ms step_avg:67.12ms +[2025-07-07 16:54:44] [Rank 0] step:1941/10000 train_time:130277ms step_avg:67.12ms +[2025-07-07 16:54:46] [Rank 0] step:1961/10000 train_time:131622ms step_avg:67.12ms +[2025-07-07 16:54:46] [Rank 0] step:1961/10000 train_time:131622ms step_avg:67.12ms +[2025-07-07 16:54:47] [Rank 0] step:1981/10000 train_time:133644ms step_avg:67.46ms +[2025-07-07 16:54:47] [Rank 0] step:1981/10000 train_time:133644ms step_avg:67.46ms +[2025-07-07 16:54:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 16:54:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 16:54:49] [Rank 0] PRINT: step:2000/10000 train_loss:1.5905 val_loss:1.5354 train_time:134978ms step_avg:67.49ms +[2025-07-07 16:54:49] [Rank 0] PRINT: step:2000/10000 train_loss:1.5905 val_loss:1.5354 train_time:134978ms step_avg:67.49ms +[2025-07-07 16:54:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 16:54:49] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 16:54:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 16:54:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 16:54:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 16:54:50] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:00:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:00:18] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:00:18] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:00:18] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:00:18] [Rank 0] Total Loss: 4.8737 +[2025-07-07 17:00:18] [Rank 0] Total Loss: 4.8737 +[2025-07-07 17:00:18] [Rank 0] Total FTA: 0.1040 +[2025-07-07 17:00:18] [Rank 0] Total FTA: 0.1040 +[2025-07-07 17:00:18] [Rank 0] Group 0 Loss: 5.1208 +[2025-07-07 17:00:18] [Rank 0] Group 0 Loss: 5.1208 +[2025-07-07 17:00:18] [Rank 0] Group 1 Loss: 4.9419 +[2025-07-07 17:00:18] [Rank 0] Group 1 Loss: 4.9419 +[2025-07-07 17:00:18] [Rank 0] Group 2 Loss: 4.6071 +[2025-07-07 17:00:18] [Rank 0] Group 2 Loss: 4.6071 +[2025-07-07 17:00:18] [Rank 0] Group 3 Loss: 4.9347 +[2025-07-07 17:00:18] [Rank 0] Group 3 Loss: 4.9347 +[2025-07-07 17:00:18] [Rank 0] Group 4 Loss: 4.7473 +[2025-07-07 17:00:18] [Rank 0] Group 4 Loss: 4.7473 +[2025-07-07 17:00:18] [Rank 0] Group 5 Loss: 4.7607 +[2025-07-07 17:00:18] [Rank 0] Group 5 Loss: 4.7607 +[2025-07-07 17:00:18] [Rank 0] Group 6 Loss: 4.7941 +[2025-07-07 17:00:18] [Rank 0] Group 6 Loss: 4.7941 +[2025-07-07 17:00:18] [Rank 0] Group 7 Loss: 4.8806 +[2025-07-07 17:00:18] [Rank 0] Group 7 Loss: 4.8806 +[2025-07-07 17:00:18] [Rank 0] Group 8 Loss: 4.8475 +[2025-07-07 17:00:18] [Rank 0] Group 8 Loss: 4.8475 +[2025-07-07 17:00:18] [Rank 0] Group 9 Loss: 4.9163 +[2025-07-07 17:00:18] [Rank 0] Group 9 Loss: 4.9163 +[2025-07-07 17:00:18] [Rank 0] Group 10 Loss: 4.8952 +[2025-07-07 17:00:18] [Rank 0] Group 10 Loss: 4.8952 +[2025-07-07 17:00:18] [Rank 0] Group 11 Loss: 4.8454 +[2025-07-07 17:00:18] [Rank 0] Group 11 Loss: 4.8454 +[2025-07-07 17:00:18] [Rank 0] Group 0 FTA: 0.1691 +[2025-07-07 17:00:18] [Rank 0] Group 0 FTA: 0.1691 +[2025-07-07 17:00:19] [Rank 0] Group 1 FTA: 0.1693 +[2025-07-07 17:00:19] [Rank 0] Group 1 FTA: 0.1693 +[2025-07-07 17:00:19] [Rank 0] Group 2 FTA: 0.0885 +[2025-07-07 17:00:19] [Rank 0] Group 2 FTA: 0.0885 +[2025-07-07 17:00:19] [Rank 0] Group 3 FTA: 0.0391 +[2025-07-07 17:00:19] [Rank 0] Group 3 FTA: 0.0391 +[2025-07-07 17:00:19] [Rank 0] Group 4 FTA: 0.0234 +[2025-07-07 17:00:19] [Rank 0] Group 4 FTA: 0.0234 +[2025-07-07 17:00:19] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-07 17:00:19] [Rank 0] Group 5 FTA: 0.0677 +[2025-07-07 17:00:19] [Rank 0] Group 6 FTA: 0.1068 +[2025-07-07 17:00:19] [Rank 0] Group 6 FTA: 0.1068 +[2025-07-07 17:00:19] [Rank 0] Group 7 FTA: 0.0938 +[2025-07-07 17:00:19] [Rank 0] Group 7 FTA: 0.0938 +[2025-07-07 17:00:19] [Rank 0] Group 8 FTA: 0.0885 +[2025-07-07 17:00:19] [Rank 0] Group 8 FTA: 0.0885 +[2025-07-07 17:00:19] [Rank 0] Group 9 FTA: 0.1328 +[2025-07-07 17:00:19] [Rank 0] Group 9 FTA: 0.1328 +[2025-07-07 17:00:19] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 17:00:19] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 17:00:19] [Rank 0] Group 11 FTA: 0.1104 +[2025-07-07 17:00:19] [Rank 0] Group 11 FTA: 0.1104 +[2025-07-07 17:00:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 17:00:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 17:00:19] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-07 17:00:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png +[2025-07-07 17:00:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-07 17:00:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png +[2025-07-07 17:00:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-07 17:00:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png +[2025-07-07 17:00:20] [Rank 0] step:2001/10000 train_time:134989ms step_avg:67.46ms +[2025-07-07 17:00:20] [Rank 0] step:2001/10000 train_time:134989ms step_avg:67.46ms +[2025-07-07 17:00:21] [Rank 0] step:2021/10000 train_time:135747ms step_avg:67.17ms +[2025-07-07 17:00:21] [Rank 0] step:2021/10000 train_time:135747ms step_avg:67.17ms +[2025-07-07 17:00:23] [Rank 0] step:2041/10000 train_time:137089ms step_avg:67.17ms +[2025-07-07 17:00:23] [Rank 0] step:2041/10000 train_time:137089ms step_avg:67.17ms +[2025-07-07 17:00:24] [Rank 0] step:2061/10000 train_time:138430ms step_avg:67.17ms +[2025-07-07 17:00:24] [Rank 0] step:2061/10000 train_time:138430ms step_avg:67.17ms +[2025-07-07 17:00:25] [Rank 0] step:2081/10000 train_time:139770ms step_avg:67.16ms +[2025-07-07 17:00:25] [Rank 0] step:2081/10000 train_time:139770ms step_avg:67.16ms +[2025-07-07 17:00:27] [Rank 0] step:2101/10000 train_time:141111ms step_avg:67.16ms +[2025-07-07 17:00:27] [Rank 0] step:2101/10000 train_time:141111ms step_avg:67.16ms +[2025-07-07 17:00:28] [Rank 0] step:2121/10000 train_time:142452ms step_avg:67.16ms +[2025-07-07 17:00:28] [Rank 0] step:2121/10000 train_time:142452ms step_avg:67.16ms +[2025-07-07 17:00:29] [Rank 0] step:2141/10000 train_time:143798ms step_avg:67.16ms +[2025-07-07 17:00:29] [Rank 0] step:2141/10000 train_time:143798ms step_avg:67.16ms +[2025-07-07 17:00:31] [Rank 0] step:2161/10000 train_time:145393ms step_avg:67.28ms +[2025-07-07 17:00:31] [Rank 0] step:2161/10000 train_time:145393ms step_avg:67.28ms +[2025-07-07 17:00:32] [Rank 0] step:2181/10000 train_time:146524ms step_avg:67.18ms +[2025-07-07 17:00:32] [Rank 0] step:2181/10000 train_time:146524ms step_avg:67.18ms +[2025-07-07 17:00:33] [Rank 0] step:2201/10000 train_time:147867ms step_avg:67.18ms +[2025-07-07 17:00:33] [Rank 0] step:2201/10000 train_time:147867ms step_avg:67.18ms +[2025-07-07 17:00:35] [Rank 0] step:2221/10000 train_time:149211ms step_avg:67.18ms +[2025-07-07 17:00:35] [Rank 0] step:2221/10000 train_time:149211ms step_avg:67.18ms +[2025-07-07 17:00:36] [Rank 0] step:2241/10000 train_time:150565ms step_avg:67.19ms +[2025-07-07 17:00:36] [Rank 0] step:2241/10000 train_time:150565ms step_avg:67.19ms +[2025-07-07 17:00:37] [Rank 0] step:2261/10000 train_time:151936ms step_avg:67.20ms +[2025-07-07 17:00:37] [Rank 0] step:2261/10000 train_time:151936ms step_avg:67.20ms +[2025-07-07 17:00:39] [Rank 0] step:2281/10000 train_time:153306ms step_avg:67.21ms +[2025-07-07 17:00:39] [Rank 0] step:2281/10000 train_time:153306ms step_avg:67.21ms +[2025-07-07 17:00:40] [Rank 0] step:2301/10000 train_time:154677ms step_avg:67.22ms +[2025-07-07 17:00:40] [Rank 0] 
step:2301/10000 train_time:154677ms step_avg:67.22ms +[2025-07-07 17:00:42] [Rank 0] step:2321/10000 train_time:156049ms step_avg:67.23ms +[2025-07-07 17:00:42] [Rank 0] step:2321/10000 train_time:156049ms step_avg:67.23ms +[2025-07-07 17:00:43] [Rank 0] step:2341/10000 train_time:158093ms step_avg:67.53ms +[2025-07-07 17:00:43] [Rank 0] step:2341/10000 train_time:158093ms step_avg:67.53ms +[2025-07-07 17:00:44] [Rank 0] step:2361/10000 train_time:158830ms step_avg:67.27ms +[2025-07-07 17:00:44] [Rank 0] step:2361/10000 train_time:158830ms step_avg:67.27ms +[2025-07-07 17:00:46] [Rank 0] step:2381/10000 train_time:160200ms step_avg:67.28ms +[2025-07-07 17:00:46] [Rank 0] step:2381/10000 train_time:160200ms step_avg:67.28ms +[2025-07-07 17:00:47] [Rank 0] step:2401/10000 train_time:161571ms step_avg:67.29ms +[2025-07-07 17:00:47] [Rank 0] step:2401/10000 train_time:161571ms step_avg:67.29ms +[2025-07-07 17:00:48] [Rank 0] step:2421/10000 train_time:162945ms step_avg:67.30ms +[2025-07-07 17:00:48] [Rank 0] step:2421/10000 train_time:162945ms step_avg:67.30ms +[2025-07-07 17:00:50] [Rank 0] step:2441/10000 train_time:164319ms step_avg:67.32ms +[2025-07-07 17:00:50] [Rank 0] step:2441/10000 train_time:164319ms step_avg:67.32ms +[2025-07-07 17:00:51] [Rank 0] step:2461/10000 train_time:165693ms step_avg:67.33ms +[2025-07-07 17:00:51] [Rank 0] step:2461/10000 train_time:165693ms step_avg:67.33ms +[2025-07-07 17:00:53] [Rank 0] step:2481/10000 train_time:167068ms step_avg:67.34ms +[2025-07-07 17:00:53] [Rank 0] step:2481/10000 train_time:167068ms step_avg:67.34ms +[2025-07-07 17:00:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:00:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:00:55] [Rank 0] PRINT: step:2500/10000 train_loss:1.4713 val_loss:1.4010 train_time:169067ms step_avg:67.63ms +[2025-07-07 17:00:55] [Rank 0] PRINT: step:2500/10000 train_loss:1.4713 val_loss:1.4010 train_time:169067ms step_avg:67.63ms +[2025-07-07 17:00:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:00:55] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:00:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 17:00:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 17:00:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:00:55] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:06:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:06:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:06:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:06:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:06:25] [Rank 0] Total Loss: 4.9253 +[2025-07-07 17:06:25] [Rank 0] Total Loss: 4.9253 +[2025-07-07 17:06:25] [Rank 0] Total FTA: 0.0817 +[2025-07-07 17:06:25] [Rank 0] Total FTA: 0.0817 +[2025-07-07 17:06:25] [Rank 0] Group 0 Loss: 5.2259 +[2025-07-07 17:06:25] [Rank 0] Group 0 Loss: 5.2259 +[2025-07-07 17:06:25] [Rank 0] Group 1 Loss: 4.7393 +[2025-07-07 17:06:25] [Rank 0] Group 1 Loss: 4.7393 +[2025-07-07 17:06:25] [Rank 0] Group 2 Loss: 4.7015 +[2025-07-07 17:06:25] [Rank 0] Group 2 Loss: 4.7015 +[2025-07-07 17:06:25] [Rank 0] Group 3 Loss: 4.8022 +[2025-07-07 17:06:25] [Rank 0] Group 3 Loss: 4.8022 +[2025-07-07 17:06:25] [Rank 0] Group 4 Loss: 4.8867 +[2025-07-07 17:06:25] [Rank 0] Group 4 Loss: 4.8867 +[2025-07-07 17:06:25] [Rank 0] Group 5 Loss: 4.8444 +[2025-07-07 17:06:25] [Rank 0] Group 5 Loss: 4.8444 +[2025-07-07 17:06:25] [Rank 0] Group 6 Loss: 4.8147 +[2025-07-07 17:06:25] [Rank 0] Group 6 Loss: 4.8147 +[2025-07-07 17:06:25] [Rank 0] Group 7 Loss: 4.9707 +[2025-07-07 17:06:25] [Rank 0] Group 7 Loss: 4.9707 +[2025-07-07 17:06:25] [Rank 0] Group 8 Loss: 4.9348 +[2025-07-07 17:06:25] [Rank 0] Group 8 Loss: 4.9348 +[2025-07-07 17:06:25] [Rank 0] Group 9 Loss: 4.8934 +[2025-07-07 17:06:25] [Rank 0] Group 9 Loss: 4.8934 +[2025-07-07 17:06:25] [Rank 0] Group 10 Loss: 4.9789 +[2025-07-07 17:06:25] [Rank 0] Group 10 Loss: 4.9789 +[2025-07-07 17:06:25] [Rank 0] Group 11 Loss: 4.9464 +[2025-07-07 17:06:25] [Rank 0] Group 11 Loss: 4.9464 +[2025-07-07 17:06:25] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 17:06:25] [Rank 0] Group 0 FTA: 0.0000 +[2025-07-07 17:06:25] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 17:06:25] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 17:06:25] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-07 17:06:25] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-07 17:06:25] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-07 17:06:25] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-07 17:06:25] [Rank 0] Group 4 FTA: 0.0234 +[2025-07-07 17:06:25] [Rank 0] Group 4 FTA: 0.0234 +[2025-07-07 17:06:25] [Rank 0] Group 5 FTA: 0.0755 +[2025-07-07 17:06:25] [Rank 0] Group 5 FTA: 0.0755 +[2025-07-07 17:06:25] [Rank 0] Group 6 FTA: 0.1406 +[2025-07-07 17:06:25] [Rank 0] Group 6 FTA: 0.1406 +[2025-07-07 17:06:25] [Rank 0] Group 7 FTA: 0.1302 +[2025-07-07 17:06:25] [Rank 0] Group 7 FTA: 0.1302 +[2025-07-07 17:06:25] [Rank 0] Group 8 FTA: 0.1016 +[2025-07-07 17:06:25] [Rank 0] Group 8 FTA: 0.1016 +[2025-07-07 17:06:25] [Rank 0] Group 9 FTA: 0.1445 +[2025-07-07 17:06:25] [Rank 0] Group 9 FTA: 0.1445 +[2025-07-07 17:06:25] [Rank 0] Group 10 FTA: 0.1289 +[2025-07-07 17:06:25] [Rank 0] Group 10 FTA: 0.1289 +[2025-07-07 17:06:25] [Rank 0] Group 11 FTA: 0.1230 +[2025-07-07 17:06:25] [Rank 0] Group 11 FTA: 0.1230 +[2025-07-07 17:06:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 17:06:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png +[2025-07-07 17:06:25] [Rank 0] [✓] Per-Class FTA curve updated 
+[2025-07-07 17:06:26] [Rank 0] step:2501/10000 train_time:169079ms step_avg:67.60ms
+[2025-07-07 17:06:28] [Rank 0] step:2521/10000 train_time:170524ms step_avg:67.64ms
+[2025-07-07 17:06:29] [Rank 0] step:2541/10000 train_time:171258ms step_avg:67.40ms
+[2025-07-07 17:06:30] [Rank 0] step:2561/10000 train_time:172623ms step_avg:67.40ms
+[2025-07-07 17:06:32] [Rank 0] step:2581/10000 train_time:173989ms step_avg:67.41ms
+[2025-07-07 17:06:33] [Rank 0] step:2601/10000 train_time:175354ms step_avg:67.42ms
+[2025-07-07 17:06:34] [Rank 0] step:2621/10000 train_time:176722ms step_avg:67.43ms
+[2025-07-07 17:06:36] [Rank 0] step:2641/10000 train_time:178088ms step_avg:67.43ms
+[2025-07-07 17:06:37] [Rank 0] step:2661/10000 train_time:179455ms step_avg:67.44ms
+[2025-07-07 17:06:38] [Rank 0] step:2681/10000 train_time:180822ms step_avg:67.45ms
+[2025-07-07 17:06:40] [Rank 0] step:2701/10000 train_time:182238ms step_avg:67.47ms
+[2025-07-07 17:06:41] [Rank 0] step:2721/10000 train_time:183613ms step_avg:67.48ms
+[2025-07-07 17:06:43] [Rank 0] step:2741/10000 train_time:184982ms step_avg:67.49ms
+[2025-07-07 17:06:44] [Rank 0] step:2761/10000 train_time:186350ms step_avg:67.49ms
+[2025-07-07 17:06:45] [Rank 0] step:2781/10000 train_time:187719ms step_avg:67.50ms
+[2025-07-07 17:06:47] [Rank 0] step:2801/10000 train_time:189089ms step_avg:67.51ms
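
The step_avg field is simply cumulative train_time divided by the step index; a sanity check on two of the lines above, with values copied from the log:

    for step, train_time_ms in [(2501, 169079), (2801, 189089)]:
        print(f"step:{step} step_avg:{train_time_ms / step:.2f}ms")
    # -> 67.60ms and 67.51ms, matching the logged step_avg values.
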
+[2025-07-07 17:06:48] [Rank 0] step:2821/10000 train_time:190460ms step_avg:67.51ms
+[2025-07-07 17:06:49] [Rank 0] step:2841/10000 train_time:191830ms step_avg:67.52ms
+[2025-07-07 17:06:51] [Rank 0] step:2861/10000 train_time:193201ms step_avg:67.53ms
+[2025-07-07 17:06:52] [Rank 0] step:2881/10000 train_time:194574ms step_avg:67.54ms
+[2025-07-07 17:06:54] [Rank 0] step:2901/10000 train_time:195945ms step_avg:67.54ms
+[2025-07-07 17:06:55] [Rank 0] step:2921/10000 train_time:197317ms step_avg:67.55ms
+[2025-07-07 17:06:56] [Rank 0] step:2941/10000 train_time:198690ms step_avg:67.56ms
+[2025-07-07 17:06:58] [Rank 0] step:2961/10000 train_time:200063ms step_avg:67.57ms
+[2025-07-07 17:06:59] [Rank 0] step:2981/10000 train_time:201436ms step_avg:67.57ms
+[2025-07-07 17:07:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
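
The warning above is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches fit, consuming 1,835,008 tokens and leaving 131,072 (exactly half a batch) unscored. A sketch of the kind of check that would emit it (the two quantities come from the message itself; the surrounding code is an assumption):

    val_tokens, val_batch_size = 1966080, 262144
    n_full_batches, remainder = divmod(val_tokens, val_batch_size)  # (7, 131072)
    if remainder != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
              f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
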
+[2025-07-07 17:07:01] [Rank 0] PRINT: step:3000/10000 train_loss:1.3368 val_loss:1.2865 train_time:203435ms step_avg:67.81ms
+[2025-07-07 17:07:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:07:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:07:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:12:31] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:12:31] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:12:31] [Rank 0] Total Loss: 4.9925
+[2025-07-07 17:12:31] [Rank 0] Total FTA: 0.1694
+[2025-07-07 17:12:31] [Rank 0] Group 0 Loss: 5.1021
+[2025-07-07 17:12:31] [Rank 0] Group 1 Loss: 4.9508
+[2025-07-07 17:12:31] [Rank 0] Group 2 Loss: 4.7252
+[2025-07-07 17:12:31] [Rank 0] Group 3 Loss: 5.0706
+[2025-07-07 17:12:31] [Rank 0] Group 4 Loss: 4.9317
+[2025-07-07 17:12:31] [Rank 0] Group 5 Loss: 4.9762
+[2025-07-07 17:12:31] [Rank 0] Group 6 Loss: 4.9419
+[2025-07-07 17:12:31] [Rank 0] Group 7 Loss: 5.0223
+[2025-07-07 17:12:31] [Rank 0] Group 8 Loss: 5.0215
+[2025-07-07 17:12:31] [Rank 0] Group 9 Loss: 5.0240
+[2025-07-07 17:12:31] [Rank 0] Group 10 Loss: 5.0537
+[2025-07-07 17:12:31] [Rank 0] Group 11 Loss: 4.9840
+[2025-07-07 17:12:31] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 17:12:31] [Rank 0] Group 1 FTA: 0.3151
+[2025-07-07 17:12:31] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 17:12:31] [Rank 0] Group 3 FTA: 0.0885
+[2025-07-07 17:12:31] [Rank 0] Group 4 FTA: 0.0964
+[2025-07-07 17:12:31] [Rank 0] Group 5 FTA: 0.2604
+[2025-07-07 17:12:31] [Rank 0] Group 6 FTA: 0.1719
+[2025-07-07 17:12:31] [Rank 0] Group 7 FTA: 0.1667
+[2025-07-07 17:12:31] [Rank 0] Group 8 FTA: 0.1901
+[2025-07-07 17:12:31] [Rank 0] Group 9 FTA: 0.1797
+[2025-07-07 17:12:31] [Rank 0] Group 10 FTA: 0.1660
+[2025-07-07 17:12:31] [Rank 0] Group 11 FTA: 0.1973
+[2025-07-07 17:12:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 17:12:32] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 17:12:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:12:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
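
Each detailed pass requests ~5000 samples yet always keeps exactly 5633, so the stratified draw is deterministic and overshoots its nominal target; per-class quotas rounded up and/or floored at a minimum count would behave this way (with only 12 coarse groups, rounding alone could not add 633 samples, so the stratification presumably runs over a finer class variable). The sampler itself is not shown in this log; a minimal proportional-quota sketch (all names hypothetical):

    import math, random
    from collections import defaultdict

    def stratified_sample(samples, class_of, target=5000, min_per_class=1, seed=0):
        rng = random.Random(seed)
        by_class = defaultdict(list)
        for s in samples:
            by_class[class_of(s)].append(s)
        total = sum(len(v) for v in by_class.values())
        picked = []
        for members in by_class.values():
            # Ceiling quota plus a per-class floor: both round the draw
            # upward, so len(picked) can exceed the nominal target.
            quota = max(min_per_class, math.ceil(target * len(members) / total))
            picked.extend(rng.sample(members, min(quota, len(members))))
        return picked
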
+[2025-07-07 17:12:33] [Rank 0] step:3001/10000 train_time:203446ms step_avg:67.79ms
+[2025-07-07 17:12:34] [Rank 0] step:3021/10000 train_time:204214ms step_avg:67.60ms
+[2025-07-07 17:12:36] [Rank 0] step:3041/10000 train_time:205583ms step_avg:67.60ms
+[2025-07-07 17:12:37] [Rank 0] step:3061/10000 train_time:207001ms step_avg:67.63ms
+[2025-07-07 17:12:38] [Rank 0] step:3081/10000 train_time:208352ms step_avg:67.62ms
+[2025-07-07 17:12:40] [Rank 0] step:3101/10000 train_time:209718ms step_avg:67.63ms
+[2025-07-07 17:12:41] [Rank 0] step:3121/10000 train_time:211085ms step_avg:67.63ms
+[2025-07-07 17:12:43] [Rank 0] step:3141/10000 train_time:212452ms step_avg:67.64ms
+[2025-07-07 17:12:44] [Rank 0] step:3161/10000 train_time:213818ms step_avg:67.64ms
+[2025-07-07 17:12:45] [Rank 0] step:3181/10000 train_time:215186ms step_avg:67.65ms
+[2025-07-07 17:12:47] [Rank 0] step:3201/10000 train_time:216554ms step_avg:67.65ms
+[2025-07-07 17:12:48] [Rank 0] step:3221/10000 train_time:217923ms step_avg:67.66ms
+[2025-07-07 17:12:49] [Rank 0] step:3241/10000 train_time:219292ms step_avg:67.66ms
+[2025-07-07 17:12:51] [Rank 0] step:3261/10000 train_time:220705ms step_avg:67.68ms
+[2025-07-07 17:12:52] [Rank 0] step:3281/10000 train_time:222075ms step_avg:67.69ms
+[2025-07-07 17:12:54] [Rank 0] step:3301/10000 train_time:223445ms step_avg:67.69ms
+[2025-07-07 17:12:55] [Rank 0] step:3321/10000 train_time:224817ms step_avg:67.70ms
+[2025-07-07 17:12:56] [Rank 0] step:3341/10000 train_time:226188ms step_avg:67.70ms
+[2025-07-07 17:12:58] [Rank 0] step:3361/10000 train_time:227560ms step_avg:67.71ms
+[2025-07-07 17:12:59] [Rank 0] step:3381/10000 train_time:228932ms step_avg:67.71ms
+[2025-07-07 17:13:00] [Rank 0] step:3401/10000 train_time:230304ms step_avg:67.72ms
+[2025-07-07 17:13:02] [Rank 0] step:3421/10000 train_time:231726ms step_avg:67.74ms
+[2025-07-07 17:13:03] [Rank 0] step:3441/10000 train_time:233090ms step_avg:67.74ms
+[2025-07-07 17:13:05] [Rank 0] step:3461/10000 train_time:234462ms step_avg:67.74ms
+[2025-07-07 17:13:06] [Rank 0] step:3481/10000 train_time:235833ms step_avg:67.75ms
+[2025-07-07 17:13:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:13:08] [Rank 0] PRINT: step:3500/10000 train_loss:1.2423 val_loss:1.2163 train_time:237830ms step_avg:67.95ms
+[2025-07-07 17:13:08] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:13:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:13:08] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:18:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:18:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:18:37] [Rank 0] Total Loss: 5.0709
+[2025-07-07 17:18:37] [Rank 0] Total FTA: 0.2773
+[2025-07-07 17:18:37] [Rank 0] Group 0 Loss: 5.2875
+[2025-07-07 17:18:37] [Rank 0] Group 1 Loss: 4.8895
+[2025-07-07 17:18:37] [Rank 0] Group 2 Loss: 4.8947
+[2025-07-07 17:18:37] [Rank 0] Group 3 Loss: 5.1043
+[2025-07-07 17:18:37] [Rank 0] Group 4 Loss: 5.0854
+[2025-07-07 17:18:37] [Rank 0] Group 5 Loss: 5.0431
+[2025-07-07 17:18:37] [Rank 0] Group 6 Loss: 4.9433
+[2025-07-07 17:18:37] [Rank 0] Group 7 Loss: 5.0549
+[2025-07-07 17:18:37] [Rank 0] Group 8 Loss: 5.0713
+[2025-07-07 17:18:37] [Rank 0] Group 9 Loss: 5.0620
+[2025-07-07 17:18:37] [Rank 0] Group 10 Loss: 5.1152
+[2025-07-07 17:18:37] [Rank 0] Group 11 Loss: 5.0683
+[2025-07-07 17:18:38] [Rank 0] Group 0 FTA: 0.3238
+[2025-07-07 17:18:38] [Rank 0] Group 1 FTA: 0.5000
+[2025-07-07 17:18:38] [Rank 0] Group 2 FTA: 0.2448
+[2025-07-07 17:18:38] [Rank 0] Group 3 FTA: 0.2839
+[2025-07-07 17:18:38] [Rank 0] Group 4 FTA: 0.2839
+[2025-07-07 17:18:38] [Rank 0] Group 5 FTA: 0.1953
+[2025-07-07 17:18:38] [Rank 0] Group 6 FTA: 0.2448
+[2025-07-07 17:18:38] [Rank 0] Group 7 FTA: 0.2344
+[2025-07-07 17:18:38] [Rank 0] Group 8 FTA: 0.3255
+[2025-07-07 17:18:38] [Rank 0] Group 9 FTA: 0.2656
+[2025-07-07 17:18:38] [Rank 0] Group 10 FTA: 0.2520
+[2025-07-07 17:18:38] [Rank 0] Group 11 FTA: 0.2227
+[2025-07-07 17:18:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 17:18:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 17:18:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:18:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
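
All four PNGs are rewritten after every evaluation, which suggests the script accumulates the metric history in memory and redraws each figure from scratch before overwriting the file. A matplotlib sketch of that update-and-overwrite pattern (the output path appears in the log; the data structures are assumptions):

    import matplotlib
    matplotlib.use("Agg")  # render headlessly on a training node
    import matplotlib.pyplot as plt

    steps, group_losses = [], {g: [] for g in range(12)}  # accumulated history

    def update_per_class_loss_curves(step, loss_by_group, path):
        steps.append(step)
        for g in range(12):
            group_losses[g].append(loss_by_group[g])
        fig, ax = plt.subplots(figsize=(8, 5))
        for g in range(12):
            ax.plot(steps, group_losses[g], label=f"Group {g}")
        ax.set_xlabel("step")
        ax.set_ylabel("detailed eval loss")
        ax.legend(ncol=3, fontsize=7)
        fig.savefig(path, dpi=120)  # e.g. ".../per_class_loss_curves.png"
        plt.close(fig)
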
+[2025-07-07 17:18:39] [Rank 0] step:3501/10000 train_time:237841ms step_avg:67.94ms
+[2025-07-07 17:18:40] [Rank 0] step:3521/10000 train_time:238600ms step_avg:67.76ms
+[2025-07-07 17:18:42] [Rank 0] step:3541/10000 train_time:239965ms step_avg:67.77ms
+[2025-07-07 17:18:43] [Rank 0] step:3561/10000 train_time:241331ms step_avg:67.77ms
+[2025-07-07 17:18:44] [Rank 0] step:3581/10000 train_time:242696ms step_avg:67.77ms
+[2025-07-07 17:18:46] [Rank 0] step:3601/10000 train_time:244062ms step_avg:67.78ms
+[2025-07-07 17:18:47] [Rank 0] step:3621/10000 train_time:245455ms step_avg:67.79ms
+[2025-07-07 17:18:49] [Rank 0] step:3641/10000 train_time:246823ms step_avg:67.79ms
+[2025-07-07 17:18:50] [Rank 0] step:3661/10000 train_time:248190ms step_avg:67.79ms
+[2025-07-07 17:18:51] [Rank 0] step:3681/10000 train_time:249557ms step_avg:67.80ms
+[2025-07-07 17:18:53] [Rank 0] step:3701/10000 train_time:250926ms step_avg:67.80ms
+[2025-07-07 17:18:54] [Rank 0] step:3721/10000 train_time:252293ms step_avg:67.80ms
+[2025-07-07 17:18:55] [Rank 0] step:3741/10000 train_time:253662ms step_avg:67.81ms
+[2025-07-07 17:18:57] [Rank 0] step:3761/10000 train_time:255030ms step_avg:67.81ms
+[2025-07-07 17:18:58] [Rank 0] step:3781/10000 train_time:257072ms step_avg:67.99ms
+[2025-07-07 17:19:00] [Rank 0] step:3801/10000 train_time:257808ms step_avg:67.83ms
+[2025-07-07 17:19:01] [Rank 0] step:3821/10000 train_time:259178ms step_avg:67.83ms
+[2025-07-07 17:19:02] [Rank 0] step:3841/10000 train_time:260549ms step_avg:67.83ms
+[2025-07-07 17:19:04] [Rank 0] step:3861/10000 train_time:261921ms step_avg:67.84ms
+[2025-07-07 17:19:05] [Rank 0] step:3881/10000 train_time:263294ms step_avg:67.84ms
+[2025-07-07 17:19:06] [Rank 0] step:3901/10000 train_time:264666ms step_avg:67.85ms
+[2025-07-07 17:19:08] [Rank 0] step:3921/10000 train_time:266037ms step_avg:67.85ms
+[2025-07-07 17:19:09] [Rank 0] step:3941/10000 train_time:267410ms step_avg:67.85ms
+[2025-07-07 17:19:11] [Rank 0] step:3961/10000 train_time:268782ms step_avg:67.86ms
+[2025-07-07 17:19:12] [Rank 0] step:3981/10000 train_time:270192ms step_avg:67.87ms
+[2025-07-07 17:19:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:19:14] [Rank 0] PRINT: step:4000/10000 train_loss:1.1838 val_loss:1.1760 train_time:272271ms step_avg:68.07ms
+[2025-07-07 17:19:14] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:19:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:19:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:24:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:24:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:24:43] [Rank 0] Total Loss: 5.1653
+[2025-07-07 17:24:43] [Rank 0] Total FTA: 0.3401
+[2025-07-07 17:24:43] [Rank 0] Group 0 Loss: 5.4471
+[2025-07-07 17:24:43] [Rank 0] Group 1 Loss: 4.7784
+[2025-07-07 17:24:43] [Rank 0] Group 2 Loss: 5.0461
+[2025-07-07 17:24:43] [Rank 0] Group 3 Loss: 5.1413
+[2025-07-07 17:24:43] [Rank 0] Group 4 Loss: 5.0649
+[2025-07-07 17:24:43] [Rank 0] Group 5 Loss: 5.0969
+[2025-07-07 17:24:43] [Rank 0] Group 6 Loss: 5.0159
+[2025-07-07 17:24:43] [Rank 0] Group 7 Loss: 5.2008
+[2025-07-07 17:24:43] [Rank 0] Group 8 Loss: 5.2199
+[2025-07-07 17:24:43] [Rank 0] Group 9 Loss: 5.1976
+[2025-07-07 17:24:43] [Rank 0] Group 10 Loss: 5.2123
+[2025-07-07 17:24:43] [Rank 0] Group 11 Loss: 5.2064
+[2025-07-07 17:24:43] [Rank 0] Group 0 FTA: 0.3290
+[2025-07-07 17:24:43] [Rank 0] Group 1 FTA: 0.3203
+[2025-07-07 17:24:43] [Rank 0] Group 2 FTA: 0.3984
+[2025-07-07 17:24:43] [Rank 0] Group 3 FTA: 0.3984
+[2025-07-07 17:24:43] [Rank 0] Group 4 FTA: 0.2708
+[2025-07-07 17:24:43] [Rank 0] Group 5 FTA: 0.3750
+[2025-07-07 17:24:43] [Rank 0] Group 6 FTA: 0.3203
+[2025-07-07 17:24:43] [Rank 0] Group 7 FTA: 0.3125
+[2025-07-07 17:24:43] [Rank 0] Group 8 FTA: 0.3385
+[2025-07-07 17:24:43] [Rank 0] Group 9 FTA: 0.3203
+[2025-07-07 17:24:43] [Rank 0] Group 10 FTA: 0.3516
+[2025-07-07 17:24:43] [Rank 0] Group 11 FTA: 0.3428
+[2025-07-07 17:24:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 17:24:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 17:24:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:24:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
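
At this step the unweighted mean of the twelve group FTAs is 0.3398 while the logged total is 0.3401 — close but not identical, which is consistent with a total computed over all evaluated samples (the groups being slightly unequal in size) rather than by averaging the group rates. A sketch of the sample-weighted aggregate:

    def total_fta(group_hits, group_sizes):
        # Correct first tokens over all samples; this equals the plain mean
        # of the group FTAs only when every group has the same size.
        return sum(group_hits) / sum(group_sizes)
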
+[2025-07-07 17:24:44] [Rank 0] step:4001/10000 train_time:272282ms step_avg:68.05ms
+[2025-07-07 17:24:46] [Rank 0] step:4021/10000 train_time:273048ms step_avg:67.91ms
+[2025-07-07 17:24:47] [Rank 0] step:4041/10000 train_time:274413ms step_avg:67.91ms
+[2025-07-07 17:24:48] [Rank 0] step:4061/10000 train_time:275779ms step_avg:67.91ms
+[2025-07-07 17:24:50] [Rank 0] step:4081/10000 train_time:277145ms step_avg:67.91ms
+[2025-07-07 17:24:51] [Rank 0] step:4101/10000 train_time:278512ms step_avg:67.91ms
+[2025-07-07 17:24:53] [Rank 0] step:4121/10000 train_time:279879ms step_avg:67.92ms
+[2025-07-07 17:24:54] [Rank 0] step:4141/10000 train_time:281502ms step_avg:67.98ms
+[2025-07-07 17:24:55] [Rank 0] step:4161/10000 train_time:282673ms step_avg:67.93ms
+[2025-07-07 17:24:57] [Rank 0] step:4181/10000 train_time:284043ms step_avg:67.94ms
+[2025-07-07 17:24:58] [Rank 0] step:4201/10000 train_time:285412ms step_avg:67.94ms
+[2025-07-07 17:24:59] [Rank 0] step:4221/10000 train_time:286783ms step_avg:67.94ms
+[2025-07-07 17:25:01] [Rank 0] step:4241/10000 train_time:288154ms step_avg:67.94ms
+[2025-07-07 17:25:02] [Rank 0] step:4261/10000 train_time:289524ms step_avg:67.95ms
+[2025-07-07 17:25:04] [Rank 0] step:4281/10000 train_time:290894ms step_avg:67.95ms
+[2025-07-07 17:25:05] [Rank 0] step:4301/10000 train_time:292264ms step_avg:67.95ms
+[2025-07-07 17:25:06] [Rank 0] step:4321/10000 train_time:294321ms step_avg:68.11ms
+[2025-07-07 17:25:08] [Rank 0] step:4341/10000 train_time:295059ms step_avg:67.97ms
+[2025-07-07 17:25:09] [Rank 0] step:4361/10000 train_time:296431ms step_avg:67.97ms
+[2025-07-07 17:25:10] [Rank 0] step:4381/10000 train_time:297802ms step_avg:67.98ms
+[2025-07-07 17:25:12] [Rank 0] step:4401/10000 train_time:299174ms step_avg:67.98ms
+[2025-07-07 17:25:13] [Rank 0] step:4421/10000 train_time:300547ms step_avg:67.98ms
+[2025-07-07 17:25:15] [Rank 0] step:4441/10000 train_time:301920ms step_avg:67.98ms
+[2025-07-07 17:25:16] [Rank 0] step:4461/10000 train_time:303293ms step_avg:67.99ms
+[2025-07-07 17:25:17] [Rank 0] step:4481/10000 train_time:304665ms step_avg:67.99ms
+[2025-07-07 17:25:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:25:20] [Rank 0] PRINT: step:4500/10000 train_loss:1.1417 val_loss:1.1427 train_time:306663ms step_avg:68.15ms
+[2025-07-07 17:25:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:25:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:25:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:30:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:30:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:30:50] [Rank 0] Total Loss: 5.3383
+[2025-07-07 17:30:50] [Rank 0] Total FTA: 0.4021
+[2025-07-07 17:30:50] [Rank 0] Group 0 Loss: 5.6074
+[2025-07-07 17:30:50] [Rank 0] Group 1 Loss: 5.3526
+[2025-07-07 17:30:50] [Rank 0] Group 2 Loss: 5.0475
+[2025-07-07 17:30:50] [Rank 0] Group 3 Loss: 5.2848
+[2025-07-07 17:30:50] [Rank 0] Group 4 Loss: 5.3222
+[2025-07-07 17:30:50] [Rank 0] Group 5 Loss: 5.2364
+[2025-07-07 17:30:50] [Rank 0] Group 6 Loss: 5.2293
+[2025-07-07 17:30:50] [Rank 0] Group 7 Loss: 5.3358
+[2025-07-07 17:30:50] [Rank 0] Group 8 Loss: 5.3257
+[2025-07-07 17:30:50] [Rank 0] Group 9 Loss: 5.3237
+[2025-07-07 17:30:50] [Rank 0] Group 10 Loss: 5.3322
+[2025-07-07 17:30:50] [Rank 0] Group 11 Loss: 5.3572
+[2025-07-07 17:30:50] [Rank 0] Group 0 FTA: 0.4993
+[2025-07-07 17:30:50] [Rank 0] Group 1 FTA: 0.4922
+[2025-07-07 17:30:50] [Rank 0] Group 2 FTA: 0.3307
+[2025-07-07 17:30:50] [Rank 0] Group 3 FTA: 0.3047
+[2025-07-07 17:30:50] [Rank 0] Group 4 FTA: 0.3203
+[2025-07-07 17:30:50] [Rank 0] Group 5 FTA: 0.4323
+[2025-07-07 17:30:50] [Rank 0] Group 6 FTA: 0.3750
+[2025-07-07 17:30:50] [Rank 0] Group 7 FTA: 0.3958
+[2025-07-07 17:30:50] [Rank 0] Group 8 FTA: 0.3958
+[2025-07-07 17:30:50] [Rank 0] Group 9 FTA: 0.3633
+[2025-07-07 17:30:50] [Rank 0] Group 10 FTA: 0.3906
+[2025-07-07 17:30:50] [Rank 0] Group 11 FTA: 0.4082
+[2025-07-07 17:30:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 17:30:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 17:30:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:30:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
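
Note the persistent scale gap between the two loss families: at step 4500 val_loss is 1.1427 while the detailed-eval Total Loss is 5.3383, and the latter drifts upward across checkpoints even as FTA climbs. One unconfirmed explanation is that the detailed pass averages cross-entropy only over answer-span tokens — a far smaller and harder subset than the all-token validation loss. A sketch of such a masked loss (entirely an assumption about this script):

    import torch.nn.functional as F

    def answer_only_loss(logits, targets, answer_mask):
        # answer_mask: bool (batch, seq), True where the target token lies in
        # the answer span; averaging over these positions alone produces a
        # loss on a different scale from the all-token val_loss.
        return F.cross_entropy(logits[answer_mask], targets[answer_mask])
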
+[2025-07-07 17:30:52] [Rank 0] step:4501/10000 train_time:306681ms step_avg:68.14ms
+[2025-07-07 17:30:53] [Rank 0] step:4521/10000 train_time:308150ms step_avg:68.16ms
+[2025-07-07 17:30:55] [Rank 0] step:4541/10000 train_time:309515ms step_avg:68.16ms
+[2025-07-07 17:30:56] [Rank 0] step:4561/10000 train_time:310882ms step_avg:68.16ms
+[2025-07-07 17:30:58] [Rank 0] step:4581/10000 train_time:312249ms step_avg:68.16ms
+[2025-07-07 17:30:59] [Rank 0] step:4601/10000 train_time:313616ms step_avg:68.16ms
+[2025-07-07 17:31:00] [Rank 0] step:4621/10000 train_time:314983ms step_avg:68.16ms
+[2025-07-07 17:31:02] [Rank 0] step:4641/10000 train_time:316350ms step_avg:68.16ms
+[2025-07-07 17:31:03] [Rank 0] step:4661/10000 train_time:317719ms step_avg:68.17ms
+[2025-07-07 17:31:04] [Rank 0] step:4681/10000 train_time:319139ms step_avg:68.18ms
+[2025-07-07 17:31:06] [Rank 0] step:4701/10000 train_time:320505ms step_avg:68.18ms
+[2025-07-07 17:31:07] [Rank 0] step:4721/10000 train_time:321873ms step_avg:68.18ms
+[2025-07-07 17:31:09] [Rank 0] step:4741/10000 train_time:323243ms step_avg:68.18ms
+[2025-07-07 17:31:10] [Rank 0] step:4761/10000 train_time:324613ms step_avg:68.18ms
+[2025-07-07 17:31:11] [Rank 0] step:4781/10000 train_time:325982ms step_avg:68.18ms
+[2025-07-07 17:31:13] [Rank 0] step:4801/10000 train_time:327352ms step_avg:68.18ms
+[2025-07-07 17:31:14] [Rank 0] step:4821/10000 train_time:328723ms step_avg:68.19ms
+[2025-07-07 17:31:15] [Rank 0] step:4841/10000 train_time:330095ms step_avg:68.19ms
+[2025-07-07 17:31:17] [Rank 0] step:4861/10000 train_time:331719ms step_avg:68.24ms
+[2025-07-07 17:31:18] [Rank 0] step:4881/10000 train_time:332890ms step_avg:68.20ms
+[2025-07-07 17:31:20] [Rank 0] step:4901/10000 train_time:334262ms step_avg:68.20ms
+[2025-07-07 17:31:21] [Rank 0] step:4921/10000 train_time:335635ms step_avg:68.20ms
+[2025-07-07 17:31:22] [Rank 0] step:4941/10000 train_time:337008ms step_avg:68.21ms
+[2025-07-07 17:31:24] [Rank 0] step:4961/10000 train_time:338382ms step_avg:68.21ms
+[2025-07-07 17:31:25] [Rank 0] step:4981/10000 train_time:339755ms step_avg:68.21ms
+[2025-07-07 17:31:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:31:27] [Rank 0] PRINT: step:5000/10000 train_loss:1.1033 val_loss:1.1042 train_time:341759ms step_avg:68.35ms
+[2025-07-07 17:31:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:31:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:31:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:36:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:36:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:36:58] [Rank 0] Total Loss: 5.1365
+[2025-07-07 17:36:58] [Rank 0] Total FTA: 0.4490
+[2025-07-07 17:36:58] [Rank 0] Group 0 Loss: 5.2517
+[2025-07-07 17:36:58] [Rank 0] Group 1 Loss: 4.9470
+[2025-07-07 17:36:58] [Rank 0] Group 2 Loss: 4.8584
+[2025-07-07 17:36:58] [Rank 0] Group 3 Loss: 5.1068
+[2025-07-07 17:36:58] [Rank 0] Group 4 Loss: 5.1623
+[2025-07-07 17:36:58] [Rank 0] Group 5 Loss: 5.1363
+[2025-07-07 17:36:58] [Rank 0] Group 6 Loss: 5.1085
+[2025-07-07 17:36:58] [Rank 0] Group 7 Loss: 5.1254
+[2025-07-07 17:36:58] [Rank 0] Group 8 Loss: 5.1899
+[2025-07-07 17:36:58] [Rank 0] Group 9 Loss: 5.2234
+[2025-07-07 17:36:58] [Rank 0] Group 10 Loss: 5.1703
+[2025-07-07 17:36:58] [Rank 0] Group 11 Loss: 5.1829
+[2025-07-07 17:36:58] [Rank 0] Group 0 FTA: 0.3290
+[2025-07-07 17:36:58] [Rank 0] Group 1 FTA: 0.4896
+[2025-07-07 17:36:58] [Rank 0] Group 2 FTA: 0.5391
+[2025-07-07 17:36:58] [Rank 0] Group 3 FTA: 0.3906
+[2025-07-07 17:36:58] [Rank 0] Group 4 FTA: 0.4844
+[2025-07-07 17:36:58] [Rank 0] Group 5 FTA: 0.5391
+[2025-07-07 17:36:58] [Rank 0] Group 6 FTA: 0.4297
+[2025-07-07 17:36:58] [Rank 0] Group 7 FTA: 0.4219
+[2025-07-07 17:36:58] [Rank 0] Group 8 FTA: 0.4323
+[2025-07-07 17:36:58] [Rank 0] Group 9 FTA: 0.4570
+[2025-07-07 17:36:58] [Rank 0] Group 10 FTA: 0.4668
+[2025-07-07 17:36:58] [Rank 0] Group 11 FTA: 0.4775
+[2025-07-07 17:36:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 17:36:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 17:36:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:36:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
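
Wall-clock accounting for this stretch: the detailed evaluation at step 5000 runs from 17:31:27 to 17:36:58 (331 s), while train_time grows by only about 35 s over the preceding 500 optimizer steps (306663 ms at step 4500 to 341759 ms at step 5000). The periodic evaluations, not training, dominate elapsed time, and train_time/step_avg evidently exclude them. The arithmetic, with values copied from the log:

    eval_s = (17 * 3600 + 36 * 60 + 58) - (17 * 3600 + 31 * 60 + 27)  # 331 s
    train_s = (341759 - 306663) / 1000    # 35.096 s of train_time, steps 4500 -> 5000
    print(f"eval {eval_s}s vs training {train_s:.1f}s -> ~{eval_s / train_s:.0f}x")
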
+[2025-07-07 17:36:59] [Rank 0] step:5001/10000 train_time:341770ms step_avg:68.34ms
+[2025-07-07 17:37:01] [Rank 0] step:5021/10000 train_time:342526ms step_avg:68.22ms
+[2025-07-07 17:37:02] [Rank 0] step:5041/10000 train_time:344149ms step_avg:68.27ms
+[2025-07-07 17:37:03] [Rank 0] step:5061/10000 train_time:345316ms step_avg:68.23ms
+[2025-07-07 17:37:05] [Rank 0] step:5081/10000 train_time:346683ms step_avg:68.23ms
+[2025-07-07 17:37:06] [Rank 0] step:5101/10000 train_time:348050ms step_avg:68.23ms
+[2025-07-07 17:37:07] [Rank 0] step:5121/10000 train_time:349417ms step_avg:68.23ms
+[2025-07-07 17:37:09] [Rank 0] step:5141/10000 train_time:350784ms step_avg:68.23ms
+[2025-07-07 17:37:10] [Rank 0] step:5161/10000 train_time:352151ms step_avg:68.23ms
+[2025-07-07 17:37:12] [Rank 0] step:5181/10000 train_time:353520ms step_avg:68.23ms
+[2025-07-07 17:37:13] [Rank 0] step:5201/10000 train_time:354891ms step_avg:68.24ms
+[2025-07-07 17:37:14] [Rank 0] step:5221/10000 train_time:356512ms step_avg:68.28ms
+[2025-07-07 17:37:16] [Rank 0] step:5241/10000 train_time:357671ms step_avg:68.24ms
+[2025-07-07 17:37:17] [Rank 0] step:5261/10000 train_time:359041ms step_avg:68.25ms
+[2025-07-07 17:37:18] [Rank 0] step:5281/10000 train_time:360412ms step_avg:68.25ms
+[2025-07-07 17:37:20] [Rank 0] step:5301/10000 train_time:361783ms step_avg:68.25ms
+[2025-07-07 17:37:21] [Rank 0] step:5321/10000 train_time:363154ms step_avg:68.25ms
+[2025-07-07 17:37:23] [Rank 0] step:5341/10000 train_time:364525ms step_avg:68.25ms
+[2025-07-07 17:37:24] [Rank 0] step:5361/10000 train_time:365896ms step_avg:68.25ms
+[2025-07-07 17:37:25] [Rank 0] step:5381/10000 train_time:367267ms step_avg:68.25ms
+[2025-07-07 17:37:27] [Rank 0] step:5401/10000 train_time:368889ms step_avg:68.30ms
+[2025-07-07 17:37:28] [Rank 0] step:5421/10000 train_time:370055ms step_avg:68.26ms
+[2025-07-07 17:37:29] [Rank 0] step:5441/10000 train_time:371429ms step_avg:68.26ms
+[2025-07-07 17:37:31] [Rank 0] step:5461/10000 train_time:372800ms step_avg:68.27ms
+[2025-07-07 17:37:32] [Rank 0] step:5481/10000 train_time:374174ms step_avg:68.27ms
+[2025-07-07 17:37:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:37:34] [Rank 0] PRINT: step:5500/10000 train_loss:1.0632 val_loss:1.0770 train_time:376170ms step_avg:68.39ms
+[2025-07-07 17:37:34] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:37:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:37:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:43:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:43:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:43:04] [Rank 0] Total Loss: 5.3431
+[2025-07-07 17:43:04] [Rank 0] Total FTA: 0.5700
+[2025-07-07 17:43:04] [Rank 0] Group 0 Loss: 5.5041
+[2025-07-07 17:43:04] [Rank 0] Group 1 Loss: 5.1913
+[2025-07-07 17:43:04] [Rank 0] Group 2 Loss: 5.0969
+[2025-07-07 17:43:04] [Rank 0] Group 3 Loss: 5.3365
+[2025-07-07 17:43:04] [Rank 0] Group 4 Loss: 5.3965
+[2025-07-07 17:43:04] [Rank 0] Group 5 Loss: 5.2655
+[2025-07-07 17:43:04] [Rank 0] Group 6 Loss: 5.2263
+[2025-07-07 17:43:04] [Rank 0] Group 7 Loss: 5.3906
+[2025-07-07 17:43:04] [Rank 0] Group 8 Loss: 5.3747
+[2025-07-07 17:43:04] [Rank 0] Group 9 Loss: 5.3408
+[2025-07-07 17:43:04] [Rank 0] Group 10 Loss: 5.3819
+[2025-07-07 17:43:04] [Rank 0] Group 11 Loss: 5.3783
+[2025-07-07 17:43:04] [Rank 0] Group 0 FTA: 0.6931
+[2025-07-07 17:43:04] [Rank 0] Group 1 FTA: 0.4766
+[2025-07-07 17:43:04] [Rank 0] Group 2 FTA: 0.7500
+[2025-07-07 17:43:04] [Rank 0] Group 3 FTA: 0.4167
+[2025-07-07 17:43:04] [Rank 0] Group 4 FTA: 0.6016
+[2025-07-07 17:43:04] [Rank 0] Group 5 FTA: 0.5964
+[2025-07-07 17:43:04] [Rank 0] Group 6 FTA: 0.4740
+[2025-07-07 17:43:04] [Rank 0] Group 7 FTA: 0.5443
+[2025-07-07 17:43:04] [Rank 0] Group 8 FTA: 0.5781
+[2025-07-07 17:43:04] [Rank 0] Group 9 FTA: 0.5742
+[2025-07-07 17:43:04] [Rank 0] Group 10 FTA: 0.5273
+[2025-07-07 17:43:04] [Rank 0] Group 11 FTA: 0.5439
+[2025-07-07 17:43:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 17:43:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 17:43:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:43:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 17:43:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:43:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 17:43:06] [Rank 0] step:5501/10000 train_time:376181ms step_avg:68.38ms
+[2025-07-07 17:43:07] [Rank 0] step:5521/10000 train_time:376958ms step_avg:68.28ms
+[2025-07-07 17:43:09] [Rank 0] step:5541/10000 train_time:378324ms step_avg:68.28ms
+[2025-07-07 17:43:10] [Rank 0] step:5561/10000 train_time:379690ms step_avg:68.28ms
+[2025-07-07 17:43:11] [Rank 0] step:5581/10000 train_time:381107ms step_avg:68.29ms
+[2025-07-07 17:43:13] [Rank 0] step:5601/10000 train_time:382459ms step_avg:68.28ms
+[2025-07-07 17:43:14] [Rank 0] step:5621/10000 train_time:383825ms step_avg:68.28ms
+[2025-07-07 17:43:15] [Rank 0] step:5641/10000 train_time:385192ms step_avg:68.28ms
+[2025-07-07 17:43:17] [Rank 0] step:5661/10000 train_time:386560ms step_avg:68.28ms
+[2025-07-07 17:43:18] [Rank 0] step:5681/10000 train_time:387929ms step_avg:68.29ms
+[2025-07-07 17:43:20] [Rank 0] step:5701/10000 train_time:389298ms step_avg:68.29ms
+[2025-07-07 17:43:21] [Rank 0] step:5721/10000 train_time:390666ms step_avg:68.29ms
+[2025-07-07 17:43:22] [Rank 0] step:5741/10000 train_time:392037ms step_avg:68.29ms
+[2025-07-07 17:43:24] [Rank 0] step:5761/10000 train_time:393659ms step_avg:68.33ms
+[2025-07-07 17:43:25] [Rank 0] step:5781/10000 train_time:394812ms step_avg:68.29ms
+[2025-07-07 17:43:26] [Rank 0] step:5801/10000 train_time:396183ms step_avg:68.30ms
+[2025-07-07 17:43:28] [Rank 0] step:5821/10000 train_time:397641ms step_avg:68.31ms
+[2025-07-07 17:43:29] [Rank 0] step:5841/10000 train_time:399013ms step_avg:68.31ms
+[2025-07-07 17:43:31] [Rank 0] step:5861/10000 train_time:400385ms step_avg:68.31ms
+[2025-07-07 17:43:32] [Rank 0] step:5881/10000 train_time:401758ms step_avg:68.31ms
+[2025-07-07 17:43:33] [Rank 0] step:5901/10000 train_time:403131ms step_avg:68.32ms
+[2025-07-07 17:43:35] [Rank 0] step:5921/10000 train_time:404504ms step_avg:68.32ms
+[2025-07-07 17:43:36] [Rank 0] step:5941/10000 train_time:405927ms step_avg:68.33ms
+[2025-07-07 17:43:38] [Rank 0] step:5961/10000 train_time:407293ms step_avg:68.33ms
+[2025-07-07 17:43:39] [Rank 0] step:5981/10000 train_time:408665ms step_avg:68.33ms
+[2025-07-07 17:43:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:43:41] [Rank 0] PRINT: step:6000/10000 train_loss:1.0208 val_loss:1.0507 train_time:410663ms step_avg:68.44ms
+[2025-07-07 17:43:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:43:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:43:41] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:49:12] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:49:12] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:49:12] [Rank 0] Total Loss: 5.2899
+[2025-07-07 17:49:12] [Rank 0] Total FTA: 0.6384
+[2025-07-07 17:49:12] [Rank 0] Group 0 Loss: 5.3624
+[2025-07-07 17:49:12] [Rank 0] Group 1 Loss: 5.0958
+[2025-07-07 17:49:12] [Rank 0] Group 2 Loss: 5.0136
+[2025-07-07 17:49:12] [Rank 0] Group 3 Loss: 5.2502
+[2025-07-07 17:49:12] [Rank 0] Group 4 Loss: 5.3210
+[2025-07-07 17:49:12] [Rank 0] Group 5 Loss: 5.2735
+[2025-07-07 17:49:12] [Rank 0] Group 6 Loss: 5.2645
+[2025-07-07 17:49:12] [Rank 0] Group 7 Loss: 5.3613
+[2025-07-07 17:49:12] [Rank 0] Group 8 Loss: 5.3044
+[2025-07-07 17:49:12] [Rank 0] Group 9 Loss: 5.3362
+[2025-07-07 17:49:12] [Rank 0] Group 10 Loss: 5.3398
+[2025-07-07 17:49:12] [Rank 0] Group 11 Loss: 5.3618
+[2025-07-07 17:49:12] [Rank 0] Group 0 FTA: 0.6853
+[2025-07-07 17:49:12] [Rank 0] Group 1 FTA: 0.5417
+[2025-07-07 17:49:12] [Rank 0] Group 2 FTA: 0.7760
+[2025-07-07 17:49:12] [Rank 0] Group 3 FTA: 0.6094
+[2025-07-07 17:49:12] [Rank 0] Group 4 FTA: 0.6615
+[2025-07-07 17:49:12] [Rank 0] Group 5 FTA: 0.6927
+[2025-07-07 17:49:12] [Rank 0] Group 6 FTA: 0.5156
+[2025-07-07 17:49:12] [Rank 0] Group 7 FTA: 0.6562
+[2025-07-07 17:49:12] [Rank 0] Group 8 FTA: 0.5573
+[2025-07-07 17:49:12] [Rank 0] Group 9 FTA: 0.6836
+[2025-07-07 17:49:12] [Rank 0] Group 10 FTA: 0.6602
+[2025-07-07 17:49:12] [Rank 0] Group 11 FTA: 0.6172
+[2025-07-07 17:49:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 17:49:13] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 17:49:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:49:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 17:49:13] [Rank 0] step:6001/10000 train_time:410674ms step_avg:68.43ms
+[2025-07-07 17:49:15] [Rank 0] step:6021/10000 train_time:411427ms step_avg:68.33ms
+[2025-07-07 17:49:16] [Rank 0] step:6041/10000 train_time:412791ms step_avg:68.33ms
+[2025-07-07 17:49:18] [Rank 0] step:6061/10000 train_time:414155ms step_avg:68.33ms
+[2025-07-07 17:49:19] [Rank 0] step:6081/10000 train_time:415521ms step_avg:68.33ms
+[2025-07-07 17:49:20] [Rank 0] step:6101/10000 train_time:416887ms step_avg:68.33ms
+[2025-07-07 17:49:22] [Rank 0] step:6121/10000 train_time:418299ms step_avg:68.34ms
+[2025-07-07 17:49:23] [Rank 0] step:6141/10000 train_time:419654ms step_avg:68.34ms
+[2025-07-07 17:49:24] [Rank 0] step:6161/10000 train_time:421021ms step_avg:68.34ms
+[2025-07-07 17:49:26] [Rank 0] step:6181/10000 train_time:422389ms step_avg:68.34ms
+[2025-07-07 17:49:27] [Rank 0] step:6201/10000 train_time:423758ms step_avg:68.34ms
+[2025-07-07 17:49:28] [Rank 0] step:6221/10000 train_time:425127ms step_avg:68.34ms
+[2025-07-07 17:49:30] [Rank 0] step:6241/10000 train_time:426497ms step_avg:68.34ms
+[2025-07-07 17:49:31] [Rank 0] step:6261/10000 train_time:427868ms step_avg:68.34ms
+[2025-07-07 17:49:33] [Rank 0] step:6281/10000 train_time:429237ms step_avg:68.34ms
+[2025-07-07 17:49:34] [Rank 0] step:6301/10000 train_time:431269ms step_avg:68.44ms
+[2025-07-07 17:49:35] [Rank 0] step:6321/10000 train_time:432006ms step_avg:68.34ms
+[2025-07-07 17:49:37] [Rank 0] step:6341/10000 train_time:433377ms step_avg:68.35ms
+[2025-07-07 17:49:38] [Rank 0] step:6361/10000 train_time:434749ms step_avg:68.35ms
+[2025-07-07 17:49:39] [Rank 0] step:6381/10000 train_time:436121ms step_avg:68.35ms
+[2025-07-07 17:49:41] [Rank 0] step:6401/10000 train_time:437493ms step_avg:68.35ms
+[2025-07-07 17:49:42] [Rank 0] step:6421/10000 train_time:438864ms step_avg:68.35ms
+[2025-07-07 17:49:44] [Rank 0] step:6441/10000 train_time:440237ms step_avg:68.35ms
+[2025-07-07 17:49:45] [Rank 0] step:6461/10000 train_time:441611ms step_avg:68.35ms
+[2025-07-07 17:49:46] [Rank 0] step:6481/10000 train_time:442985ms step_avg:68.35ms
+[2025-07-07 17:49:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:49:49] [Rank 0] PRINT: step:6500/10000 train_loss:0.9832 val_loss:1.0106 train_time:444983ms step_avg:68.46ms
+[2025-07-07 17:49:49] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:49:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:49:49] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:55:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:55:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:55:17] [Rank 0] Total Loss: 5.5942
+[2025-07-07 17:55:17] [Rank 0] Total FTA: 0.6920
+[2025-07-07 17:55:17] [Rank 0] Group 0 Loss: 5.7563
+[2025-07-07 17:55:17] [Rank 0] Group 1 Loss: 5.4888
+[2025-07-07 17:55:17] [Rank 0] Group 2 Loss: 5.3132
+[2025-07-07 17:55:17] [Rank 0] Group 3 Loss: 5.6158
+[2025-07-07 17:55:17] [Rank 0] Group 4 Loss: 5.6133
+[2025-07-07 17:55:17] [Rank 0] Group 5 Loss: 5.5231
+[2025-07-07 17:55:17] [Rank 0] Group 6 Loss: 5.4636
+[2025-07-07 17:55:17] [Rank 0] Group 7 Loss: 5.6595
+[2025-07-07 17:55:17] [Rank 0] Group 8 Loss: 5.5778
+[2025-07-07 17:55:17] [Rank 0] Group 9 Loss: 5.6039
+[2025-07-07 17:55:17] [Rank 0] Group 10 Loss: 5.6463
+[2025-07-07 17:55:17] [Rank 0] Group 11 Loss: 5.6309
+[2025-07-07 17:55:17] [Rank 0] Group 0 FTA: 0.6645
+[2025-07-07 17:55:17] [Rank 0] Group 1 FTA: 0.8620
+[2025-07-07 17:55:17] [Rank 0] Group 2 FTA: 0.6719
+[2025-07-07 17:55:17] [Rank 0] Group 3 FTA: 0.6406
+[2025-07-07 17:55:17] [Rank 0] Group 4 FTA: 0.7552
+[2025-07-07 17:55:17] [Rank 0] Group 5 FTA: 0.7656
+[2025-07-07 17:55:17] [Rank 0] Group 6 FTA: 0.5938
+[2025-07-07 17:55:17] [Rank 0] Group 7 FTA: 0.6797
+[2025-07-07 17:55:17] [Rank 0] Group 8 FTA: 0.6302
+[2025-07-07 17:55:17] [Rank 0] Group 9 FTA: 0.6680
+[2025-07-07 17:55:17] [Rank 0] Group 10 FTA: 0.7109
+[2025-07-07 17:55:17] [Rank 0] Group 11 FTA: 0.6855
+[2025-07-07 17:55:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 17:55:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 17:55:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 17:55:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 17:55:19] [Rank 0] step:6501/10000 train_time:444994ms step_avg:68.45ms
+[2025-07-07 17:55:20] [Rank 0] step:6521/10000 train_time:445745ms step_avg:68.36ms
+[2025-07-07 17:55:21] [Rank 0] step:6541/10000 train_time:447109ms step_avg:68.35ms
+[2025-07-07 17:55:23] [Rank 0] step:6561/10000 train_time:448474ms step_avg:68.35ms
+[2025-07-07 17:55:24] [Rank 0] step:6581/10000 train_time:449841ms step_avg:68.35ms
+[2025-07-07 17:55:25] [Rank 0] step:6601/10000 train_time:451209ms step_avg:68.35ms
+[2025-07-07 17:55:27] [Rank 0] step:6621/10000 train_time:452576ms step_avg:68.35ms
+[2025-07-07 17:55:28] [Rank 0] step:6641/10000 train_time:453943ms step_avg:68.35ms
+[2025-07-07 17:55:30] [Rank 0] step:6661/10000 train_time:455359ms step_avg:68.36ms
+[2025-07-07 17:55:31] [Rank 0] step:6681/10000 train_time:456718ms step_avg:68.36ms
+[2025-07-07 17:55:32] [Rank 0] step:6701/10000 train_time:458086ms step_avg:68.36ms
+[2025-07-07 17:55:34] [Rank 0] step:6721/10000 train_time:459455ms step_avg:68.36ms
+[2025-07-07 17:55:35] [Rank 0] step:6741/10000 train_time:460825ms step_avg:68.36ms
+[2025-07-07 17:55:36] [Rank 0] step:6761/10000 train_time:462195ms step_avg:68.36ms
+[2025-07-07 17:55:38] [Rank 0] step:6781/10000 train_time:463565ms step_avg:68.36ms
+[2025-07-07 17:55:39] [Rank 0] step:6801/10000 train_time:464934ms step_avg:68.36ms
+[2025-07-07 17:55:41] [Rank 0] step:6821/10000 train_time:466306ms step_avg:68.36ms
+[2025-07-07 17:55:42] [Rank 0] step:6841/10000 train_time:468364ms step_avg:68.46ms
+[2025-07-07 17:55:43] [Rank 0] step:6861/10000 train_time:469101ms step_avg:68.37ms
+[2025-07-07 17:55:45] [Rank 0] step:6881/10000 train_time:470474ms step_avg:68.37ms
+[2025-07-07 17:55:46] [Rank 0] step:6901/10000 train_time:471846ms step_avg:68.37ms
+[2025-07-07 17:55:47] [Rank 0] step:6921/10000 train_time:473219ms step_avg:68.37ms
+[2025-07-07 17:55:49] [Rank 0] step:6941/10000 train_time:474592ms step_avg:68.38ms
+[2025-07-07 17:55:50] [Rank 0] step:6961/10000 train_time:475965ms step_avg:68.38ms
+[2025-07-07 17:55:52] [Rank 0] step:6981/10000 train_time:477339ms step_avg:68.38ms
+[2025-07-07 17:55:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:55:54] [Rank 0] PRINT: step:7000/10000 train_loss:0.9503 val_loss:0.9853 train_time:479338ms step_avg:68.48ms
+[2025-07-07 17:55:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:55:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:55:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:01:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:01:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:01:21] [Rank 0] Total Loss: 5.4811
+[2025-07-07 18:01:21] [Rank 0] Total FTA: 0.7575
+[2025-07-07 18:01:21] [Rank 0] Group 0 Loss: 5.6066
+[2025-07-07 18:01:21] [Rank 0] Group 1 Loss: 5.4671
+[2025-07-07 18:01:21] [Rank 0] Group 2 Loss: 5.1612
+[2025-07-07 18:01:21] [Rank 0] Group 3 Loss: 5.3984
+[2025-07-07 18:01:21] [Rank 0] Group 4 Loss: 5.4571
+[2025-07-07 18:01:21] [Rank 0] Group 5 Loss: 5.4322
+[2025-07-07 18:01:21] [Rank 0] Group 6 Loss: 5.3529
+[2025-07-07 18:01:21] [Rank 0] Group 7 Loss: 5.5477
+[2025-07-07 18:01:21] [Rank 0] Group 8 Loss: 5.5233
+[2025-07-07 18:01:21] [Rank 0] Group 9 Loss: 5.5041
+[2025-07-07 18:01:21] [Rank 0] Group 10 Loss: 5.5404
+[2025-07-07 18:01:21] [Rank 0] Group 11 Loss: 5.5421
+[2025-07-07 18:01:21] [Rank 0] Group 0 FTA: 0.6905
+[2025-07-07 18:01:21] [Rank 0] Group 1 FTA: 0.8333
+[2025-07-07 18:01:21] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 18:01:21] [Rank 0] Group 3 FTA: 0.6771
+[2025-07-07 18:01:21] [Rank 0] Group 4 FTA: 0.7630
+[2025-07-07 18:01:21] [Rank 0] Group 5 FTA: 0.7760
+[2025-07-07 18:01:21] [Rank 0] Group 6 FTA: 0.6901
+[2025-07-07 18:01:21] [Rank 0] Group 7 FTA: 0.7396
+[2025-07-07 18:01:21] [Rank 0] Group 8 FTA: 0.7318
+[2025-07-07 18:01:21] [Rank 0] Group 9 FTA: 0.7930
+[2025-07-07 18:01:21] [Rank 0] Group 10 FTA: 0.7520
+[2025-07-07 18:01:21] [Rank 0] Group 11 FTA: 0.7451
+[2025-07-07 18:01:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 18:01:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 18:01:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 18:01:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 18:01:22] [Rank 0] step:7001/10000 train_time:479349ms step_avg:68.47ms
+[2025-07-07 18:01:24] [Rank 0] step:7021/10000 train_time:480801ms step_avg:68.48ms
+[2025-07-07 18:01:25] [Rank 0] step:7041/10000 train_time:481536ms step_avg:68.39ms
+[2025-07-07 18:01:27] [Rank 0] step:7061/10000 train_time:482901ms step_avg:68.39ms
+[2025-07-07 18:01:28] [Rank 0] step:7081/10000 train_time:484267ms step_avg:68.39ms
+[2025-07-07 18:01:29] [Rank 0] step:7101/10000 train_time:485635ms step_avg:68.39ms
+[2025-07-07 18:01:31] [Rank 0] step:7121/10000 train_time:487002ms step_avg:68.39ms
+[2025-07-07 18:01:32] [Rank 0] step:7141/10000 train_time:488370ms step_avg:68.39ms
+[2025-07-07 18:01:33] [Rank 0] step:7161/10000 train_time:489738ms step_avg:68.39ms
+[2025-07-07 18:01:35] [Rank 0] step:7181/10000 train_time:491106ms step_avg:68.39ms
+[2025-07-07 18:01:36] [Rank 0] step:7201/10000 train_time:492725ms step_avg:68.42ms
+[2025-07-07 18:01:38] [Rank 0] step:7221/10000 train_time:493889ms step_avg:68.40ms
+[2025-07-07 18:01:39] [Rank 0] step:7241/10000 train_time:495260ms step_avg:68.40ms
+[2025-07-07 18:01:40] [Rank 0] step:7261/10000 train_time:496631ms step_avg:68.40ms
+[2025-07-07 18:01:42] [Rank 0] step:7281/10000 train_time:498002ms step_avg:68.40ms
+[2025-07-07 18:01:43] [Rank 0] step:7301/10000 train_time:499373ms step_avg:68.40ms
+[2025-07-07 18:01:44] [Rank 0] step:7321/10000 train_time:500745ms step_avg:68.40ms
+[2025-07-07 18:01:46] [Rank 0] step:7341/10000 train_time:502115ms step_avg:68.40ms
+[2025-07-07 18:01:47] [Rank 0] step:7361/10000 train_time:503486ms step_avg:68.40ms
+[2025-07-07 18:01:49] [Rank 0] step:7381/10000 train_time:505538ms step_avg:68.49ms
+[2025-07-07 18:01:50] [Rank 0] step:7401/10000 train_time:506275ms step_avg:68.41ms
+[2025-07-07 18:01:51] [Rank 0] step:7421/10000 train_time:507647ms step_avg:68.41ms
+[2025-07-07 18:01:53] [Rank 0] step:7441/10000 train_time:509019ms step_avg:68.41ms
+[2025-07-07 18:01:54] [Rank 0] step:7461/10000 train_time:510391ms step_avg:68.41ms
+[2025-07-07 18:01:55] [Rank 0] step:7481/10000 train_time:511763ms step_avg:68.41ms
+[2025-07-07 18:01:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:01:58] [Rank 0] PRINT: step:7500/10000 train_loss:0.9249 val_loss:0.9698 train_time:513760ms step_avg:68.50ms
+[2025-07-07 18:01:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:01:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:01:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:07:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:07:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:07:28] [Rank 0] Total Loss: 5.6055
+[2025-07-07 18:07:28] [Rank 0] Total FTA: 0.7621
+[2025-07-07 18:07:28] [Rank 0] Group 0 Loss: 5.7021
+[2025-07-07 18:07:28] [Rank 0] Group 1 Loss: 5.5728
+[2025-07-07 18:07:28] [Rank 0] Group 2 Loss: 5.2818
+[2025-07-07 18:07:28] [Rank 0] Group 3 Loss: 5.5699
+[2025-07-07 18:07:28] [Rank 0] Group 4 Loss: 5.5208
+[2025-07-07 18:07:28] [Rank 0] Group 5 Loss: 5.5492
+[2025-07-07 18:07:28] [Rank 0] Group 6 Loss: 5.5543
+[2025-07-07 18:07:28] [Rank 0] Group 7 Loss: 5.6504
+[2025-07-07 18:07:28] [Rank 0] Group 8 Loss: 5.6501
+[2025-07-07 18:07:28] [Rank 0] Group 9 Loss: 5.6764
+[2025-07-07 18:07:28] [Rank 0] Group 10 Loss: 5.6659
+[2025-07-07 18:07:28] [Rank 0] Group 11 Loss: 5.6704
+[2025-07-07 18:07:28] [Rank 0] Group 0 FTA: 0.6710
+[2025-07-07 18:07:28] [Rank 0] Group 1 FTA: 0.8568
+[2025-07-07 18:07:28] [Rank 0] Group 2 FTA: 0.7630
+[2025-07-07 18:07:28] [Rank 0] Group 3 FTA: 0.6589
+[2025-07-07 18:07:28] [Rank 0] Group 4 FTA: 0.8229
+[2025-07-07 18:07:28] [Rank 0] Group 5 FTA: 0.8724
+[2025-07-07 18:07:28] [Rank 0] Group 6 FTA: 0.7161
+[2025-07-07 18:07:28] [Rank 0] Group 7 FTA: 0.7786
+[2025-07-07 18:07:28] [Rank 0] Group 8 FTA: 0.7292
+[2025-07-07 18:07:28] [Rank 0] Group 9 FTA: 0.7891
+[2025-07-07 18:07:28] [Rank 0] Group 10 FTA: 0.7793
+[2025-07-07 18:07:28] [Rank 0] Group 11 FTA: 0.7773
+[2025-07-07 18:07:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 18:07:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 18:07:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 18:07:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 18:07:30] [Rank 0] step:7501/10000 train_time:513770ms step_avg:68.49ms
+[2025-07-07 18:07:31] [Rank 0] step:7521/10000 train_time:514525ms step_avg:68.41ms
+[2025-07-07 18:07:33] [Rank 0] step:7541/10000 train_time:515889ms step_avg:68.41ms
+[2025-07-07 18:07:34] [Rank 0] step:7561/10000 train_time:517255ms step_avg:68.41ms
+[2025-07-07 18:07:35] [Rank 0] step:7581/10000 train_time:518649ms step_avg:68.41ms
+[2025-07-07 18:07:37] [Rank 0] step:7601/10000 train_time:520015ms step_avg:68.41ms
+[2025-07-07 18:07:38] [Rank 0] step:7621/10000 train_time:521382ms step_avg:68.41ms
+[2025-07-07 18:07:39] [Rank 0] step:7641/10000 train_time:522747ms step_avg:68.41ms
+[2025-07-07 18:07:41] [Rank 0] step:7661/10000 train_time:524114ms step_avg:68.41ms
+[2025-07-07 18:07:42] [Rank 0] step:7681/10000 train_time:525482ms step_avg:68.41ms
+[2025-07-07 18:07:43] [Rank 0] step:7701/10000 train_time:526849ms step_avg:68.41ms
+[2025-07-07 18:07:45] [Rank 0] step:7721/10000 train_time:528216ms step_avg:68.41ms
+[2025-07-07 18:07:46] [Rank 0] step:7741/10000 train_time:529635ms step_avg:68.42ms
+[2025-07-07 18:07:48] [Rank 0] step:7761/10000 train_time:531003ms step_avg:68.42ms
+[2025-07-07 18:07:49] [Rank 0] step:7781/10000 train_time:532373ms step_avg:68.42ms
+[2025-07-07 18:07:50] [Rank 0] step:7801/10000 train_time:533742ms step_avg:68.42ms
+[2025-07-07 18:07:52] [Rank 0] step:7821/10000 train_time:535112ms step_avg:68.42ms
+[2025-07-07 18:07:53] [Rank 0] step:7841/10000 train_time:536482ms step_avg:68.42ms
+[2025-07-07 18:07:54] [Rank 0] step:7861/10000 train_time:537854ms step_avg:68.42ms
+[2025-07-07 18:07:56] [Rank 0] step:7881/10000 train_time:539226ms step_avg:68.42ms
+[2025-07-07 18:07:57] [Rank 0] step:7901/10000 train_time:540598ms step_avg:68.42ms
+[2025-07-07 18:07:59] [Rank 0] step:7921/10000 train_time:542650ms step_avg:68.51ms
+[2025-07-07 18:08:00] [Rank 0] step:7941/10000 train_time:543388ms step_avg:68.43ms
+[2025-07-07 18:08:01] [Rank 0] step:7961/10000 train_time:544760ms step_avg:68.43ms
+[2025-07-07 18:08:03] [Rank 0] step:7981/10000 train_time:546133ms step_avg:68.43ms
+[2025-07-07 18:08:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:08:05] [Rank 0] PRINT: step:8000/10000 train_loss:0.9043 val_loss:0.9553 train_time:548129ms step_avg:68.52ms
+[2025-07-07 18:08:05] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:08:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:08:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:13:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:13:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:13:36] [Rank 0] Total Loss: 5.4672
+[2025-07-07 18:13:36] [Rank 0] Total FTA: 0.8337
+[2025-07-07 18:13:36] [Rank 0] Group 0 Loss: 5.6816
+[2025-07-07 18:13:36] [Rank 0] Group 1 Loss: 5.1616
+[2025-07-07 18:13:36] [Rank 0] Group 2 Loss: 5.0685
+[2025-07-07 18:13:36] [Rank 0] Group 3 Loss: 5.4168
+[2025-07-07 18:13:36] [Rank 0] Group 4 Loss: 5.5239
+[2025-07-07 18:13:36] [Rank 0] Group 5 Loss: 5.4581
+[2025-07-07 18:13:36] [Rank 0] Group 6 Loss: 5.3795
+[2025-07-07 18:13:36] [Rank 0] Group 7 Loss: 5.5923
+[2025-07-07 18:13:36] [Rank 0] Group 8 Loss: 5.5322
+[2025-07-07 18:13:36] [Rank 0] Group 9 Loss: 5.5511
+[2025-07-07 18:13:36] [Rank 0] Group 10 Loss: 5.5084
+[2025-07-07 18:13:36] [Rank 0] Group 11 Loss: 5.4913
+[2025-07-07 18:13:36] [Rank 0] Group 0 FTA: 0.4824
+[2025-07-07 18:13:36] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:13:36] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 18:13:36] [Rank 0] Group 3 FTA: 0.8099
+[2025-07-07 18:13:36] [Rank 0] Group 4 FTA: 0.9115
+[2025-07-07 18:13:36] [Rank 0] Group 5 FTA: 0.8542
+[2025-07-07 18:13:36] [Rank 0] Group 6 FTA: 0.8724
+[2025-07-07 18:13:36] [Rank 0] Group 7 FTA: 0.8802
+[2025-07-07 18:13:36] [Rank 0] Group 8 FTA: 0.8490
+[2025-07-07 18:13:36] [Rank 0] Group 9 FTA: 0.8750
+[2025-07-07 18:13:36] [Rank 0] Group 10 FTA: 0.8906
+[2025-07-07 18:13:36] [Rank 0] Group 11 FTA: 0.8682
+[2025-07-07 18:13:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 18:13:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 18:13:37] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 18:13:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 18:13:38] [Rank 0] step:8001/10000 train_time:548140ms step_avg:68.51ms
+[2025-07-07 18:13:39] [Rank 0] step:8021/10000 train_time:548906ms step_avg:68.43ms
+[2025-07-07 18:13:41] [Rank 0] step:8041/10000 train_time:550271ms step_avg:68.43ms
+[2025-07-07 18:13:42] [Rank 0] step:8061/10000 train_time:551637ms step_avg:68.43ms
+[2025-07-07 18:13:43] [Rank 0] step:8081/10000 train_time:553003ms step_avg:68.43ms
+[2025-07-07 18:13:45] [Rank 0] step:8101/10000 train_time:554369ms step_avg:68.43ms
+[2025-07-07 18:13:46] [Rank 0] step:8121/10000 train_time:555779ms step_avg:68.44ms
+[2025-07-07 18:13:47] [Rank 0] step:8141/10000 train_time:557146ms step_avg:68.44ms
+[2025-07-07 18:13:49] [Rank 0] step:8161/10000 train_time:558514ms step_avg:68.44ms
+[2025-07-07 18:13:50] [Rank 0] step:8181/10000 train_time:559883ms step_avg:68.44ms
+[2025-07-07 18:13:52] [Rank 0] step:8201/10000 train_time:561252ms step_avg:68.44ms
+[2025-07-07 18:13:53] [Rank 0] step:8221/10000 train_time:562621ms step_avg:68.44ms
+[2025-07-07 18:13:54] [Rank 0] step:8241/10000 train_time:563990ms step_avg:68.44ms
+[2025-07-07 18:13:56] [Rank 0] step:8261/10000 train_time:565360ms step_avg:68.44ms
+[2025-07-07 18:13:57] [Rank 0] step:8281/10000 train_time:567395ms step_avg:68.52ms
+[2025-07-07 18:13:58] [Rank 0] step:8301/10000 train_time:568132ms step_avg:68.44ms
+[2025-07-07 18:14:00] [Rank 0] step:8321/10000 train_time:569503ms step_avg:68.44ms
+[2025-07-07 18:14:01] [Rank 0] step:8341/10000 train_time:570874ms step_avg:68.44ms
+[2025-07-07 18:14:03] [Rank 0] step:8361/10000 train_time:572246ms step_avg:68.44ms
+[2025-07-07 18:14:04] [Rank 0] step:8381/10000 train_time:573618ms step_avg:68.44ms
+[2025-07-07 18:14:05] [Rank 0] step:8401/10000 train_time:574990ms step_avg:68.44ms
+[2025-07-07 18:14:07] [Rank 0] step:8421/10000 train_time:576362ms step_avg:68.44ms
+[2025-07-07 18:14:08] [Rank 0] step:8441/10000 train_time:577735ms step_avg:68.44ms
+[2025-07-07 18:14:09] [Rank 0] step:8461/10000 train_time:579111ms step_avg:68.44ms
+[2025-07-07 18:14:11] [Rank 0] step:8481/10000 train_time:580518ms step_avg:68.45ms
+[2025-07-07 18:14:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:14:13] [Rank 0] PRINT: step:8500/10000 train_loss:0.8895 val_loss:0.9635 train_time:582519ms step_avg:68.53ms
+[2025-07-07 18:14:13] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:14:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:14:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:19:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:19:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:19:44] [Rank 0] Total Loss: 5.6104
+[2025-07-07 18:19:44] [Rank 0] Total FTA: 0.8788
+[2025-07-07 18:19:44] [Rank 0] Group 0 Loss: 5.8776
+[2025-07-07 18:19:44] [Rank 0] Group 1 Loss: 5.4083
+[2025-07-07 18:19:45] [Rank 0] Group 2 Loss: 5.2603
+[2025-07-07 18:19:45] [Rank 0] Group 3 Loss: 5.4804
+[2025-07-07 18:19:45] [Rank 0] Group 4 Loss: 5.5882
+[2025-07-07 18:19:45] [Rank 0] Group 5 Loss: 5.5554
+[2025-07-07 18:19:45] [Rank 0] Group 6 Loss: 5.4927
+[2025-07-07 18:19:45] [Rank 0] Group 7 Loss: 5.6371
+[2025-07-07 18:19:45] [Rank 0] Group 8 Loss: 5.6134
+[2025-07-07 18:19:45] [Rank 0] Group 9 Loss: 5.6152
+[2025-07-07 18:19:45] [Rank 0] Group 10 Loss: 5.6505
+[2025-07-07 18:19:45] [Rank 0] Group 11 Loss: 5.7065
+[2025-07-07 18:19:45] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 18:19:45] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:19:45] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 18:19:45] [Rank 0] Group 3 FTA: 0.7318
+[2025-07-07 18:19:45] [Rank 0] Group 4 FTA: 0.9609
+[2025-07-07 18:19:45] [Rank 0] Group 5 FTA: 0.8672
+[2025-07-07 18:19:45] [Rank 0] Group 6 FTA: 0.7891
+[2025-07-07 18:19:45] [Rank 0] Group 7 FTA: 0.8203
+[2025-07-07 18:19:45] [Rank 0] Group 8 FTA: 0.8047
+[2025-07-07 18:19:45] [Rank 0] Group 9 FTA: 0.8672
+[2025-07-07 18:19:45] [Rank 0] Group 10 FTA: 0.8438
+[2025-07-07 18:19:45] [Rank 0] Group 11 FTA: 0.8291
+[2025-07-07 18:19:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 18:19:48] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
+[2025-07-07 18:19:48] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 18:19:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 18:19:48] [Rank 0] step:8501/10000 train_time:582529ms step_avg:68.52ms
+[2025-07-07 18:19:50] [Rank 0] step:8521/10000 train_time:583294ms step_avg:68.45ms
+[2025-07-07 18:19:51] [Rank 0] step:8541/10000 train_time:584657ms step_avg:68.45ms
+[2025-07-07 18:19:52] [Rank 0] step:8561/10000 train_time:586023ms step_avg:68.45ms
+[2025-07-07 18:19:54] [Rank 0] step:8581/10000 train_time:587389ms step_avg:68.45ms
+[2025-07-07 18:19:55] [Rank 0] step:8601/10000 train_time:588757ms step_avg:68.45ms
+[2025-07-07 18:19:57] [Rank 0] step:8621/10000 train_time:590125ms step_avg:68.45ms
+[2025-07-07 18:19:58] [Rank 0] step:8641/10000 train_time:591492ms step_avg:68.45ms
+[2025-07-07 18:19:59] [Rank 0] step:8661/10000 train_time:592889ms step_avg:68.46ms
+[2025-07-07 18:20:01] [Rank 0] step:8681/10000 train_time:594259ms step_avg:68.46ms
+[2025-07-07 18:20:02] [Rank 0] step:8701/10000 train_time:595628ms step_avg:68.46ms
+[2025-07-07 18:20:03] [Rank 0] step:8721/10000 train_time:596995ms step_avg:68.45ms
+[2025-07-07 18:20:05] [Rank 0] step:8741/10000 train_time:598365ms step_avg:68.45ms
+[2025-07-07 18:20:06] [Rank 0] step:8761/10000 train_time:599734ms step_avg:68.46ms
+[2025-07-07 18:20:08] [Rank 0] step:8781/10000 train_time:601106ms step_avg:68.46ms
+[2025-07-07 18:20:09] [Rank 0] step:8801/10000 train_time:602478ms step_avg:68.46ms
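The step_avg column is consistent with cumulative train_time divided by the step index; a quick sanity check using the two bracketing entries above (illustrative only, not part of the training script):

    # step_avg sanity check, values taken from the log lines above
    print(582529 / 8501)   # ~68.52 ms, matches step_avg:68.52ms at step 8501
    print(602478 / 8801)   # ~68.46 ms, matches step_avg:68.46ms at step 8801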
+[2025-07-07 18:20:10] [Rank 0] step:8821/10000 train_time:603849ms step_avg:68.46ms
+[2025-07-07 18:20:12] [Rank 0] step:8841/10000 train_time:605243ms step_avg:68.46ms
+[2025-07-07 18:20:13] [Rank 0] step:8861/10000 train_time:606615ms step_avg:68.46ms
+[2025-07-07 18:20:14] [Rank 0] step:8881/10000 train_time:607987ms step_avg:68.46ms
+[2025-07-07 18:20:16] [Rank 0] step:8901/10000 train_time:609359ms step_avg:68.46ms
+[2025-07-07 18:20:17] [Rank 0] step:8921/10000 train_time:610731ms step_avg:68.46ms
+[2025-07-07 18:20:19] [Rank 0] step:8941/10000 train_time:612103ms step_avg:68.46ms
+[2025-07-07 18:20:20] [Rank 0] step:8961/10000 train_time:613476ms step_avg:68.46ms
+[2025-07-07 18:20:21] [Rank 0] step:8981/10000 train_time:614850ms step_avg:68.46ms
+[2025-07-07 18:20:23] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:20:24] [Rank 0] PRINT: step:9000/10000 train_loss:0.8786 val_loss:0.9230 train_time:616849ms step_avg:68.54ms
+[2025-07-07 18:20:24] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:20:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
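The divisibility warning a few lines above is plain integer arithmetic: val_tokens is not a whole multiple of the validation batch size, so the trailing partial batch goes unevaluated. A minimal sketch, with both values taken from the warning text:

    val_tokens, val_batch_size = 1966080, 262144
    full_batches = val_tokens // val_batch_size            # 7
    missed = val_tokens - full_batches * val_batch_size    # 131072 tokens skipped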
+[2025-07-07 18:20:24] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:25:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:25:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:25:52] [Rank 0] Total Loss: 5.7272
+[2025-07-07 18:25:52] [Rank 0] Total FTA: 0.9015
+[2025-07-07 18:25:52] [Rank 0] Group 0 Loss: 5.9267
+[2025-07-07 18:25:52] [Rank 0] Group 1 Loss: 5.5255
+[2025-07-07 18:25:52] [Rank 0] Group 2 Loss: 5.4920
+[2025-07-07 18:25:52] [Rank 0] Group 3 Loss: 5.5872
+[2025-07-07 18:25:52] [Rank 0] Group 4 Loss: 5.7879
+[2025-07-07 18:25:52] [Rank 0] Group 5 Loss: 5.6862
+[2025-07-07 18:25:52] [Rank 0] Group 6 Loss: 5.6244
+[2025-07-07 18:25:52] [Rank 0] Group 7 Loss: 5.7726
+[2025-07-07 18:25:52] [Rank 0] Group 8 Loss: 5.8008
+[2025-07-07 18:25:52] [Rank 0] Group 9 Loss: 5.7075
+[2025-07-07 18:25:52] [Rank 0] Group 10 Loss: 5.7912
+[2025-07-07 18:25:52] [Rank 0] Group 11 Loss: 5.7534
+[2025-07-07 18:25:52] [Rank 0] Group 0 FTA: 0.8088
+[2025-07-07 18:25:52] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:25:52] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 18:25:52] [Rank 0] Group 3 FTA: 0.8177
+[2025-07-07 18:25:52] [Rank 0] Group 4 FTA: 0.9557
+[2025-07-07 18:25:52] [Rank 0] Group 5 FTA: 0.9427
+[2025-07-07 18:25:52] [Rank 0] Group 6 FTA: 0.9010
+[2025-07-07 18:25:52] [Rank 0] Group 7 FTA: 0.9219
+[2025-07-07 18:25:52] [Rank 0] Group 8 FTA: 0.8906
+[2025-07-07 18:25:52] [Rank 0] Group 9 FTA: 0.9102
+[2025-07-07 18:25:52] [Rank 0] Group 10 FTA: 0.9062
+[2025-07-07 18:25:52] [Rank 0] Group 11 FTA: 0.8848
+[2025-07-07 18:25:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 18:25:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
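Each detailed evaluation pass takes roughly five and a half minutes (18:20:24 to 18:25:52 here) because run_detailed_evaluation, shown in the logged script further below, runs one padded forward pass per sampled QA item. A quick throughput estimate from the timestamps:

    from datetime import datetime
    t0 = datetime.strptime("18:20:24", "%H:%M:%S")
    t1 = datetime.strptime("18:25:52", "%H:%M:%S")
    print(5633 / (t1 - t0).total_seconds())  # ~17 samples/s over 5633 samples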
+[2025-07-07 18:25:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 18:25:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 18:25:54] [Rank 0] step:9001/10000 train_time:616867ms step_avg:68.53ms
+[2025-07-07 18:25:55] [Rank 0] step:9021/10000 train_time:618320ms step_avg:68.54ms
+[2025-07-07 18:25:57] [Rank 0] step:9041/10000 train_time:619684ms step_avg:68.54ms
+[2025-07-07 18:25:58] [Rank 0] step:9061/10000 train_time:621051ms step_avg:68.54ms
+[2025-07-07 18:25:59] [Rank 0] step:9081/10000 train_time:622418ms step_avg:68.54ms
+[2025-07-07 18:26:01] [Rank 0] step:9101/10000 train_time:623786ms step_avg:68.54ms
+[2025-07-07 18:26:02] [Rank 0] step:9121/10000 train_time:625155ms step_avg:68.54ms
+[2025-07-07 18:26:03] [Rank 0] step:9141/10000 train_time:626522ms step_avg:68.54ms
+[2025-07-07 18:26:05] [Rank 0] step:9161/10000 train_time:627891ms step_avg:68.54ms
+[2025-07-07 18:26:06] [Rank 0] step:9181/10000 train_time:629936ms step_avg:68.61ms
+[2025-07-07 18:26:08] [Rank 0] step:9201/10000 train_time:630673ms step_avg:68.54ms
+[2025-07-07 18:26:09] [Rank 0] step:9221/10000 train_time:632042ms step_avg:68.54ms
+[2025-07-07 18:26:10] [Rank 0] step:9241/10000 train_time:633411ms step_avg:68.54ms
+[2025-07-07 18:26:12] [Rank 0] step:9261/10000 train_time:634780ms step_avg:68.54ms
+[2025-07-07 18:26:13] [Rank 0] step:9281/10000 train_time:636151ms step_avg:68.54ms
+[2025-07-07 18:26:14] [Rank 0] step:9301/10000 train_time:637522ms step_avg:68.54ms
+[2025-07-07 18:26:16] [Rank 0] step:9321/10000 train_time:638892ms step_avg:68.54ms
+[2025-07-07 18:26:17] [Rank 0] step:9341/10000 train_time:640263ms step_avg:68.54ms
+[2025-07-07 18:26:19] [Rank 0] step:9361/10000 train_time:641886ms step_avg:68.57ms
+[2025-07-07 18:26:20] [Rank 0] step:9381/10000 train_time:643028ms step_avg:68.55ms
+[2025-07-07 18:26:21] [Rank 0] step:9401/10000 train_time:644399ms step_avg:68.55ms
+[2025-07-07 18:26:23] [Rank 0] step:9421/10000 train_time:645771ms step_avg:68.55ms
+[2025-07-07 18:26:24] [Rank 0] step:9441/10000 train_time:647143ms step_avg:68.55ms
+[2025-07-07 18:26:25] [Rank 0] step:9461/10000 train_time:648516ms step_avg:68.55ms
+[2025-07-07 18:26:27] [Rank 0] step:9481/10000 train_time:649888ms step_avg:68.55ms
+[2025-07-07 18:26:28] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:26:29] [Rank 0] PRINT: step:9500/10000 train_loss:0.8697 val_loss:0.9206 train_time:651887ms step_avg:68.62ms
+[2025-07-07 18:26:29] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:26:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:26:29] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:31:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:31:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:31:58] [Rank 0] Total Loss: 5.5893
+[2025-07-07 18:31:58] [Rank 0] Total FTA: 0.9205
+[2025-07-07 18:31:58] [Rank 0] Group 0 Loss: 5.7881
+[2025-07-07 18:31:58] [Rank 0] Group 1 Loss: 5.4650
+[2025-07-07 18:31:58] [Rank 0] Group 2 Loss: 5.1743
+[2025-07-07 18:31:58] [Rank 0] Group 3 Loss: 5.3365
+[2025-07-07 18:31:58] [Rank 0] Group 4 Loss: 5.6283
+[2025-07-07 18:31:58] [Rank 0] Group 5 Loss: 5.6046
+[2025-07-07 18:31:58] [Rank 0] Group 6 Loss: 5.4915
+[2025-07-07 18:31:58] [Rank 0] Group 7 Loss: 5.6938
+[2025-07-07 18:31:58] [Rank 0] Group 8 Loss: 5.5743
+[2025-07-07 18:31:58] [Rank 0] Group 9 Loss: 5.6987
+[2025-07-07 18:31:58] [Rank 0] Group 10 Loss: 5.6766
+[2025-07-07 18:31:58] [Rank 0] Group 11 Loss: 5.6490
+[2025-07-07 18:31:58] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 18:31:58] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:31:58] [Rank 0] Group 2 FTA: 0.9167
+[2025-07-07 18:31:58] [Rank 0] Group 3 FTA: 0.7734
+[2025-07-07 18:31:58] [Rank 0] Group 4 FTA: 0.9740
+[2025-07-07 18:31:58] [Rank 0] Group 5 FTA: 0.9531
+[2025-07-07 18:31:58] [Rank 0] Group 6 FTA: 0.8385
+[2025-07-07 18:31:58] [Rank 0] Group 7 FTA: 0.9062
+[2025-07-07 18:31:58] [Rank 0] Group 8 FTA: 0.8854
+[2025-07-07 18:31:58] [Rank 0] Group 9 FTA: 0.9336
+[2025-07-07 18:31:58] [Rank 0] Group 10 FTA: 0.9102
+[2025-07-07 18:31:58] [Rank 0] Group 11 FTA: 0.9062
+[2025-07-07 18:31:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 18:31:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
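FTA here is first-token accuracy: the model's greedy prediction at the last prompt position is compared with the first token of the reference answer, which the script encodes with a leading space. A stripped-down sketch of the check performed in run_detailed_evaluation below (function name is illustrative):

    import torch

    def first_token_correct(logits: torch.Tensor, prompt_len: int, expected_token: int) -> bool:
        # logits: (seq_len, vocab_size) from one forward pass over the padded QA text
        predicted = torch.argmax(logits[prompt_len - 1, :]).item()
        return predicted == expected_token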
+[2025-07-07 18:31:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 18:31:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 18:31:59] [Rank 0] step:9501/10000 train_time:651897ms step_avg:68.61ms
+[2025-07-07 18:32:01] [Rank 0] step:9521/10000 train_time:652660ms step_avg:68.55ms
+[2025-07-07 18:32:02] [Rank 0] step:9541/10000 train_time:654699ms step_avg:68.62ms
+[2025-07-07 18:32:03] [Rank 0] step:9561/10000 train_time:655434ms step_avg:68.55ms
+[2025-07-07 18:32:05] [Rank 0] step:9581/10000 train_time:656799ms step_avg:68.55ms
+[2025-07-07 18:32:06] [Rank 0] step:9601/10000 train_time:658165ms step_avg:68.55ms
+[2025-07-07 18:32:08] [Rank 0] step:9621/10000 train_time:659532ms step_avg:68.55ms
+[2025-07-07 18:32:09] [Rank 0] step:9641/10000 train_time:660900ms step_avg:68.55ms
+[2025-07-07 18:32:10] [Rank 0] step:9661/10000 train_time:662268ms step_avg:68.55ms
+[2025-07-07 18:32:12] [Rank 0] step:9681/10000 train_time:663635ms step_avg:68.55ms
+[2025-07-07 18:32:13] [Rank 0] step:9701/10000 train_time:665004ms step_avg:68.55ms
+[2025-07-07 18:32:14] [Rank 0] step:9721/10000 train_time:666373ms step_avg:68.55ms
+[2025-07-07 18:32:16] [Rank 0] step:9741/10000 train_time:667786ms step_avg:68.55ms
+[2025-07-07 18:32:17] [Rank 0] step:9761/10000 train_time:669155ms step_avg:68.55ms
+[2025-07-07 18:32:19] [Rank 0] step:9781/10000 train_time:670524ms step_avg:68.55ms
+[2025-07-07 18:32:20] [Rank 0] step:9801/10000 train_time:671894ms step_avg:68.55ms
+[2025-07-07 18:32:21] [Rank 0] step:9821/10000 train_time:673266ms step_avg:68.55ms
+[2025-07-07 18:32:23] [Rank 0] step:9841/10000 train_time:674638ms step_avg:68.55ms
+[2025-07-07 18:32:24] [Rank 0] step:9861/10000 train_time:676009ms step_avg:68.55ms
+[2025-07-07 18:32:25] [Rank 0] step:9881/10000 train_time:677381ms step_avg:68.55ms
+[2025-07-07 18:32:27] [Rank 0] step:9901/10000 train_time:678799ms step_avg:68.56ms
+[2025-07-07 18:32:28] [Rank 0] step:9921/10000 train_time:680171ms step_avg:68.56ms
+[2025-07-07 18:32:30] [Rank 0] step:9941/10000 train_time:681545ms step_avg:68.56ms
+[2025-07-07 18:32:31] [Rank 0] step:9961/10000 train_time:682918ms step_avg:68.56ms
+[2025-07-07 18:32:32] [Rank 0] step:9981/10000 train_time:684291ms step_avg:68.56ms
+[2025-07-07 18:32:34] [Rank 0] step:10000/10000 train_time:685596ms step_avg:68.56ms
+[2025-07-07 18:32:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:32:35] [Rank 0] PRINT: step:10000/10000 train_loss:0.8675 val_loss:0.9124 train_time:686294ms step_avg:68.63ms
+[2025-07-07 18:32:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:32:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:32:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:38:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:38:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:38:02] [Rank 0] Total Loss: 5.6801
+[2025-07-07 18:38:02] [Rank 0] Total FTA: 0.9254
+[2025-07-07 18:38:02] [Rank 0] Group 0 Loss: 5.8795
+[2025-07-07 18:38:02] [Rank 0] Group 1 Loss: 5.4735
+[2025-07-07 18:38:02] [Rank 0] Group 2 Loss: 5.3284
+[2025-07-07 18:38:02] [Rank 0] Group 3 Loss: 5.4417
+[2025-07-07 18:38:02] [Rank 0] Group 4 Loss: 5.7433
+[2025-07-07 18:38:02] [Rank 0] Group 5 Loss: 5.5937
+[2025-07-07 18:38:02] [Rank 0] Group 6 Loss: 5.5965
+[2025-07-07 18:38:02] [Rank 0] Group 7 Loss: 5.8074
+[2025-07-07 18:38:02] [Rank 0] Group 8 Loss: 5.6737
+[2025-07-07 18:38:02] [Rank 0] Group 9 Loss: 5.7548
+[2025-07-07 18:38:02] [Rank 0] Group 10 Loss: 5.7875
+[2025-07-07 18:38:02] [Rank 0] Group 11 Loss: 5.7513
+[2025-07-07 18:38:02] [Rank 0] Group 0 FTA: 0.8231
+[2025-07-07 18:38:02] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:38:02] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 18:38:02] [Rank 0] Group 3 FTA: 0.8750
+[2025-07-07 18:38:02] [Rank 0] Group 4 FTA: 0.9896
+[2025-07-07 18:38:02] [Rank 0] Group 5 FTA: 0.9583
+[2025-07-07 18:38:02] [Rank 0] Group 6 FTA: 0.9167
+[2025-07-07 18:38:02] [Rank 0] Group 7 FTA: 0.9609
+[2025-07-07 18:38:02] [Rank 0] Group 8 FTA: 0.9036
+[2025-07-07 18:38:02] [Rank 0] Group 9 FTA: 0.9375
+[2025-07-07 18:38:02] [Rank 0] Group 10 FTA: 0.9238
+[2025-07-07 18:38:02] [Rank 0] Group 11 FTA: 0.9248
+[2025-07-07 18:38:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_loss_curves.png
+[2025-07-07 18:38:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/per_class_acc_curves.png
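The twelve groups reported above come from the script's generate_powerlaw_selection_counts (reproduced in the logged code below): group 0 holds one class, group g >= 1 holds 2^(g-1) classes, and every class in group g receives 2^(m-g) samples, so the observed groups 0-11 are consistent with m = 11. A worked example with m = 3:

    # mirrors generate_powerlaw_selection_counts for m = 3
    m, counts, cid = 3, {}, 0
    for g in range(m + 1):
        for _ in range(1 if g == 0 else 2 ** (g - 1)):
            counts[cid] = 2 ** (m - g)
            cid += 1
    print(counts)  # {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}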
+[2025-07-07 18:38:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_loss_curve.png
+[2025-07-07 18:38:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_45/total_acc_curve.png
+[2025-07-07 18:38:03] [Rank 0] step:10001/10000 train_time:686304ms step_avg:68.62ms
+[2025-07-07 18:38:03] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 18:38:03 2025 ---
+[2025-07-07 18:38:03] [Rank 0] PRINT: Peak memory allocated: 9138 MiB reserved: 10596 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/config.json b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..b684174806dfeef759dfae22f213977fdbb8b1cd
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 48,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.001
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "933ee79b-4628-4b8b-88ec-52c135650ec9",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..bf3e5453e09a259d3cd7e5abcdb534169dc62522
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5f7ba1b213e0d53ad4ce96c7ad7a3b21d373abc45e3b502ba23228a9f4745a8e
+size 421231
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..47939c5b5050dd2a1d32ecdce140c8a32132eaad
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be973103d2b9bfd062d81e327886cd8455c686c5960f5c6bc793a4cd6662fee9
+size 409416
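This run's config selects optimizer_mode 5, the all-Adam baseline. The full mode mapping, paraphrased from the script's --optimizer_mode help text further below:

    OPTIMIZER_MODES = {
        0: "Muon(all hidden attn + MLP matrices; original setup)",
        1: "Muon(QK attn) / Adam(VO attn, MLP)",
        2: "Muon(VO attn) / Adam(QK attn, MLP)",
        3: "Muon(all attn) / Adam(MLP)",
        4: "Muon(MLP) / Adam(all attn)",
        5: "All Adam (no Muon)",
        6: "Muon(W_2 MLP) / Adam(attn, W_1 MLP)",
        7: "Muon(VO attn, MLP) / Adam(QK attn)",
        8: "Muon(VO attn, W_2 MLP) / Adam(QK attn, W_1 MLP)",
    }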
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..c8dbd86b3a5db3842771f35d859c1638731eeee1
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b87cb9d9dd6e0c3c09b2ca1c7e4680f13820f78755835deb6073caa8347395c5
+size 116173
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..1f03850c04b8f1853357a1531ffbb336d1212e78
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9ba1c99943e0341cd521fe033af4bbf008f8824b813de1162adf8a17601f8702
+size 125056
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/training_log_933ee79b-4628-4b8b-88ec-52c135650ec9.txt b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/training_log_933ee79b-4628-4b8b-88ec-52c135650ec9.txt
new file mode 100644
index 0000000000000000000000000000000000000000..59a1034221ecb442afd3ee408d578dba05a28f76
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/training_log_933ee79b-4628-4b8b-88ec-52c135650ec9.txt
@@ -0,0 +1,5144 @@
+[2025-07-07 16:24:22] [Rank 0] PRINT: --- Script Start: Mon Jul 7 16:24:22 2025 ---
+[2025-07-07 16:24:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.001)
+[2025-07-07 16:24:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 16:24:22] [Rank 0] PRINT: Using fixed seed: 48
+[2025-07-07 16:24:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48
+[2025-07-07 16:24:22] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so multi-epoch training works
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
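The DDP setup above reads RANK, LOCAL_RANK, and WORLD_SIZE from the environment, as populated by torchrun. A hypothetical single-node launch, sketched as a comment since the script name and process count are assumptions, not taken from this log (the CLI flags match the config above):

    # torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
    #     --optimizer_mode 5 --model_parameterization qkvo --adam_lr 0.001 --seed 48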
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # cap at num_samples cached items
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(
+                f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f" Expected : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n" +
+                f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" +
+                f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 16:24:22] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", 
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
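+# A sketch of the run-directory layout this setup produces (names taken from
+# the run_folder_name f-string below and the plot_curves calls later in the
+# script):
+#
+#   logs_bios/qa_0704/mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed}/
+#       config.json
+#       training_log_<run_uuid>.txt
+#       per_class_loss_curves.png, per_class_acc_curves.png
+#       total_loss_curve.png, total_acc_curve.png
+#
+# Note that run_dir_path is recomputed from the relative base_log_dir just
+# below, so the absolute run_dir_path_str above is only a provisional value;
+# run_flag and all outputs are derived from the relative path.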
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path)
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the log file exactly once; the write is guarded
+        # because logfile is only set on the master process of a fresh run.
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # The model may return a (loss, logits) tuple even when target_seq is None;
+            # take the last element to get the logits, as compute_first_token_accuracy
+            # does with result[-1]. Indexing [0] would select the loss, not the logits,
+            # and break the cross-entropy below.
+            if isinstance(logits, tuple): logits = logits[-1]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + +if run_flag: + ######################################## + # Construct model and optimizer # + ######################################## + if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) + else: + ft_tokenizer = None + + print0("PRINT: Constructing model...", console=True) + model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() + for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() + print0("PRINT: Broadcasting model parameters...", console=True) + for param in model.parameters(): + dist.broadcast(param.detach(), 0) + print0("PRINT: Model constructed and broadcasted.", console=True) + + + if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + + model_for_inference = model + print0("PRINT: Saved original model reference for inference.", console=True) + + + if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + 
+            if isinstance(result, tuple) and len(result) == 2:
+                loss, logits = result
+                print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+            else:
+                print0(f"PRINT: Model returns: {type(result)}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test still fails: {e}", console=True)
+
+
+    # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    if exp_args.model_parameterization == "qkvo":
+        print0("PRINT: Collecting parameters for optimizers...", console=True)
+        head_params = [model.lm_head.weight]
+        embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+        # Granular collection for attention and MLP parts
+        attn_q_params = []
+        attn_k_params = []
+        attn_v_params = []
+        attn_o_params = []  # W_O from c_proj
+        mlp_fc_params = []
+        mlp_proj_params = []
+
+        for block_module in model.blocks:
+            if block_module.attn is not None:
+                # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+                if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+                else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+                if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+                else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+                else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+                attn_o_params.append(block_module.attn.c_proj.weight)
+            if block_module.mlp is not None:
+                mlp_fc_params.append(block_module.mlp.c_fc.weight)
+                mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+        # Combine into logical groups for experiments
+        attn_qk_group = attn_q_params + attn_k_params
+        attn_vo_group = attn_v_params + attn_o_params
+        all_attn_matrices = attn_qk_group + attn_vo_group
+        mlp_w1_group = mlp_fc_params
+        mlp_w2_group = mlp_proj_params
+        all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+        # Scalar parameters (all others not explicitly grouped as matrices)
+        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+        for p_scalar in scalar_params:  # Sanity check
+            if p_scalar.ndim >= 2:
+                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+        # Determine parameter distribution based on optimizer_mode
+        muon_params_target_list = []
+        adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+        adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+        current_optimizer_mode = exp_args.optimizer_mode
+        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+        if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+            print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+            muon_params_target_list = all_attn_matrices + all_mlp_matrices
+            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+        elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_qk_group
+            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+        elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group
+            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+        elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_attn_matrices
+            adam_matrix_target_list = all_mlp_matrices
+        elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_mlp_matrices
+            adam_matrix_target_list = all_attn_matrices
+        elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = []
+            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+        elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = mlp_w2_group
+            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+        elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + all_mlp_matrices
+            adam_matrix_target_list = attn_qk_group
+        elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + mlp_w2_group
+            adam_matrix_target_list = attn_qk_group + mlp_w1_group
+        else:
+            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04)  # Scalar params always go to Adam
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    elif exp_args.model_parameterization == "whole":
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+        scalar_params = [p for p in model.parameters() if p.ndim < 2]
+        head_params = [model.lm_head.weight]
+
+        # init the optimizer(s)
+        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+        # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+        optimizers = [optimizer1, optimizer2]
+
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["initial_lr"] = group["lr"]
+
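For quick reference, the if/elif chain above implements the following split between the two optimizers (a summary sketch, not code from the logged script; in every mode Adam also keeps the head, embedding, and scalar parameters):

    # mode: (Muon matrices, matrices assigned specifically to Adam)
    MODE_ASSIGNMENT = {
        0: ("attn QKVO + MLP W1/W2", "none"),
        1: ("attn QK",               "attn VO + MLP W1/W2"),
        2: ("attn VO",               "attn QK + MLP W1/W2"),
        3: ("attn QKVO",             "MLP W1/W2"),
        4: ("MLP W1/W2",             "attn QKVO"),
        5: ("none (all Adam)",       "attn QKVO + MLP W1/W2"),
        6: ("MLP W2",                "attn QKVO + MLP W1"),
        7: ("attn VO + MLP W1/W2",   "attn QK"),
        8: ("attn VO + MLP W2",      "attn QK + MLP W1"),
    }

The run logged below uses mode 5, so no Muon optimizer is created and every matrix is trained with Adam.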
+    # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+    def get_lr(step: int):
+        x = step / args.num_iterations  # progress in training
+        # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+        # --- MODIFICATION: Adjust assert for LR schedule ---
+        if not (0 <= x <= 1):  # Allow x=1 for the last step
+            x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+            # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+        if x < 1 - args.cooldown_frac:
+            return 1.0
+        else:
+            # Ensure cooldown_frac is not zero to avoid division by zero
+            w = (1 - x) / max(args.cooldown_frac, 1e-9)
+            return w * 1.0 + (1 - w) * 0.1
+
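The clamped stable-then-decay schedule above is easy to sanity-check numerically. A minimal standalone sketch, assuming this run's num_iterations=10000 and cooldown_frac=0.8: the multiplier holds at 1.0 for the first 20% of training, then decays linearly to 0.1.

    def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
        # Same logic as get_lr above, with the clamp applied unconditionally.
        x = min(max(step / num_iterations, 0.0), 1.0)
        if x < 1 - cooldown_frac:
            return 1.0
        w = (1 - x) / max(cooldown_frac, 1e-9)
        return w * 1.0 + (1 - w) * 0.1

    for s in (0, 2000, 6000, 10000):
        print(s, round(lr_multiplier(s), 3))  # 1.0, 1.0, 0.55, 0.1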
+    # attention window size schedule (KEEP AS IS)
+    def next_multiple_of_n(v: float | int, *, n: int):
+        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+    @lru_cache(1)
+    def get_window_size_blocks_helper(window_size: int):
+        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+    def get_window_size_blocks(step: int):
+        x = step / args.num_iterations  # progress in training
+        # --- MODIFICATION: Adjust assert for window size schedule ---
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0)  # Clamp x
+
+        # Ensure window_size is at least 128
+        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+        return get_window_size_blocks_helper(window_size)
+
+    print0("PRINT: Compiling model with TorchInductor...", console=True)
+    # Use 'model' for compilation, not 'model_compiled' before it's defined
+    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+    print0("PRINT: Model compilation complete.", console=True)
+
+    ########################################
+    #            Warmup kernels            #
+    ########################################
+    print0("PRINT: Starting warmup...", console=True)
+    warmup_steps = 10
+    initial_state = dict(
+        model=copy.deepcopy(model_compiled.state_dict()),
+        optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+    )
+    for i in range(warmup_steps):
+        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+        loss.backward()
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+        for opt in optimizers:
+            opt.step()
+        model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+    del initial_state
+    print0("PRINT: Warmup complete.", console=True)
+    torch.cuda.synchronize()
+
+    ########################################
+    #       Training and validation        #
+    ########################################
+    print0("PRINT: Starting training...", console=True)
+    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+    train_loss_sum = torch.zeros(1, device=device)
+    train_step_count = torch.zeros(1, device=device)
+    training_time_ms = 0
+    torch.cuda.synchronize()
+    t0 = time.perf_counter()
+    train_steps = args.num_iterations
+
+    if master_process:
+        tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
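With this run's settings the divisibility warning that recurs in the log below is expected: val_tokens=1966080 is not a multiple of the per-evaluation batch of 262144 tokens (world_size times val_seq_len; with val_seq_len=65536 that implies 4 ranks, an assumption inferred from the log), so the floor division in the validation section drops half a batch at every evaluation. A quick check:

    val_tokens, val_batch_size = 1966080, 262144
    print(val_tokens / val_batch_size)       # 7.5 -> not an integer
    print(val_tokens // val_batch_size)      # 7 full validation steps per eval
    print(val_tokens - 7 * val_batch_size)   # 131072 tokens skipped each time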
+    for step in range(train_steps + 1):
+        last_step = (step == train_steps)
+
+        # --------- VALIDATION SECTION ---------
+        if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+            torch.cuda.synchronize()
+            if step > 0:
+                current_run_time = 1000 * (time.perf_counter() - t0)
+                training_time_ms += current_run_time
+
+            model_compiled.eval()
+            val_batch_size = world_size * args.val_seq_len
+            if args.val_tokens % val_batch_size != 0:
+                print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+            val_num_steps = args.val_tokens // val_batch_size
+            val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+            val_loss_sum = torch.zeros(1, device=device)
+            actual_val_steps = 0
+
+            with torch.no_grad():
+                for val_i in range(val_num_steps):
+                    try:
+                        inputs, targets = next(val_loader)
+                        loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                        val_loss_sum += loss_val
+                        actual_val_steps += 1
+                    except StopIteration:
+                        print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                        break
+
+            if actual_val_steps > 0:
+                val_loss_avg = val_loss_sum / actual_val_steps
+            else:
+                val_loss_avg = torch.tensor(float('nan'), device=device)
+                print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+            del val_loader
+            dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+            if train_step_count > 0:
+                avg_train_loss = train_loss_sum / train_step_count
+                dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+                avg_train_loss = avg_train_loss.item()
+            else:
+                avg_train_loss = float('nan')
+
+            avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+            # first_token_acc = 0.0
+            # ft_correct = 0
+            # ft_total = 0
+            # if master_process and ft_tokenizer is not None:
+            #     try:
+            #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+            #             model_for_inference, ft_tokenizer, device, num_samples=1000
+            #         )
+            #     except Exception as e:
+            #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+            # if world_size > 1:
+            #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+            #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+            #     ft_total_tensor = torch.tensor(ft_total, device=device)
+            #     dist.broadcast(ft_acc_tensor, 0)
+            #     dist.broadcast(ft_correct_tensor, 0)
+            #     dist.broadcast(ft_total_tensor, 0)
+            #     first_token_acc = ft_acc_tensor.item()
+            #     ft_correct = int(ft_correct_tensor.item())
+            #     ft_total = int(ft_total_tensor.item())
+
+            avg_train_loss = float(avg_train_loss)
+            if step == 0:
+                print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+            else:
+                print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+            if master_process and step > 0:
+                selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+                class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+                model_for_inference.load_state_dict(model.state_dict())
+
+                eval_results = run_detailed_evaluation(
+                    model=model_for_inference,
+                    tokenizer=tokenizer_for_eval,
+                    qa_data_path=QA_JSONL_PATH,
+                    device=device,
+                    m_val=M_FOR_POWERLAW,
+                    class_to_group_map=class_to_group_map,
+                    num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                )
+
+                print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+                print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+                print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+                for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+                for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                    print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+                current_step_str = str(step)
+                history['total_loss'][current_step_str] = eval_results['total_loss']
+                history['total_acc'][current_step_str] = eval_results['total_acc']
+                for group_id, loss in eval_results['per_class_loss'].items():
+                    history['per_class_loss'][group_id][current_step_str] = loss
+                for group_id, acc in eval_results['per_class_acc'].items():
+                    history['per_class_acc'][group_id][current_step_str] = acc
+
+                plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+                plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+                plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+                plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
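The history dict populated above uses string keys at both levels (group id and step), which is why plot_curves casts keys back to int before sorting. A sketch of its shape, with illustrative values taken from the step-500 and step-1000 evaluations in the log below:

    history = {
        'per_class_loss': {'0': {'500': 4.3092, '1000': 4.4758}},  # group -> step -> loss
        'per_class_acc':  {'0': {'500': 0.1404, '1000': 0.1534}},  # group -> step -> FTA
        'total_loss':     {'500': 4.2321, '1000': 4.2379},         # step -> loss
        'total_acc':      {'500': 0.0795, '1000': 0.0969},         # step -> FTA
    }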
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 16:24:23] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 16:24:23] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-07 16:24:23] [Rank 0] PRINT: Constructing model...
+[2025-07-07 16:24:25] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 16:24:25] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 16:24:25] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 16:24:26] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 16:24:26] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 16:24:26] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 16:24:26] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 16:24:26] [Rank 0] PRINT: Model returns:
+[2025-07-07 16:24:26] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 16:24:26] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-07 16:24:26] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.001).
+[2025-07-07 16:24:26] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-07 16:24:26] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-07 16:24:26] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 16:24:26] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 16:24:26] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 16:33:28] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 16:33:28] [Rank 0] PRINT: Starting training...
+[2025-07-07 16:33:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 16:37:53] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 16:37:54] [Rank 0] step:21/10000 train_time:1023ms step_avg:48.70ms
+[2025-07-07 16:37:56] [Rank 0] step:41/10000 train_time:2351ms step_avg:57.35ms
+[2025-07-07 16:37:57] [Rank 0] step:61/10000 train_time:3676ms step_avg:60.26ms
+[2025-07-07 16:37:58] [Rank 0] step:81/10000 train_time:5002ms step_avg:61.75ms
+[2025-07-07 16:38:00] [Rank 0] step:101/10000 train_time:6330ms step_avg:62.67ms
+[2025-07-07 16:38:01] [Rank 0] step:121/10000 train_time:7657ms step_avg:63.28ms
+[2025-07-07 16:38:02] [Rank 0] step:141/10000 train_time:8988ms step_avg:63.74ms
+[2025-07-07 16:38:04] [Rank 0] step:161/10000 train_time:10317ms step_avg:64.08ms
+[2025-07-07 16:38:05] [Rank 0] step:181/10000 train_time:11899ms step_avg:65.74ms
+[2025-07-07 16:38:06] [Rank 0] step:201/10000 train_time:13026ms step_avg:64.81ms
+[2025-07-07 16:38:08] [Rank 0] step:221/10000 train_time:14359ms step_avg:64.97ms
+[2025-07-07 16:38:09] [Rank 0] step:241/10000 train_time:15691ms step_avg:65.11ms
+[2025-07-07 16:38:10] [Rank 0] step:261/10000 train_time:17026ms step_avg:65.23ms
+[2025-07-07 16:38:12] [Rank 0] step:281/10000 train_time:18362ms step_avg:65.35ms
+[2025-07-07 16:38:13] [Rank 0] step:301/10000 train_time:19697ms step_avg:65.44ms
+[2025-07-07 16:38:14] [Rank 0] step:321/10000 train_time:21030ms step_avg:65.51ms
+[2025-07-07 16:38:16] [Rank 0] step:341/10000 train_time:22365ms step_avg:65.59ms
+[2025-07-07 16:38:17] [Rank 0] step:361/10000 train_time:23751ms step_avg:65.79ms
+[2025-07-07 16:38:19] [Rank 0] step:381/10000 train_time:25091ms step_avg:65.86ms
+[2025-07-07 16:38:20] [Rank 0] step:401/10000 train_time:26426ms step_avg:65.90ms
+[2025-07-07 16:38:21] [Rank 0] step:421/10000 train_time:27762ms step_avg:65.94ms
+[2025-07-07 16:38:23] [Rank 0] step:441/10000 train_time:29097ms step_avg:65.98ms
+[2025-07-07 16:38:24] [Rank 0] step:461/10000 train_time:30432ms step_avg:66.01ms
+[2025-07-07 16:38:25] [Rank 0] step:481/10000 train_time:31768ms step_avg:66.05ms
+[2025-07-07 16:38:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 16:38:27] [Rank 0] PRINT: step:500/10000 train_loss:5.2683 val_loss:2.4723 train_time:33711ms step_avg:67.42ms
+[2025-07-07 16:38:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 16:38:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 16:38:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 16:43:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 16:43:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 16:43:52] [Rank 0] Total Loss: 4.2321
+[2025-07-07 16:43:52] [Rank 0] Total FTA: 0.0795
+[2025-07-07 16:43:52] [Rank 0] Group 0 Loss: 4.3092
+[2025-07-07 16:43:52] [Rank 0] Group 1 Loss: 4.1768
+[2025-07-07 16:43:52] [Rank 0] Group 2 Loss: 4.1899
+[2025-07-07 16:43:52] [Rank 0] Group 3 Loss: 4.2134
+[2025-07-07 16:43:52] [Rank 0] Group 4 Loss: 4.2675
+[2025-07-07 16:43:52] [Rank 0] Group 5 Loss: 4.2146
+[2025-07-07 16:43:52] [Rank 0] Group 6 Loss: 4.2183
+[2025-07-07 16:43:52] [Rank 0] Group 7 Loss: 4.2332
+[2025-07-07 16:43:52] [Rank 0] Group 8 Loss: 4.2182
+[2025-07-07 16:43:52] [Rank 0] Group 9 Loss: 4.2374
+[2025-07-07 16:43:52] [Rank 0] Group 10 Loss: 4.2099
+[2025-07-07 16:43:52] [Rank 0] Group 11 Loss: 4.2308
+[2025-07-07 16:43:52] [Rank 0] Group 0 FTA: 0.1404
+[2025-07-07 16:43:52] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 16:43:52] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-07 16:43:52] [Rank 0] Group 3 FTA: 0.0469
+[2025-07-07 16:43:52] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 16:43:52] [Rank 0] Group 5 FTA: 0.0677
+[2025-07-07 16:43:52] [Rank 0] Group 6 FTA: 0.0677
+[2025-07-07 16:43:52] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-07 16:43:52] [Rank 0] Group 8 FTA: 0.1120
+[2025-07-07 16:43:52] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 16:43:52] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-07 16:43:52] [Rank 0] Group 11 FTA: 0.0830
+[2025-07-07 16:43:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 16:43:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 16:43:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 16:43:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 16:43:53] [Rank 0] step:501/10000 train_time:33722ms step_avg:67.31ms
+[2025-07-07 16:43:55] [Rank 0] step:521/10000 train_time:34471ms step_avg:66.16ms
+[2025-07-07 16:43:56] [Rank 0] step:541/10000 train_time:35802ms step_avg:66.18ms
+[2025-07-07 16:43:57] [Rank 0] step:561/10000 train_time:37134ms step_avg:66.19ms
+[2025-07-07 16:43:59] [Rank 0] step:581/10000 train_time:38467ms step_avg:66.21ms
+[2025-07-07 16:44:00] [Rank 0] step:601/10000 train_time:39799ms step_avg:66.22ms
+[2025-07-07 16:44:01] [Rank 0] step:621/10000 train_time:41131ms step_avg:66.23ms
+[2025-07-07 16:44:03] [Rank 0] step:641/10000 train_time:42464ms step_avg:66.25ms
+[2025-07-07 16:44:04] [Rank 0] step:661/10000 train_time:43798ms step_avg:66.26ms
+[2025-07-07 16:44:05] [Rank 0] step:681/10000 train_time:45132ms step_avg:66.27ms
+[2025-07-07 16:44:07] [Rank 0] step:701/10000 train_time:46468ms step_avg:66.29ms
+[2025-07-07 16:44:08] [Rank 0] step:721/10000 train_time:47803ms step_avg:66.30ms
+[2025-07-07 16:44:09] [Rank 0] step:741/10000 train_time:49208ms step_avg:66.41ms
+[2025-07-07 16:44:11] [Rank 0] step:761/10000 train_time:50550ms step_avg:66.43ms
+[2025-07-07 16:44:12] [Rank 0] step:781/10000 train_time:51897ms step_avg:66.45ms
+[2025-07-07 16:44:13] [Rank 0] step:801/10000 train_time:53245ms step_avg:66.47ms
+[2025-07-07 16:44:15] [Rank 0] step:821/10000 train_time:54591ms step_avg:66.49ms
+[2025-07-07 16:44:16] [Rank 0] step:841/10000 train_time:55939ms step_avg:66.51ms
+[2025-07-07 16:44:17] [Rank 0] step:861/10000 train_time:57287ms step_avg:66.53ms
+[2025-07-07 16:44:19] [Rank 0] step:881/10000 train_time:58635ms step_avg:66.55ms
+[2025-07-07 16:44:20] [Rank 0] step:901/10000 train_time:59984ms step_avg:66.58ms
+[2025-07-07 16:44:22] [Rank 0] step:921/10000 train_time:61377ms step_avg:66.64ms
+[2025-07-07 16:44:23] [Rank 0] step:941/10000 train_time:62727ms step_avg:66.66ms
+[2025-07-07 16:44:24] [Rank 0] step:961/10000 train_time:64076ms step_avg:66.68ms
+[2025-07-07 16:44:26] [Rank 0] step:981/10000 train_time:65425ms step_avg:66.69ms
+[2025-07-07 16:44:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 16:44:28] [Rank 0] PRINT: step:1000/10000 train_loss:1.9585 val_loss:1.7542 train_time:67389ms step_avg:67.39ms
+[2025-07-07 16:44:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 16:44:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 16:44:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 16:49:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 16:49:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 16:49:51] [Rank 0] Total Loss: 4.2379
+[2025-07-07 16:49:51] [Rank 0] Total FTA: 0.0969
+[2025-07-07 16:49:51] [Rank 0] Group 0 Loss: 4.4758
+[2025-07-07 16:49:51] [Rank 0] Group 1 Loss: 4.1858
+[2025-07-07 16:49:51] [Rank 0] Group 2 Loss: 3.9879
+[2025-07-07 16:49:51] [Rank 0] Group 3 Loss: 4.2469
+[2025-07-07 16:49:51] [Rank 0] Group 4 Loss: 4.2842
+[2025-07-07 16:49:51] [Rank 0] Group 5 Loss: 4.1253
+[2025-07-07 16:49:51] [Rank 0] Group 6 Loss: 4.1596
+[2025-07-07 16:49:51] [Rank 0] Group 7 Loss: 4.2456
+[2025-07-07 16:49:51] [Rank 0] Group 8 Loss: 4.1925
+[2025-07-07 16:49:51] [Rank 0] Group 9 Loss: 4.2126
+[2025-07-07 16:49:51] [Rank 0] Group 10 Loss: 4.2464
+[2025-07-07 16:49:51] [Rank 0] Group 11 Loss: 4.2393
+[2025-07-07 16:49:51] [Rank 0] Group 0 FTA: 0.1534
+[2025-07-07 16:49:51] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 16:49:51] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 16:49:51] [Rank 0] Group 3 FTA: 0.1042
+[2025-07-07 16:49:51] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-07 16:49:51] [Rank 0] Group 5 FTA: 0.0729
+[2025-07-07 16:49:51] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-07 16:49:51] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-07 16:49:51] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 16:49:51] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 16:49:51] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 16:49:51] [Rank 0] Group 11 FTA: 0.1025
+[2025-07-07 16:49:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 16:49:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 16:49:52] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 16:49:52] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 16:49:52] [Rank 0] step:1001/10000 train_time:67399ms step_avg:67.33ms
+[2025-07-07 16:49:53] [Rank 0] step:1021/10000 train_time:68144ms step_avg:66.74ms
+[2025-07-07 16:49:55] [Rank 0] step:1041/10000 train_time:69485ms step_avg:66.75ms
+[2025-07-07 16:49:56] [Rank 0] step:1061/10000 train_time:70829ms step_avg:66.76ms
+[2025-07-07 16:49:58] [Rank 0] step:1081/10000 train_time:72173ms step_avg:66.77ms
+[2025-07-07 16:49:59] [Rank 0] step:1101/10000 train_time:73573ms step_avg:66.82ms
+[2025-07-07 16:50:00] [Rank 0] step:1121/10000 train_time:74918ms step_avg:66.83ms
+[2025-07-07 16:50:02] [Rank 0] step:1141/10000 train_time:76263ms step_avg:66.84ms
+[2025-07-07 16:50:03] [Rank 0] step:1161/10000 train_time:77610ms step_avg:66.85ms
+[2025-07-07 16:50:04] [Rank 0] step:1181/10000 train_time:78956ms step_avg:66.86ms
+[2025-07-07 16:50:06] [Rank 0] step:1201/10000 train_time:80302ms step_avg:66.86ms
+[2025-07-07 16:50:07] [Rank 0] step:1221/10000 train_time:81647ms step_avg:66.87ms
+[2025-07-07 16:50:08] [Rank 0] step:1241/10000 train_time:82995ms step_avg:66.88ms
+[2025-07-07 16:50:10] [Rank 0] step:1261/10000 train_time:84388ms step_avg:66.92ms
+[2025-07-07 16:50:11] [Rank 0] step:1281/10000 train_time:85733ms step_avg:66.93ms
+[2025-07-07 16:50:12] [Rank 0] step:1301/10000 train_time:87079ms step_avg:66.93ms
+[2025-07-07 16:50:14] [Rank 0] step:1321/10000 train_time:88424ms step_avg:66.94ms
+[2025-07-07 16:50:15] [Rank 0] step:1341/10000 train_time:89769ms step_avg:66.94ms
+[2025-07-07 16:50:16] [Rank 0] step:1361/10000 train_time:91118ms step_avg:66.95ms
+[2025-07-07 16:50:18] [Rank 0] step:1381/10000 train_time:92465ms step_avg:66.96ms
+[2025-07-07 16:50:19] [Rank 0] step:1401/10000 train_time:93834ms step_avg:66.98ms
+[2025-07-07 16:50:21] [Rank 0] step:1421/10000 train_time:95181ms step_avg:66.98ms
+[2025-07-07 16:50:22] [Rank 0] step:1441/10000 train_time:96529ms step_avg:66.99ms
+[2025-07-07 16:50:23] [Rank 0] step:1461/10000 train_time:97927ms step_avg:67.03ms
+[2025-07-07 16:50:25] [Rank 0] step:1481/10000 train_time:99274ms step_avg:67.03ms
+[2025-07-07 16:50:26] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 16:50:27] [Rank 0] PRINT: step:1500/10000 train_loss:1.6966 val_loss:1.6451 train_time:101235ms step_avg:67.49ms
+[2025-07-07 16:50:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 16:50:27] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 16:50:27] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 16:55:52] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 16:55:52] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 16:55:52] [Rank 0] Total Loss: 4.7573
+[2025-07-07 16:55:52] [Rank 0] Total FTA: 0.0861
+[2025-07-07 16:55:52] [Rank 0] Group 0 Loss: 5.0047
+[2025-07-07 16:55:52] [Rank 0] Group 1 Loss: 4.8695
+[2025-07-07 16:55:52] [Rank 0] Group 2 Loss: 4.4742
+[2025-07-07 16:55:52] [Rank 0] Group 3 Loss: 4.8075
+[2025-07-07 16:55:52] [Rank 0] Group 4 Loss: 4.7130
+[2025-07-07 16:55:52] [Rank 0] Group 5 Loss: 4.6442
+[2025-07-07 16:55:52] [Rank 0] Group 6 Loss: 4.6471
+[2025-07-07 16:55:52] [Rank 0] Group 7 Loss: 4.7505
+[2025-07-07 16:55:52] [Rank 0] Group 8 Loss: 4.7565
+[2025-07-07 16:55:52] [Rank 0] Group 9 Loss: 4.7596
+[2025-07-07 16:55:52] [Rank 0] Group 10 Loss: 4.7208
+[2025-07-07 16:55:52] [Rank 0] Group 11 Loss: 4.7378
+[2025-07-07 16:55:52] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 16:55:52] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 16:55:52] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 16:55:52] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 16:55:52] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 16:55:52] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 16:55:52] [Rank 0] Group 6 FTA: 0.0729
+[2025-07-07 16:55:52] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-07 16:55:52] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 16:55:52] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 16:55:52] [Rank 0] Group 10 FTA: 0.0840
+[2025-07-07 16:55:52] [Rank 0] Group 11 FTA: 0.0879
+[2025-07-07 16:55:53] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 16:55:53] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 16:55:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 16:55:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 16:55:53] [Rank 0] step:1501/10000 train_time:101245ms step_avg:67.45ms
+[2025-07-07 16:55:55] [Rank 0] step:1521/10000 train_time:101997ms step_avg:67.06ms
+[2025-07-07 16:55:56] [Rank 0] step:1541/10000 train_time:103530ms step_avg:67.18ms
+[2025-07-07 16:55:58] [Rank 0] step:1561/10000 train_time:104871ms step_avg:67.18ms
+[2025-07-07 16:55:59] [Rank 0] step:1581/10000 train_time:106214ms step_avg:67.18ms
+[2025-07-07 16:56:00] [Rank 0] step:1601/10000 train_time:107557ms step_avg:67.18ms
+[2025-07-07 16:56:02] [Rank 0] step:1621/10000 train_time:108899ms step_avg:67.18ms
+[2025-07-07 16:56:03] [Rank 0] step:1641/10000 train_time:110286ms step_avg:67.21ms
+[2025-07-07 16:56:04] [Rank 0] step:1661/10000 train_time:111632ms step_avg:67.21ms
+[2025-07-07 16:56:06] [Rank 0] step:1681/10000 train_time:112976ms step_avg:67.21ms
+[2025-07-07 16:56:07] [Rank 0] step:1701/10000 train_time:114322ms step_avg:67.21ms
+[2025-07-07 16:56:09] [Rank 0] step:1721/10000 train_time:115667ms step_avg:67.21ms
+[2025-07-07 16:56:10] [Rank 0] step:1741/10000 train_time:117012ms step_avg:67.21ms
+[2025-07-07 16:56:11] [Rank 0] step:1761/10000 train_time:118358ms step_avg:67.21ms
+[2025-07-07 16:56:13] [Rank 0] step:1781/10000 train_time:119704ms step_avg:67.21ms
+[2025-07-07 16:56:14] [Rank 0] step:1801/10000 train_time:121053ms step_avg:67.21ms
+[2025-07-07 16:56:15] [Rank 0] step:1821/10000 train_time:122448ms step_avg:67.24ms
+[2025-07-07 16:56:17] [Rank 0] step:1841/10000 train_time:123795ms step_avg:67.24ms
+[2025-07-07 16:56:18] [Rank 0] step:1861/10000 train_time:125141ms step_avg:67.24ms
+[2025-07-07 16:56:19] [Rank 0] step:1881/10000 train_time:126488ms step_avg:67.25ms
+[2025-07-07 16:56:21] [Rank 0] step:1901/10000 train_time:127846ms step_avg:67.25ms
+[2025-07-07 16:56:22] [Rank 0] step:1921/10000 train_time:129196ms step_avg:67.25ms
+[2025-07-07 16:56:23] [Rank 0] step:1941/10000 train_time:130543ms step_avg:67.26ms
+[2025-07-07 16:56:25] [Rank 0] step:1961/10000 train_time:131892ms step_avg:67.26ms
+[2025-07-07 16:56:26] [Rank 0] step:1981/10000 train_time:133913ms step_avg:67.60ms
+[2025-07-07 16:56:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 16:56:28] [Rank 0] PRINT: step:2000/10000 train_loss:1.5883 val_loss:1.5293 train_time:135262ms step_avg:67.63ms
+[2025-07-07 16:56:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 16:56:29] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 16:56:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 16:56:29] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:01:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:01:55] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:01:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:01:55] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:01:55] [Rank 0] Total Loss: 4.8058 +[2025-07-07 17:01:55] [Rank 0] Total Loss: 4.8058 +[2025-07-07 17:01:55] [Rank 0] Total FTA: 0.0948 +[2025-07-07 17:01:55] [Rank 0] Total FTA: 0.0948 +[2025-07-07 17:01:55] [Rank 0] Group 0 Loss: 5.1057 +[2025-07-07 17:01:55] [Rank 0] Group 0 Loss: 5.1057 +[2025-07-07 17:01:55] [Rank 0] Group 1 Loss: 4.9779 +[2025-07-07 17:01:55] [Rank 0] Group 1 Loss: 4.9779 +[2025-07-07 17:01:55] [Rank 0] Group 2 Loss: 4.5182 +[2025-07-07 17:01:55] [Rank 0] Group 2 Loss: 4.5182 +[2025-07-07 17:01:55] [Rank 0] Group 3 Loss: 4.7930 +[2025-07-07 17:01:55] [Rank 0] Group 3 Loss: 4.7930 +[2025-07-07 17:01:55] [Rank 0] Group 4 Loss: 4.7225 +[2025-07-07 17:01:55] [Rank 0] Group 4 Loss: 4.7225 +[2025-07-07 17:01:55] [Rank 0] Group 5 Loss: 4.6431 +[2025-07-07 17:01:55] [Rank 0] Group 5 Loss: 4.6431 +[2025-07-07 17:01:55] [Rank 0] Group 6 Loss: 4.7402 +[2025-07-07 17:01:55] [Rank 0] Group 6 Loss: 4.7402 +[2025-07-07 17:01:55] [Rank 0] Group 7 Loss: 4.8604 +[2025-07-07 17:01:55] [Rank 0] Group 7 Loss: 4.8604 +[2025-07-07 17:01:55] [Rank 0] Group 8 Loss: 4.7783 +[2025-07-07 17:01:55] [Rank 0] Group 8 Loss: 4.7783 +[2025-07-07 17:01:55] [Rank 0] Group 9 Loss: 4.7162 +[2025-07-07 17:01:55] [Rank 0] Group 9 Loss: 4.7162 +[2025-07-07 17:01:55] [Rank 0] Group 10 Loss: 4.7596 +[2025-07-07 17:01:55] [Rank 0] Group 10 Loss: 4.7596 +[2025-07-07 17:01:55] [Rank 0] Group 11 Loss: 4.7808 +[2025-07-07 17:01:55] [Rank 0] Group 11 Loss: 4.7808 +[2025-07-07 17:01:55] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 17:01:55] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 17:01:55] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 17:01:55] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 17:01:55] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 17:01:55] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 17:01:55] [Rank 0] Group 3 FTA: 0.0625 +[2025-07-07 17:01:55] [Rank 0] Group 3 FTA: 0.0625 +[2025-07-07 17:01:55] [Rank 0] Group 4 FTA: 0.0625 +[2025-07-07 17:01:55] [Rank 0] Group 4 FTA: 0.0625 +[2025-07-07 17:01:55] [Rank 0] Group 5 FTA: 0.0443 +[2025-07-07 17:01:55] [Rank 0] Group 5 FTA: 0.0443 +[2025-07-07 17:01:55] [Rank 0] Group 6 FTA: 0.1120 +[2025-07-07 17:01:55] [Rank 0] Group 6 FTA: 0.1120 +[2025-07-07 17:01:55] [Rank 0] Group 7 FTA: 0.1172 +[2025-07-07 17:01:55] [Rank 0] Group 7 FTA: 0.1172 +[2025-07-07 17:01:55] [Rank 0] Group 8 FTA: 0.1042 +[2025-07-07 17:01:55] [Rank 0] Group 8 FTA: 0.1042 +[2025-07-07 17:01:55] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-07 17:01:55] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-07 17:01:55] [Rank 0] Group 10 FTA: 0.0898 +[2025-07-07 17:01:55] [Rank 0] Group 10 FTA: 0.0898 +[2025-07-07 17:01:55] [Rank 0] Group 11 FTA: 0.1016 +[2025-07-07 17:01:55] [Rank 0] Group 11 FTA: 0.1016 +[2025-07-07 17:01:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:01:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:01:56] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:01:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:01:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:01:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:01:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:01:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:01:56] [Rank 0] step:2001/10000 train_time:135273ms step_avg:67.60ms +[2025-07-07 17:01:56] [Rank 0] step:2001/10000 train_time:135273ms step_avg:67.60ms +[2025-07-07 17:01:58] [Rank 0] step:2021/10000 train_time:136018ms step_avg:67.30ms +[2025-07-07 17:01:58] [Rank 0] step:2021/10000 train_time:136018ms step_avg:67.30ms +[2025-07-07 17:01:59] [Rank 0] step:2041/10000 train_time:137359ms step_avg:67.30ms +[2025-07-07 17:01:59] [Rank 0] step:2041/10000 train_time:137359ms step_avg:67.30ms +[2025-07-07 17:02:00] [Rank 0] step:2061/10000 train_time:138701ms step_avg:67.30ms +[2025-07-07 17:02:00] [Rank 0] step:2061/10000 train_time:138701ms step_avg:67.30ms +[2025-07-07 17:02:02] [Rank 0] step:2081/10000 train_time:140044ms step_avg:67.30ms +[2025-07-07 17:02:02] [Rank 0] step:2081/10000 train_time:140044ms step_avg:67.30ms +[2025-07-07 17:02:03] [Rank 0] step:2101/10000 train_time:141390ms step_avg:67.30ms +[2025-07-07 17:02:03] [Rank 0] step:2101/10000 train_time:141390ms step_avg:67.30ms +[2025-07-07 17:02:04] [Rank 0] step:2121/10000 train_time:142736ms step_avg:67.30ms +[2025-07-07 17:02:04] [Rank 0] step:2121/10000 train_time:142736ms step_avg:67.30ms +[2025-07-07 17:02:06] [Rank 0] step:2141/10000 train_time:144081ms step_avg:67.30ms +[2025-07-07 17:02:06] [Rank 0] step:2141/10000 train_time:144081ms step_avg:67.30ms +[2025-07-07 17:02:07] [Rank 0] step:2161/10000 train_time:145474ms step_avg:67.32ms +[2025-07-07 17:02:07] [Rank 0] step:2161/10000 train_time:145474ms step_avg:67.32ms +[2025-07-07 17:02:08] [Rank 0] step:2181/10000 train_time:146813ms step_avg:67.31ms +[2025-07-07 17:02:08] [Rank 0] step:2181/10000 train_time:146813ms step_avg:67.31ms +[2025-07-07 17:02:10] [Rank 0] step:2201/10000 train_time:148157ms step_avg:67.31ms +[2025-07-07 17:02:10] [Rank 0] step:2201/10000 train_time:148157ms step_avg:67.31ms +[2025-07-07 17:02:11] [Rank 0] step:2221/10000 train_time:149502ms step_avg:67.31ms +[2025-07-07 17:02:11] [Rank 0] step:2221/10000 train_time:149502ms step_avg:67.31ms +[2025-07-07 17:02:12] [Rank 0] step:2241/10000 train_time:150861ms step_avg:67.32ms +[2025-07-07 17:02:12] [Rank 0] step:2241/10000 train_time:150861ms step_avg:67.32ms +[2025-07-07 17:02:14] [Rank 0] step:2261/10000 train_time:152230ms step_avg:67.33ms +[2025-07-07 17:02:14] [Rank 0] step:2261/10000 train_time:152230ms step_avg:67.33ms +[2025-07-07 17:02:15] [Rank 0] step:2281/10000 train_time:153601ms step_avg:67.34ms +[2025-07-07 17:02:15] [Rank 0] step:2281/10000 train_time:153601ms step_avg:67.34ms +[2025-07-07 17:02:17] [Rank 0] step:2301/10000 train_time:154972ms step_avg:67.35ms +[2025-07-07 17:02:17] [Rank 0] 
step:2301/10000 train_time:154972ms step_avg:67.35ms +[2025-07-07 17:02:18] [Rank 0] step:2321/10000 train_time:156349ms step_avg:67.36ms +[2025-07-07 17:02:18] [Rank 0] step:2321/10000 train_time:156349ms step_avg:67.36ms +[2025-07-07 17:02:19] [Rank 0] step:2341/10000 train_time:157978ms step_avg:67.48ms +[2025-07-07 17:02:19] [Rank 0] step:2341/10000 train_time:157978ms step_avg:67.48ms +[2025-07-07 17:02:21] [Rank 0] step:2361/10000 train_time:159148ms step_avg:67.41ms +[2025-07-07 17:02:21] [Rank 0] step:2361/10000 train_time:159148ms step_avg:67.41ms +[2025-07-07 17:02:22] [Rank 0] step:2381/10000 train_time:160522ms step_avg:67.42ms +[2025-07-07 17:02:22] [Rank 0] step:2381/10000 train_time:160522ms step_avg:67.42ms +[2025-07-07 17:02:23] [Rank 0] step:2401/10000 train_time:161896ms step_avg:67.43ms +[2025-07-07 17:02:23] [Rank 0] step:2401/10000 train_time:161896ms step_avg:67.43ms +[2025-07-07 17:02:25] [Rank 0] step:2421/10000 train_time:163271ms step_avg:67.44ms +[2025-07-07 17:02:25] [Rank 0] step:2421/10000 train_time:163271ms step_avg:67.44ms +[2025-07-07 17:02:26] [Rank 0] step:2441/10000 train_time:164644ms step_avg:67.45ms +[2025-07-07 17:02:26] [Rank 0] step:2441/10000 train_time:164644ms step_avg:67.45ms +[2025-07-07 17:02:28] [Rank 0] step:2461/10000 train_time:166018ms step_avg:67.46ms +[2025-07-07 17:02:28] [Rank 0] step:2461/10000 train_time:166018ms step_avg:67.46ms +[2025-07-07 17:02:29] [Rank 0] step:2481/10000 train_time:167393ms step_avg:67.47ms +[2025-07-07 17:02:29] [Rank 0] step:2481/10000 train_time:167393ms step_avg:67.47ms +[2025-07-07 17:02:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:02:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:02:31] [Rank 0] PRINT: step:2500/10000 train_loss:1.4641 val_loss:1.3956 train_time:169394ms step_avg:67.76ms +[2025-07-07 17:02:31] [Rank 0] PRINT: step:2500/10000 train_loss:1.4641 val_loss:1.3956 train_time:169394ms step_avg:67.76ms +[2025-07-07 17:02:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:02:31] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:02:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 17:02:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 17:02:31] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:02:31] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:07:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:07:56] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:07:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:07:56] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:07:56] [Rank 0] Total Loss: 4.9715 +[2025-07-07 17:07:56] [Rank 0] Total Loss: 4.9715 +[2025-07-07 17:07:56] [Rank 0] Total FTA: 0.1312 +[2025-07-07 17:07:56] [Rank 0] Total FTA: 0.1312 +[2025-07-07 17:07:56] [Rank 0] Group 0 Loss: 5.2460 +[2025-07-07 17:07:56] [Rank 0] Group 0 Loss: 5.2460 +[2025-07-07 17:07:56] [Rank 0] Group 1 Loss: 5.0887 +[2025-07-07 17:07:56] [Rank 0] Group 1 Loss: 5.0887 +[2025-07-07 17:07:56] [Rank 0] Group 2 Loss: 4.6610 +[2025-07-07 17:07:56] [Rank 0] Group 2 Loss: 4.6610 +[2025-07-07 17:07:56] [Rank 0] Group 3 Loss: 5.0041 +[2025-07-07 17:07:56] [Rank 0] Group 3 Loss: 5.0041 +[2025-07-07 17:07:56] [Rank 0] Group 4 Loss: 4.8567 +[2025-07-07 17:07:56] [Rank 0] Group 4 Loss: 4.8567 +[2025-07-07 17:07:56] [Rank 0] Group 5 Loss: 4.8917 +[2025-07-07 17:07:56] [Rank 0] Group 5 Loss: 4.8917 +[2025-07-07 17:07:56] [Rank 0] Group 6 Loss: 4.8359 +[2025-07-07 17:07:56] [Rank 0] Group 6 Loss: 4.8359 +[2025-07-07 17:07:56] [Rank 0] Group 7 Loss: 4.9719 +[2025-07-07 17:07:56] [Rank 0] Group 7 Loss: 4.9719 +[2025-07-07 17:07:56] [Rank 0] Group 8 Loss: 4.9468 +[2025-07-07 17:07:56] [Rank 0] Group 8 Loss: 4.9468 +[2025-07-07 17:07:56] [Rank 0] Group 9 Loss: 4.9192 +[2025-07-07 17:07:56] [Rank 0] Group 9 Loss: 4.9192 +[2025-07-07 17:07:56] [Rank 0] Group 10 Loss: 4.9581 +[2025-07-07 17:07:56] [Rank 0] Group 10 Loss: 4.9581 +[2025-07-07 17:07:56] [Rank 0] Group 11 Loss: 4.9785 +[2025-07-07 17:07:56] [Rank 0] Group 11 Loss: 4.9785 +[2025-07-07 17:07:56] [Rank 0] Group 0 FTA: 0.1469 +[2025-07-07 17:07:56] [Rank 0] Group 0 FTA: 0.1469 +[2025-07-07 17:07:56] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-07 17:07:56] [Rank 0] Group 1 FTA: 0.1719 +[2025-07-07 17:07:56] [Rank 0] Group 2 FTA: 0.1484 +[2025-07-07 17:07:56] [Rank 0] Group 2 FTA: 0.1484 +[2025-07-07 17:07:56] [Rank 0] Group 3 FTA: 0.1406 +[2025-07-07 17:07:56] [Rank 0] Group 3 FTA: 0.1406 +[2025-07-07 17:07:56] [Rank 0] Group 4 FTA: 0.0573 +[2025-07-07 17:07:56] [Rank 0] Group 4 FTA: 0.0573 +[2025-07-07 17:07:56] [Rank 0] Group 5 FTA: 0.1302 +[2025-07-07 17:07:56] [Rank 0] Group 5 FTA: 0.1302 +[2025-07-07 17:07:56] [Rank 0] Group 6 FTA: 0.1146 +[2025-07-07 17:07:56] [Rank 0] Group 6 FTA: 0.1146 +[2025-07-07 17:07:56] [Rank 0] Group 7 FTA: 0.1484 +[2025-07-07 17:07:56] [Rank 0] Group 7 FTA: 0.1484 +[2025-07-07 17:07:56] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-07 17:07:56] [Rank 0] Group 8 FTA: 0.1146 +[2025-07-07 17:07:56] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-07 17:07:56] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-07 17:07:56] [Rank 0] Group 10 FTA: 0.1270 +[2025-07-07 17:07:56] [Rank 0] Group 10 FTA: 0.1270 +[2025-07-07 17:07:56] [Rank 0] Group 11 FTA: 0.1377 +[2025-07-07 17:07:56] [Rank 0] Group 11 FTA: 0.1377 +[2025-07-07 17:07:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:07:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:07:57] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:07:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:07:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:07:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:07:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:07:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:07:57] [Rank 0] step:2501/10000 train_time:169405ms step_avg:67.74ms +[2025-07-07 17:07:57] [Rank 0] step:2501/10000 train_time:169405ms step_avg:67.74ms +[2025-07-07 17:07:59] [Rank 0] step:2521/10000 train_time:170219ms step_avg:67.52ms +[2025-07-07 17:07:59] [Rank 0] step:2521/10000 train_time:170219ms step_avg:67.52ms +[2025-07-07 17:08:00] [Rank 0] step:2541/10000 train_time:171577ms step_avg:67.52ms +[2025-07-07 17:08:00] [Rank 0] step:2541/10000 train_time:171577ms step_avg:67.52ms +[2025-07-07 17:08:02] [Rank 0] step:2561/10000 train_time:172948ms step_avg:67.53ms +[2025-07-07 17:08:02] [Rank 0] step:2561/10000 train_time:172948ms step_avg:67.53ms +[2025-07-07 17:08:03] [Rank 0] step:2581/10000 train_time:174318ms step_avg:67.54ms +[2025-07-07 17:08:03] [Rank 0] step:2581/10000 train_time:174318ms step_avg:67.54ms +[2025-07-07 17:08:04] [Rank 0] step:2601/10000 train_time:175688ms step_avg:67.55ms +[2025-07-07 17:08:04] [Rank 0] step:2601/10000 train_time:175688ms step_avg:67.55ms +[2025-07-07 17:08:06] [Rank 0] step:2621/10000 train_time:177057ms step_avg:67.55ms +[2025-07-07 17:08:06] [Rank 0] step:2621/10000 train_time:177057ms step_avg:67.55ms +[2025-07-07 17:08:07] [Rank 0] step:2641/10000 train_time:178428ms step_avg:67.56ms +[2025-07-07 17:08:07] [Rank 0] step:2641/10000 train_time:178428ms step_avg:67.56ms +[2025-07-07 17:08:08] [Rank 0] step:2661/10000 train_time:179798ms step_avg:67.57ms +[2025-07-07 17:08:08] [Rank 0] step:2661/10000 train_time:179798ms step_avg:67.57ms +[2025-07-07 17:08:10] [Rank 0] step:2681/10000 train_time:181168ms step_avg:67.57ms +[2025-07-07 17:08:10] [Rank 0] step:2681/10000 train_time:181168ms step_avg:67.57ms +[2025-07-07 17:08:11] [Rank 0] step:2701/10000 train_time:182540ms step_avg:67.58ms +[2025-07-07 17:08:11] [Rank 0] step:2701/10000 train_time:182540ms step_avg:67.58ms +[2025-07-07 17:08:13] [Rank 0] step:2721/10000 train_time:183912ms step_avg:67.59ms +[2025-07-07 17:08:13] [Rank 0] step:2721/10000 train_time:183912ms step_avg:67.59ms +[2025-07-07 17:08:14] [Rank 0] step:2741/10000 train_time:185284ms step_avg:67.60ms +[2025-07-07 17:08:14] [Rank 0] step:2741/10000 train_time:185284ms step_avg:67.60ms +[2025-07-07 17:08:15] [Rank 0] step:2761/10000 train_time:186656ms step_avg:67.60ms +[2025-07-07 17:08:15] [Rank 0] step:2761/10000 train_time:186656ms step_avg:67.60ms +[2025-07-07 17:08:17] [Rank 0] step:2781/10000 train_time:188029ms step_avg:67.61ms +[2025-07-07 17:08:17] [Rank 0] step:2781/10000 train_time:188029ms step_avg:67.61ms +[2025-07-07 17:08:18] [Rank 0] step:2801/10000 train_time:189402ms step_avg:67.62ms +[2025-07-07 17:08:18] [Rank 0] 
step:2801/10000 train_time:189402ms step_avg:67.62ms +[2025-07-07 17:08:19] [Rank 0] step:2821/10000 train_time:190775ms step_avg:67.63ms +[2025-07-07 17:08:19] [Rank 0] step:2821/10000 train_time:190775ms step_avg:67.63ms +[2025-07-07 17:08:21] [Rank 0] step:2841/10000 train_time:192147ms step_avg:67.63ms +[2025-07-07 17:08:21] [Rank 0] step:2841/10000 train_time:192147ms step_avg:67.63ms +[2025-07-07 17:08:22] [Rank 0] step:2861/10000 train_time:193519ms step_avg:67.64ms +[2025-07-07 17:08:22] [Rank 0] step:2861/10000 train_time:193519ms step_avg:67.64ms +[2025-07-07 17:08:24] [Rank 0] step:2881/10000 train_time:194893ms step_avg:67.65ms +[2025-07-07 17:08:24] [Rank 0] step:2881/10000 train_time:194893ms step_avg:67.65ms +[2025-07-07 17:08:25] [Rank 0] step:2901/10000 train_time:196315ms step_avg:67.67ms +[2025-07-07 17:08:25] [Rank 0] step:2901/10000 train_time:196315ms step_avg:67.67ms +[2025-07-07 17:08:26] [Rank 0] step:2921/10000 train_time:197688ms step_avg:67.68ms +[2025-07-07 17:08:26] [Rank 0] step:2921/10000 train_time:197688ms step_avg:67.68ms +[2025-07-07 17:08:28] [Rank 0] step:2941/10000 train_time:199060ms step_avg:67.68ms +[2025-07-07 17:08:28] [Rank 0] step:2941/10000 train_time:199060ms step_avg:67.68ms +[2025-07-07 17:08:29] [Rank 0] step:2961/10000 train_time:200432ms step_avg:67.69ms +[2025-07-07 17:08:29] [Rank 0] step:2961/10000 train_time:200432ms step_avg:67.69ms +[2025-07-07 17:08:30] [Rank 0] step:2981/10000 train_time:201805ms step_avg:67.70ms +[2025-07-07 17:08:30] [Rank 0] step:2981/10000 train_time:201805ms step_avg:67.70ms +[2025-07-07 17:08:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:08:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:08:33] [Rank 0] PRINT: step:3000/10000 train_loss:1.3266 val_loss:1.2770 train_time:203800ms step_avg:67.93ms +[2025-07-07 17:08:33] [Rank 0] PRINT: step:3000/10000 train_loss:1.3266 val_loss:1.2770 train_time:203800ms step_avg:67.93ms +[2025-07-07 17:08:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:08:33] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:08:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 17:08:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 17:08:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:08:33] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:13:58] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:13:58] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:13:58] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:13:58] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:13:58] [Rank 0] Total Loss: 4.8835 +[2025-07-07 17:13:58] [Rank 0] Total Loss: 4.8835 +[2025-07-07 17:13:58] [Rank 0] Total FTA: 0.1766 +[2025-07-07 17:13:58] [Rank 0] Total FTA: 0.1766 +[2025-07-07 17:13:58] [Rank 0] Group 0 Loss: 5.3415 +[2025-07-07 17:13:58] [Rank 0] Group 0 Loss: 5.3415 +[2025-07-07 17:13:58] [Rank 0] Group 1 Loss: 4.8676 +[2025-07-07 17:13:58] [Rank 0] Group 1 Loss: 4.8676 +[2025-07-07 17:13:58] [Rank 0] Group 2 Loss: 4.6759 +[2025-07-07 17:13:58] [Rank 0] Group 2 Loss: 4.6759 +[2025-07-07 17:13:58] [Rank 0] Group 3 Loss: 4.8864 +[2025-07-07 17:13:58] [Rank 0] Group 3 Loss: 4.8864 +[2025-07-07 17:13:58] [Rank 0] Group 4 Loss: 4.7587 +[2025-07-07 17:13:58] [Rank 0] Group 4 Loss: 4.7587 +[2025-07-07 17:13:58] [Rank 0] Group 5 Loss: 4.8433 +[2025-07-07 17:13:58] [Rank 0] Group 5 Loss: 4.8433 +[2025-07-07 17:13:58] [Rank 0] Group 6 Loss: 4.7389 +[2025-07-07 17:13:58] [Rank 0] Group 6 Loss: 4.7389 +[2025-07-07 17:13:58] [Rank 0] Group 7 Loss: 4.8523 +[2025-07-07 17:13:58] [Rank 0] Group 7 Loss: 4.8523 +[2025-07-07 17:13:58] [Rank 0] Group 8 Loss: 4.7646 +[2025-07-07 17:13:58] [Rank 0] Group 8 Loss: 4.7646 +[2025-07-07 17:13:58] [Rank 0] Group 9 Loss: 4.8241 +[2025-07-07 17:13:58] [Rank 0] Group 9 Loss: 4.8241 +[2025-07-07 17:13:58] [Rank 0] Group 10 Loss: 4.8096 +[2025-07-07 17:13:58] [Rank 0] Group 10 Loss: 4.8096 +[2025-07-07 17:13:58] [Rank 0] Group 11 Loss: 4.8463 +[2025-07-07 17:13:58] [Rank 0] Group 11 Loss: 4.8463 +[2025-07-07 17:13:58] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 17:13:58] [Rank 0] Group 0 FTA: 0.1651 +[2025-07-07 17:13:58] [Rank 0] Group 1 FTA: 0.1406 +[2025-07-07 17:13:58] [Rank 0] Group 1 FTA: 0.1406 +[2025-07-07 17:13:58] [Rank 0] Group 2 FTA: 0.1589 +[2025-07-07 17:13:58] [Rank 0] Group 2 FTA: 0.1589 +[2025-07-07 17:13:58] [Rank 0] Group 3 FTA: 0.2109 +[2025-07-07 17:13:58] [Rank 0] Group 3 FTA: 0.2109 +[2025-07-07 17:13:58] [Rank 0] Group 4 FTA: 0.2109 +[2025-07-07 17:13:58] [Rank 0] Group 4 FTA: 0.2109 +[2025-07-07 17:13:58] [Rank 0] Group 5 FTA: 0.2057 +[2025-07-07 17:13:58] [Rank 0] Group 5 FTA: 0.2057 +[2025-07-07 17:13:58] [Rank 0] Group 6 FTA: 0.1953 +[2025-07-07 17:13:58] [Rank 0] Group 6 FTA: 0.1953 +[2025-07-07 17:13:58] [Rank 0] Group 7 FTA: 0.2057 +[2025-07-07 17:13:58] [Rank 0] Group 7 FTA: 0.2057 +[2025-07-07 17:13:58] [Rank 0] Group 8 FTA: 0.1432 +[2025-07-07 17:13:58] [Rank 0] Group 8 FTA: 0.1432 +[2025-07-07 17:13:58] [Rank 0] Group 9 FTA: 0.1797 +[2025-07-07 17:13:58] [Rank 0] Group 9 FTA: 0.1797 +[2025-07-07 17:13:58] [Rank 0] Group 10 FTA: 0.1504 +[2025-07-07 17:13:58] [Rank 0] Group 10 FTA: 0.1504 +[2025-07-07 17:13:59] [Rank 0] Group 11 FTA: 0.1758 +[2025-07-07 17:13:59] [Rank 0] Group 11 FTA: 0.1758 +[2025-07-07 17:13:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:13:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:13:59] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:13:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:14:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:14:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:14:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:14:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:14:00] [Rank 0] step:3001/10000 train_time:203812ms step_avg:67.91ms +[2025-07-07 17:14:00] [Rank 0] step:3001/10000 train_time:203812ms step_avg:67.91ms +[2025-07-07 17:14:01] [Rank 0] step:3021/10000 train_time:204574ms step_avg:67.72ms +[2025-07-07 17:14:01] [Rank 0] step:3021/10000 train_time:204574ms step_avg:67.72ms +[2025-07-07 17:14:03] [Rank 0] step:3041/10000 train_time:205942ms step_avg:67.72ms +[2025-07-07 17:14:03] [Rank 0] step:3041/10000 train_time:205942ms step_avg:67.72ms +[2025-07-07 17:14:04] [Rank 0] step:3061/10000 train_time:207974ms step_avg:67.94ms +[2025-07-07 17:14:04] [Rank 0] step:3061/10000 train_time:207974ms step_avg:67.94ms +[2025-07-07 17:14:05] [Rank 0] step:3081/10000 train_time:208713ms step_avg:67.74ms +[2025-07-07 17:14:05] [Rank 0] step:3081/10000 train_time:208713ms step_avg:67.74ms +[2025-07-07 17:14:07] [Rank 0] step:3101/10000 train_time:210084ms step_avg:67.75ms +[2025-07-07 17:14:07] [Rank 0] step:3101/10000 train_time:210084ms step_avg:67.75ms +[2025-07-07 17:14:08] [Rank 0] step:3121/10000 train_time:211453ms step_avg:67.75ms +[2025-07-07 17:14:08] [Rank 0] step:3121/10000 train_time:211453ms step_avg:67.75ms +[2025-07-07 17:14:10] [Rank 0] step:3141/10000 train_time:212823ms step_avg:67.76ms +[2025-07-07 17:14:10] [Rank 0] step:3141/10000 train_time:212823ms step_avg:67.76ms +[2025-07-07 17:14:11] [Rank 0] step:3161/10000 train_time:214194ms step_avg:67.76ms +[2025-07-07 17:14:11] [Rank 0] step:3161/10000 train_time:214194ms step_avg:67.76ms +[2025-07-07 17:14:12] [Rank 0] step:3181/10000 train_time:215565ms step_avg:67.77ms +[2025-07-07 17:14:12] [Rank 0] step:3181/10000 train_time:215565ms step_avg:67.77ms +[2025-07-07 17:14:14] [Rank 0] step:3201/10000 train_time:216936ms step_avg:67.77ms +[2025-07-07 17:14:14] [Rank 0] step:3201/10000 train_time:216936ms step_avg:67.77ms +[2025-07-07 17:14:15] [Rank 0] step:3221/10000 train_time:218308ms step_avg:67.78ms +[2025-07-07 17:14:15] [Rank 0] step:3221/10000 train_time:218308ms step_avg:67.78ms +[2025-07-07 17:14:17] [Rank 0] step:3241/10000 train_time:219681ms step_avg:67.78ms +[2025-07-07 17:14:17] [Rank 0] step:3241/10000 train_time:219681ms step_avg:67.78ms +[2025-07-07 17:14:18] [Rank 0] step:3261/10000 train_time:221099ms step_avg:67.80ms +[2025-07-07 17:14:18] [Rank 0] step:3261/10000 train_time:221099ms step_avg:67.80ms +[2025-07-07 17:14:19] [Rank 0] step:3281/10000 train_time:222472ms step_avg:67.81ms +[2025-07-07 17:14:19] [Rank 0] step:3281/10000 train_time:222472ms step_avg:67.81ms +[2025-07-07 17:14:21] [Rank 0] step:3301/10000 train_time:223846ms step_avg:67.81ms +[2025-07-07 17:14:21] [Rank 0] 
step:3301/10000 train_time:223846ms step_avg:67.81ms +[2025-07-07 17:14:22] [Rank 0] step:3321/10000 train_time:225220ms step_avg:67.82ms +[2025-07-07 17:14:22] [Rank 0] step:3321/10000 train_time:225220ms step_avg:67.82ms +[2025-07-07 17:14:23] [Rank 0] step:3341/10000 train_time:226594ms step_avg:67.82ms +[2025-07-07 17:14:23] [Rank 0] step:3341/10000 train_time:226594ms step_avg:67.82ms +[2025-07-07 17:14:25] [Rank 0] step:3361/10000 train_time:227968ms step_avg:67.83ms +[2025-07-07 17:14:25] [Rank 0] step:3361/10000 train_time:227968ms step_avg:67.83ms +[2025-07-07 17:14:26] [Rank 0] step:3381/10000 train_time:229341ms step_avg:67.83ms +[2025-07-07 17:14:26] [Rank 0] step:3381/10000 train_time:229341ms step_avg:67.83ms +[2025-07-07 17:14:27] [Rank 0] step:3401/10000 train_time:230716ms step_avg:67.84ms +[2025-07-07 17:14:27] [Rank 0] step:3401/10000 train_time:230716ms step_avg:67.84ms +[2025-07-07 17:14:29] [Rank 0] step:3421/10000 train_time:232136ms step_avg:67.86ms +[2025-07-07 17:14:29] [Rank 0] step:3421/10000 train_time:232136ms step_avg:67.86ms +[2025-07-07 17:14:30] [Rank 0] step:3441/10000 train_time:233515ms step_avg:67.86ms +[2025-07-07 17:14:30] [Rank 0] step:3441/10000 train_time:233515ms step_avg:67.86ms +[2025-07-07 17:14:32] [Rank 0] step:3461/10000 train_time:234902ms step_avg:67.87ms +[2025-07-07 17:14:32] [Rank 0] step:3461/10000 train_time:234902ms step_avg:67.87ms +[2025-07-07 17:14:33] [Rank 0] step:3481/10000 train_time:236277ms step_avg:67.88ms +[2025-07-07 17:14:33] [Rank 0] step:3481/10000 train_time:236277ms step_avg:67.88ms +[2025-07-07 17:14:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:14:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:14:35] [Rank 0] PRINT: step:3500/10000 train_loss:1.2333 val_loss:1.2137 train_time:238275ms step_avg:68.08ms +[2025-07-07 17:14:35] [Rank 0] PRINT: step:3500/10000 train_loss:1.2333 val_loss:1.2137 train_time:238275ms step_avg:68.08ms +[2025-07-07 17:14:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:14:35] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:14:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 17:14:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 17:14:36] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:14:36] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:19:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:19:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:19:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:19:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:19:59] [Rank 0] Total Loss: 4.8382 +[2025-07-07 17:19:59] [Rank 0] Total Loss: 4.8382 +[2025-07-07 17:19:59] [Rank 0] Total FTA: 0.2839 +[2025-07-07 17:19:59] [Rank 0] Total FTA: 0.2839 +[2025-07-07 17:19:59] [Rank 0] Group 0 Loss: 5.0523 +[2025-07-07 17:19:59] [Rank 0] Group 0 Loss: 5.0523 +[2025-07-07 17:19:59] [Rank 0] Group 1 Loss: 4.7184 +[2025-07-07 17:19:59] [Rank 0] Group 1 Loss: 4.7184 +[2025-07-07 17:19:59] [Rank 0] Group 2 Loss: 4.6973 +[2025-07-07 17:19:59] [Rank 0] Group 2 Loss: 4.6973 +[2025-07-07 17:19:59] [Rank 0] Group 3 Loss: 4.7932 +[2025-07-07 17:19:59] [Rank 0] Group 3 Loss: 4.7932 +[2025-07-07 17:19:59] [Rank 0] Group 4 Loss: 4.7575 +[2025-07-07 17:19:59] [Rank 0] Group 4 Loss: 4.7575 +[2025-07-07 17:19:59] [Rank 0] Group 5 Loss: 4.8601 +[2025-07-07 17:19:59] [Rank 0] Group 5 Loss: 4.8601 +[2025-07-07 17:19:59] [Rank 0] Group 6 Loss: 4.7413 +[2025-07-07 17:19:59] [Rank 0] Group 6 Loss: 4.7413 +[2025-07-07 17:19:59] [Rank 0] Group 7 Loss: 4.8673 +[2025-07-07 17:19:59] [Rank 0] Group 7 Loss: 4.8673 +[2025-07-07 17:19:59] [Rank 0] Group 8 Loss: 4.8643 +[2025-07-07 17:19:59] [Rank 0] Group 8 Loss: 4.8643 +[2025-07-07 17:19:59] [Rank 0] Group 9 Loss: 4.8273 +[2025-07-07 17:19:59] [Rank 0] Group 9 Loss: 4.8273 +[2025-07-07 17:19:59] [Rank 0] Group 10 Loss: 4.8580 +[2025-07-07 17:19:59] [Rank 0] Group 10 Loss: 4.8580 +[2025-07-07 17:19:59] [Rank 0] Group 11 Loss: 4.8228 +[2025-07-07 17:19:59] [Rank 0] Group 11 Loss: 4.8228 +[2025-07-07 17:19:59] [Rank 0] Group 0 FTA: 0.3316 +[2025-07-07 17:19:59] [Rank 0] Group 0 FTA: 0.3316 +[2025-07-07 17:19:59] [Rank 0] Group 1 FTA: 0.3385 +[2025-07-07 17:19:59] [Rank 0] Group 1 FTA: 0.3385 +[2025-07-07 17:19:59] [Rank 0] Group 2 FTA: 0.3255 +[2025-07-07 17:19:59] [Rank 0] Group 2 FTA: 0.3255 +[2025-07-07 17:19:59] [Rank 0] Group 3 FTA: 0.2031 +[2025-07-07 17:19:59] [Rank 0] Group 3 FTA: 0.2031 +[2025-07-07 17:19:59] [Rank 0] Group 4 FTA: 0.2396 +[2025-07-07 17:19:59] [Rank 0] Group 4 FTA: 0.2396 +[2025-07-07 17:19:59] [Rank 0] Group 5 FTA: 0.3125 +[2025-07-07 17:19:59] [Rank 0] Group 5 FTA: 0.3125 +[2025-07-07 17:19:59] [Rank 0] Group 6 FTA: 0.2839 +[2025-07-07 17:19:59] [Rank 0] Group 6 FTA: 0.2839 +[2025-07-07 17:19:59] [Rank 0] Group 7 FTA: 0.2891 +[2025-07-07 17:19:59] [Rank 0] Group 7 FTA: 0.2891 +[2025-07-07 17:19:59] [Rank 0] Group 8 FTA: 0.2812 +[2025-07-07 17:19:59] [Rank 0] Group 8 FTA: 0.2812 +[2025-07-07 17:19:59] [Rank 0] Group 9 FTA: 0.2734 +[2025-07-07 17:19:59] [Rank 0] Group 9 FTA: 0.2734 +[2025-07-07 17:19:59] [Rank 0] Group 10 FTA: 0.2910 +[2025-07-07 17:19:59] [Rank 0] Group 10 FTA: 0.2910 +[2025-07-07 17:19:59] [Rank 0] Group 11 FTA: 0.2461 +[2025-07-07 17:19:59] [Rank 0] Group 11 FTA: 0.2461 +[2025-07-07 17:19:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:19:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:20:00] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:20:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:20:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:20:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:20:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:20:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:20:00] [Rank 0] step:3501/10000 train_time:238289ms step_avg:68.06ms +[2025-07-07 17:20:00] [Rank 0] step:3501/10000 train_time:238289ms step_avg:68.06ms +[2025-07-07 17:20:02] [Rank 0] step:3521/10000 train_time:239040ms step_avg:67.89ms +[2025-07-07 17:20:02] [Rank 0] step:3521/10000 train_time:239040ms step_avg:67.89ms +[2025-07-07 17:20:03] [Rank 0] step:3541/10000 train_time:240409ms step_avg:67.89ms +[2025-07-07 17:20:03] [Rank 0] step:3541/10000 train_time:240409ms step_avg:67.89ms +[2025-07-07 17:20:04] [Rank 0] step:3561/10000 train_time:241778ms step_avg:67.90ms +[2025-07-07 17:20:04] [Rank 0] step:3561/10000 train_time:241778ms step_avg:67.90ms +[2025-07-07 17:20:06] [Rank 0] step:3581/10000 train_time:243148ms step_avg:67.90ms +[2025-07-07 17:20:06] [Rank 0] step:3581/10000 train_time:243148ms step_avg:67.90ms +[2025-07-07 17:20:07] [Rank 0] step:3601/10000 train_time:244564ms step_avg:67.92ms +[2025-07-07 17:20:07] [Rank 0] step:3601/10000 train_time:244564ms step_avg:67.92ms +[2025-07-07 17:20:08] [Rank 0] step:3621/10000 train_time:245912ms step_avg:67.91ms +[2025-07-07 17:20:08] [Rank 0] step:3621/10000 train_time:245912ms step_avg:67.91ms +[2025-07-07 17:20:10] [Rank 0] step:3641/10000 train_time:247283ms step_avg:67.92ms +[2025-07-07 17:20:10] [Rank 0] step:3641/10000 train_time:247283ms step_avg:67.92ms +[2025-07-07 17:20:11] [Rank 0] step:3661/10000 train_time:248653ms step_avg:67.92ms +[2025-07-07 17:20:11] [Rank 0] step:3661/10000 train_time:248653ms step_avg:67.92ms +[2025-07-07 17:20:13] [Rank 0] step:3681/10000 train_time:250026ms step_avg:67.92ms +[2025-07-07 17:20:13] [Rank 0] step:3681/10000 train_time:250026ms step_avg:67.92ms +[2025-07-07 17:20:14] [Rank 0] step:3701/10000 train_time:251397ms step_avg:67.93ms +[2025-07-07 17:20:14] [Rank 0] step:3701/10000 train_time:251397ms step_avg:67.93ms +[2025-07-07 17:20:15] [Rank 0] step:3721/10000 train_time:252769ms step_avg:67.93ms +[2025-07-07 17:20:15] [Rank 0] step:3721/10000 train_time:252769ms step_avg:67.93ms +[2025-07-07 17:20:17] [Rank 0] step:3741/10000 train_time:254141ms step_avg:67.93ms +[2025-07-07 17:20:17] [Rank 0] step:3741/10000 train_time:254141ms step_avg:67.93ms +[2025-07-07 17:20:18] [Rank 0] step:3761/10000 train_time:255513ms step_avg:67.94ms +[2025-07-07 17:20:18] [Rank 0] step:3761/10000 train_time:255513ms step_avg:67.94ms +[2025-07-07 17:20:19] [Rank 0] step:3781/10000 train_time:256885ms step_avg:67.94ms +[2025-07-07 17:20:19] [Rank 0] step:3781/10000 train_time:256885ms step_avg:67.94ms +[2025-07-07 17:20:21] [Rank 0] step:3801/10000 train_time:258297ms step_avg:67.96ms +[2025-07-07 17:20:21] [Rank 0] 
step:3801/10000 train_time:258297ms step_avg:67.96ms +[2025-07-07 17:20:22] [Rank 0] step:3821/10000 train_time:259671ms step_avg:67.96ms +[2025-07-07 17:20:22] [Rank 0] step:3821/10000 train_time:259671ms step_avg:67.96ms +[2025-07-07 17:20:24] [Rank 0] step:3841/10000 train_time:261045ms step_avg:67.96ms +[2025-07-07 17:20:24] [Rank 0] step:3841/10000 train_time:261045ms step_avg:67.96ms +[2025-07-07 17:20:25] [Rank 0] step:3861/10000 train_time:262419ms step_avg:67.97ms +[2025-07-07 17:20:25] [Rank 0] step:3861/10000 train_time:262419ms step_avg:67.97ms +[2025-07-07 17:20:26] [Rank 0] step:3881/10000 train_time:263792ms step_avg:67.97ms +[2025-07-07 17:20:26] [Rank 0] step:3881/10000 train_time:263792ms step_avg:67.97ms +[2025-07-07 17:20:28] [Rank 0] step:3901/10000 train_time:265165ms step_avg:67.97ms +[2025-07-07 17:20:28] [Rank 0] step:3901/10000 train_time:265165ms step_avg:67.97ms +[2025-07-07 17:20:29] [Rank 0] step:3921/10000 train_time:266538ms step_avg:67.98ms +[2025-07-07 17:20:29] [Rank 0] step:3921/10000 train_time:266538ms step_avg:67.98ms +[2025-07-07 17:20:30] [Rank 0] step:3941/10000 train_time:267911ms step_avg:67.98ms +[2025-07-07 17:20:30] [Rank 0] step:3941/10000 train_time:267911ms step_avg:67.98ms +[2025-07-07 17:20:32] [Rank 0] step:3961/10000 train_time:269332ms step_avg:68.00ms +[2025-07-07 17:20:32] [Rank 0] step:3961/10000 train_time:269332ms step_avg:68.00ms +[2025-07-07 17:20:33] [Rank 0] step:3981/10000 train_time:270683ms step_avg:67.99ms +[2025-07-07 17:20:33] [Rank 0] step:3981/10000 train_time:270683ms step_avg:67.99ms +[2025-07-07 17:20:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:20:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:20:36] [Rank 0] PRINT: step:4000/10000 train_loss:1.1752 val_loss:1.1677 train_time:272679ms step_avg:68.17ms +[2025-07-07 17:20:36] [Rank 0] PRINT: step:4000/10000 train_loss:1.1752 val_loss:1.1677 train_time:272679ms step_avg:68.17ms +[2025-07-07 17:20:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:20:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:20:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 17:20:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 17:20:36] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:20:36] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:25:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:25:59] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:25:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:25:59] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:25:59] [Rank 0] Total Loss: 4.8631 +[2025-07-07 17:25:59] [Rank 0] Total Loss: 4.8631 +[2025-07-07 17:25:59] [Rank 0] Total FTA: 0.3739 +[2025-07-07 17:25:59] [Rank 0] Total FTA: 0.3739 +[2025-07-07 17:25:59] [Rank 0] Group 0 Loss: 5.1336 +[2025-07-07 17:25:59] [Rank 0] Group 0 Loss: 5.1336 +[2025-07-07 17:25:59] [Rank 0] Group 1 Loss: 4.6662 +[2025-07-07 17:25:59] [Rank 0] Group 1 Loss: 4.6662 +[2025-07-07 17:25:59] [Rank 0] Group 2 Loss: 4.7896 +[2025-07-07 17:25:59] [Rank 0] Group 2 Loss: 4.7896 +[2025-07-07 17:25:59] [Rank 0] Group 3 Loss: 4.7393 +[2025-07-07 17:25:59] [Rank 0] Group 3 Loss: 4.7393 +[2025-07-07 17:25:59] [Rank 0] Group 4 Loss: 4.7852 +[2025-07-07 17:25:59] [Rank 0] Group 4 Loss: 4.7852 +[2025-07-07 17:25:59] [Rank 0] Group 5 Loss: 4.8020 +[2025-07-07 17:25:59] [Rank 0] Group 5 Loss: 4.8020 +[2025-07-07 17:25:59] [Rank 0] Group 6 Loss: 4.7796 +[2025-07-07 17:25:59] [Rank 0] Group 6 Loss: 4.7796 +[2025-07-07 17:25:59] [Rank 0] Group 7 Loss: 4.8985 +[2025-07-07 17:25:59] [Rank 0] Group 7 Loss: 4.8985 +[2025-07-07 17:25:59] [Rank 0] Group 8 Loss: 4.8178 +[2025-07-07 17:25:59] [Rank 0] Group 8 Loss: 4.8178 +[2025-07-07 17:25:59] [Rank 0] Group 9 Loss: 4.8528 +[2025-07-07 17:25:59] [Rank 0] Group 9 Loss: 4.8528 +[2025-07-07 17:25:59] [Rank 0] Group 10 Loss: 4.8961 +[2025-07-07 17:25:59] [Rank 0] Group 10 Loss: 4.8961 +[2025-07-07 17:25:59] [Rank 0] Group 11 Loss: 4.8811 +[2025-07-07 17:25:59] [Rank 0] Group 11 Loss: 4.8811 +[2025-07-07 17:25:59] [Rank 0] Group 0 FTA: 0.4954 +[2025-07-07 17:25:59] [Rank 0] Group 0 FTA: 0.4954 +[2025-07-07 17:25:59] [Rank 0] Group 1 FTA: 0.5052 +[2025-07-07 17:25:59] [Rank 0] Group 1 FTA: 0.5052 +[2025-07-07 17:26:00] [Rank 0] Group 2 FTA: 0.4453 +[2025-07-07 17:26:00] [Rank 0] Group 2 FTA: 0.4453 +[2025-07-07 17:26:00] [Rank 0] Group 3 FTA: 0.3828 +[2025-07-07 17:26:00] [Rank 0] Group 3 FTA: 0.3828 +[2025-07-07 17:26:00] [Rank 0] Group 4 FTA: 0.2500 +[2025-07-07 17:26:00] [Rank 0] Group 4 FTA: 0.2500 +[2025-07-07 17:26:00] [Rank 0] Group 5 FTA: 0.3516 +[2025-07-07 17:26:00] [Rank 0] Group 5 FTA: 0.3516 +[2025-07-07 17:26:00] [Rank 0] Group 6 FTA: 0.3021 +[2025-07-07 17:26:00] [Rank 0] Group 6 FTA: 0.3021 +[2025-07-07 17:26:00] [Rank 0] Group 7 FTA: 0.3464 +[2025-07-07 17:26:00] [Rank 0] Group 7 FTA: 0.3464 +[2025-07-07 17:26:00] [Rank 0] Group 8 FTA: 0.3047 +[2025-07-07 17:26:00] [Rank 0] Group 8 FTA: 0.3047 +[2025-07-07 17:26:00] [Rank 0] Group 9 FTA: 0.3438 +[2025-07-07 17:26:00] [Rank 0] Group 9 FTA: 0.3438 +[2025-07-07 17:26:00] [Rank 0] Group 10 FTA: 0.3105 +[2025-07-07 17:26:00] [Rank 0] Group 10 FTA: 0.3105 +[2025-07-07 17:26:00] [Rank 0] Group 11 FTA: 0.3604 +[2025-07-07 17:26:00] [Rank 0] Group 11 FTA: 0.3604 +[2025-07-07 17:26:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:26:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:26:00] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:26:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:26:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:26:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:26:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:26:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:26:01] [Rank 0] step:4001/10000 train_time:272691ms step_avg:68.16ms +[2025-07-07 17:26:01] [Rank 0] step:4001/10000 train_time:272691ms step_avg:68.16ms +[2025-07-07 17:26:02] [Rank 0] step:4021/10000 train_time:273454ms step_avg:68.01ms +[2025-07-07 17:26:02] [Rank 0] step:4021/10000 train_time:273454ms step_avg:68.01ms +[2025-07-07 17:26:04] [Rank 0] step:4041/10000 train_time:274821ms step_avg:68.01ms +[2025-07-07 17:26:04] [Rank 0] step:4041/10000 train_time:274821ms step_avg:68.01ms +[2025-07-07 17:26:05] [Rank 0] step:4061/10000 train_time:276189ms step_avg:68.01ms +[2025-07-07 17:26:05] [Rank 0] step:4061/10000 train_time:276189ms step_avg:68.01ms +[2025-07-07 17:26:07] [Rank 0] step:4081/10000 train_time:277582ms step_avg:68.02ms +[2025-07-07 17:26:07] [Rank 0] step:4081/10000 train_time:277582ms step_avg:68.02ms +[2025-07-07 17:26:08] [Rank 0] step:4101/10000 train_time:278952ms step_avg:68.02ms +[2025-07-07 17:26:08] [Rank 0] step:4101/10000 train_time:278952ms step_avg:68.02ms +[2025-07-07 17:26:09] [Rank 0] step:4121/10000 train_time:280355ms step_avg:68.03ms +[2025-07-07 17:26:09] [Rank 0] step:4121/10000 train_time:280355ms step_avg:68.03ms +[2025-07-07 17:26:11] [Rank 0] step:4141/10000 train_time:282386ms step_avg:68.19ms +[2025-07-07 17:26:11] [Rank 0] step:4141/10000 train_time:282386ms step_avg:68.19ms +[2025-07-07 17:26:12] [Rank 0] step:4161/10000 train_time:283124ms step_avg:68.04ms +[2025-07-07 17:26:12] [Rank 0] step:4161/10000 train_time:283124ms step_avg:68.04ms +[2025-07-07 17:26:13] [Rank 0] step:4181/10000 train_time:284497ms step_avg:68.05ms +[2025-07-07 17:26:13] [Rank 0] step:4181/10000 train_time:284497ms step_avg:68.05ms +[2025-07-07 17:26:15] [Rank 0] step:4201/10000 train_time:285870ms step_avg:68.05ms +[2025-07-07 17:26:15] [Rank 0] step:4201/10000 train_time:285870ms step_avg:68.05ms +[2025-07-07 17:26:16] [Rank 0] step:4221/10000 train_time:287242ms step_avg:68.05ms +[2025-07-07 17:26:16] [Rank 0] step:4221/10000 train_time:287242ms step_avg:68.05ms +[2025-07-07 17:26:18] [Rank 0] step:4241/10000 train_time:288614ms step_avg:68.05ms +[2025-07-07 17:26:18] [Rank 0] step:4241/10000 train_time:288614ms step_avg:68.05ms +[2025-07-07 17:26:19] [Rank 0] step:4261/10000 train_time:289987ms step_avg:68.06ms +[2025-07-07 17:26:19] [Rank 0] step:4261/10000 train_time:289987ms step_avg:68.06ms +[2025-07-07 17:26:20] [Rank 0] step:4281/10000 train_time:291360ms step_avg:68.06ms +[2025-07-07 17:26:20] [Rank 0] step:4281/10000 train_time:291360ms step_avg:68.06ms +[2025-07-07 17:26:22] [Rank 0] step:4301/10000 train_time:292733ms step_avg:68.06ms +[2025-07-07 17:26:22] [Rank 0] 
step:4301/10000 train_time:292733ms step_avg:68.06ms +[2025-07-07 17:26:23] [Rank 0] step:4321/10000 train_time:294107ms step_avg:68.06ms +[2025-07-07 17:26:23] [Rank 0] step:4321/10000 train_time:294107ms step_avg:68.06ms +[2025-07-07 17:26:24] [Rank 0] step:4341/10000 train_time:295510ms step_avg:68.07ms +[2025-07-07 17:26:24] [Rank 0] step:4341/10000 train_time:295510ms step_avg:68.07ms +[2025-07-07 17:26:26] [Rank 0] step:4361/10000 train_time:296886ms step_avg:68.08ms +[2025-07-07 17:26:26] [Rank 0] step:4361/10000 train_time:296886ms step_avg:68.08ms +[2025-07-07 17:26:27] [Rank 0] step:4381/10000 train_time:298260ms step_avg:68.08ms +[2025-07-07 17:26:27] [Rank 0] step:4381/10000 train_time:298260ms step_avg:68.08ms +[2025-07-07 17:26:29] [Rank 0] step:4401/10000 train_time:299635ms step_avg:68.08ms +[2025-07-07 17:26:29] [Rank 0] step:4401/10000 train_time:299635ms step_avg:68.08ms +[2025-07-07 17:26:30] [Rank 0] step:4421/10000 train_time:301009ms step_avg:68.09ms +[2025-07-07 17:26:30] [Rank 0] step:4421/10000 train_time:301009ms step_avg:68.09ms +[2025-07-07 17:26:31] [Rank 0] step:4441/10000 train_time:302385ms step_avg:68.09ms +[2025-07-07 17:26:31] [Rank 0] step:4441/10000 train_time:302385ms step_avg:68.09ms +[2025-07-07 17:26:33] [Rank 0] step:4461/10000 train_time:303761ms step_avg:68.09ms +[2025-07-07 17:26:33] [Rank 0] step:4461/10000 train_time:303761ms step_avg:68.09ms +[2025-07-07 17:26:34] [Rank 0] step:4481/10000 train_time:305136ms step_avg:68.10ms +[2025-07-07 17:26:34] [Rank 0] step:4481/10000 train_time:305136ms step_avg:68.10ms +[2025-07-07 17:26:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:26:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:26:36] [Rank 0] PRINT: step:4500/10000 train_loss:1.1330 val_loss:1.1340 train_time:307137ms step_avg:68.25ms +[2025-07-07 17:26:36] [Rank 0] PRINT: step:4500/10000 train_loss:1.1330 val_loss:1.1340 train_time:307137ms step_avg:68.25ms +[2025-07-07 17:26:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:26:36] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:26:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 17:26:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 17:26:37] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:26:37] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 17:32:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:32:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 17:32:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:32:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 17:32:00] [Rank 0] Total Loss: 5.0447 +[2025-07-07 17:32:00] [Rank 0] Total Loss: 5.0447 +[2025-07-07 17:32:00] [Rank 0] Total FTA: 0.4064 +[2025-07-07 17:32:00] [Rank 0] Total FTA: 0.4064 +[2025-07-07 17:32:00] [Rank 0] Group 0 Loss: 5.3808 +[2025-07-07 17:32:00] [Rank 0] Group 0 Loss: 5.3808 +[2025-07-07 17:32:00] [Rank 0] Group 1 Loss: 5.0067 +[2025-07-07 17:32:00] [Rank 0] Group 1 Loss: 5.0067 +[2025-07-07 17:32:00] [Rank 0] Group 2 Loss: 4.7961 +[2025-07-07 17:32:00] [Rank 0] Group 2 Loss: 4.7961 +[2025-07-07 17:32:00] [Rank 0] Group 3 Loss: 5.0131 +[2025-07-07 17:32:00] [Rank 0] Group 3 Loss: 5.0131 +[2025-07-07 17:32:00] [Rank 0] Group 4 Loss: 4.9589 +[2025-07-07 17:32:00] [Rank 0] Group 4 Loss: 4.9589 +[2025-07-07 17:32:00] [Rank 0] Group 5 Loss: 4.8865 +[2025-07-07 17:32:00] [Rank 0] Group 5 Loss: 4.8865 +[2025-07-07 17:32:00] [Rank 0] Group 6 Loss: 5.0096 +[2025-07-07 17:32:00] [Rank 0] Group 6 Loss: 5.0096 +[2025-07-07 17:32:00] [Rank 0] Group 7 Loss: 5.0135 +[2025-07-07 17:32:00] [Rank 0] Group 7 Loss: 5.0135 +[2025-07-07 17:32:00] [Rank 0] Group 8 Loss: 5.0236 +[2025-07-07 17:32:00] [Rank 0] Group 8 Loss: 5.0236 +[2025-07-07 17:32:00] [Rank 0] Group 9 Loss: 5.0328 +[2025-07-07 17:32:00] [Rank 0] Group 9 Loss: 5.0328 +[2025-07-07 17:32:00] [Rank 0] Group 10 Loss: 5.0236 +[2025-07-07 17:32:00] [Rank 0] Group 10 Loss: 5.0236 +[2025-07-07 17:32:00] [Rank 0] Group 11 Loss: 5.0493 +[2025-07-07 17:32:00] [Rank 0] Group 11 Loss: 5.0493 +[2025-07-07 17:32:00] [Rank 0] Group 0 FTA: 0.3329 +[2025-07-07 17:32:00] [Rank 0] Group 0 FTA: 0.3329 +[2025-07-07 17:32:00] [Rank 0] Group 1 FTA: 0.5000 +[2025-07-07 17:32:00] [Rank 0] Group 1 FTA: 0.5000 +[2025-07-07 17:32:00] [Rank 0] Group 2 FTA: 0.4115 +[2025-07-07 17:32:00] [Rank 0] Group 2 FTA: 0.4115 +[2025-07-07 17:32:00] [Rank 0] Group 3 FTA: 0.3984 +[2025-07-07 17:32:00] [Rank 0] Group 3 FTA: 0.3984 +[2025-07-07 17:32:00] [Rank 0] Group 4 FTA: 0.4089 +[2025-07-07 17:32:00] [Rank 0] Group 4 FTA: 0.4089 +[2025-07-07 17:32:00] [Rank 0] Group 5 FTA: 0.3958 +[2025-07-07 17:32:00] [Rank 0] Group 5 FTA: 0.3958 +[2025-07-07 17:32:00] [Rank 0] Group 6 FTA: 0.3802 +[2025-07-07 17:32:00] [Rank 0] Group 6 FTA: 0.3802 +[2025-07-07 17:32:00] [Rank 0] Group 7 FTA: 0.4036 +[2025-07-07 17:32:00] [Rank 0] Group 7 FTA: 0.4036 +[2025-07-07 17:32:00] [Rank 0] Group 8 FTA: 0.4062 +[2025-07-07 17:32:00] [Rank 0] Group 8 FTA: 0.4062 +[2025-07-07 17:32:00] [Rank 0] Group 9 FTA: 0.4648 +[2025-07-07 17:32:00] [Rank 0] Group 9 FTA: 0.4648 +[2025-07-07 17:32:00] [Rank 0] Group 10 FTA: 0.3945 +[2025-07-07 17:32:00] [Rank 0] Group 10 FTA: 0.3945 +[2025-07-07 17:32:00] [Rank 0] Group 11 FTA: 0.4326 +[2025-07-07 17:32:00] [Rank 0] Group 11 FTA: 0.4326 +[2025-07-07 17:32:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:32:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png +[2025-07-07 17:32:01] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:32:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png +[2025-07-07 17:32:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:32:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png +[2025-07-07 17:32:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:32:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png +[2025-07-07 17:32:02] [Rank 0] step:4501/10000 train_time:307154ms step_avg:68.24ms +[2025-07-07 17:32:02] [Rank 0] step:4501/10000 train_time:307154ms step_avg:68.24ms +[2025-07-07 17:32:03] [Rank 0] step:4521/10000 train_time:308620ms step_avg:68.26ms +[2025-07-07 17:32:03] [Rank 0] step:4521/10000 train_time:308620ms step_avg:68.26ms +[2025-07-07 17:32:05] [Rank 0] step:4541/10000 train_time:309988ms step_avg:68.26ms +[2025-07-07 17:32:05] [Rank 0] step:4541/10000 train_time:309988ms step_avg:68.26ms +[2025-07-07 17:32:06] [Rank 0] step:4561/10000 train_time:311357ms step_avg:68.27ms +[2025-07-07 17:32:06] [Rank 0] step:4561/10000 train_time:311357ms step_avg:68.27ms +[2025-07-07 17:32:08] [Rank 0] step:4581/10000 train_time:312727ms step_avg:68.27ms +[2025-07-07 17:32:08] [Rank 0] step:4581/10000 train_time:312727ms step_avg:68.27ms +[2025-07-07 17:32:09] [Rank 0] step:4601/10000 train_time:314096ms step_avg:68.27ms +[2025-07-07 17:32:09] [Rank 0] step:4601/10000 train_time:314096ms step_avg:68.27ms +[2025-07-07 17:32:10] [Rank 0] step:4621/10000 train_time:315466ms step_avg:68.27ms +[2025-07-07 17:32:10] [Rank 0] step:4621/10000 train_time:315466ms step_avg:68.27ms +[2025-07-07 17:32:12] [Rank 0] step:4641/10000 train_time:316838ms step_avg:68.27ms +[2025-07-07 17:32:12] [Rank 0] step:4641/10000 train_time:316838ms step_avg:68.27ms +[2025-07-07 17:32:13] [Rank 0] step:4661/10000 train_time:318209ms step_avg:68.27ms +[2025-07-07 17:32:13] [Rank 0] step:4661/10000 train_time:318209ms step_avg:68.27ms +[2025-07-07 17:32:14] [Rank 0] step:4681/10000 train_time:320234ms step_avg:68.41ms +[2025-07-07 17:32:14] [Rank 0] step:4681/10000 train_time:320234ms step_avg:68.41ms +[2025-07-07 17:32:16] [Rank 0] step:4701/10000 train_time:320975ms step_avg:68.28ms +[2025-07-07 17:32:16] [Rank 0] step:4701/10000 train_time:320975ms step_avg:68.28ms +[2025-07-07 17:32:17] [Rank 0] step:4721/10000 train_time:322348ms step_avg:68.28ms +[2025-07-07 17:32:17] [Rank 0] step:4721/10000 train_time:322348ms step_avg:68.28ms +[2025-07-07 17:32:19] [Rank 0] step:4741/10000 train_time:323722ms step_avg:68.28ms +[2025-07-07 17:32:19] [Rank 0] step:4741/10000 train_time:323722ms step_avg:68.28ms +[2025-07-07 17:32:20] [Rank 0] step:4761/10000 train_time:325103ms step_avg:68.28ms +[2025-07-07 17:32:20] [Rank 0] step:4761/10000 train_time:325103ms step_avg:68.28ms +[2025-07-07 17:32:21] [Rank 0] step:4781/10000 train_time:326477ms step_avg:68.29ms +[2025-07-07 17:32:21] [Rank 0] step:4781/10000 train_time:326477ms step_avg:68.29ms +[2025-07-07 17:32:23] [Rank 0] step:4801/10000 train_time:327849ms step_avg:68.29ms +[2025-07-07 17:32:23] [Rank 0] 
step:4801/10000 train_time:327849ms step_avg:68.29ms +[2025-07-07 17:32:24] [Rank 0] step:4821/10000 train_time:329222ms step_avg:68.29ms +[2025-07-07 17:32:24] [Rank 0] step:4821/10000 train_time:329222ms step_avg:68.29ms +[2025-07-07 17:32:25] [Rank 0] step:4841/10000 train_time:330595ms step_avg:68.29ms +[2025-07-07 17:32:25] [Rank 0] step:4841/10000 train_time:330595ms step_avg:68.29ms +[2025-07-07 17:32:27] [Rank 0] step:4861/10000 train_time:331969ms step_avg:68.29ms +[2025-07-07 17:32:27] [Rank 0] step:4861/10000 train_time:331969ms step_avg:68.29ms +[2025-07-07 17:32:28] [Rank 0] step:4881/10000 train_time:333391ms step_avg:68.30ms +[2025-07-07 17:32:28] [Rank 0] step:4881/10000 train_time:333391ms step_avg:68.30ms +[2025-07-07 17:32:30] [Rank 0] step:4901/10000 train_time:334765ms step_avg:68.31ms +[2025-07-07 17:32:30] [Rank 0] step:4901/10000 train_time:334765ms step_avg:68.31ms +[2025-07-07 17:32:31] [Rank 0] step:4921/10000 train_time:336141ms step_avg:68.31ms +[2025-07-07 17:32:31] [Rank 0] step:4921/10000 train_time:336141ms step_avg:68.31ms +[2025-07-07 17:32:32] [Rank 0] step:4941/10000 train_time:337515ms step_avg:68.31ms +[2025-07-07 17:32:32] [Rank 0] step:4941/10000 train_time:337515ms step_avg:68.31ms +[2025-07-07 17:32:34] [Rank 0] step:4961/10000 train_time:338889ms step_avg:68.31ms +[2025-07-07 17:32:34] [Rank 0] step:4961/10000 train_time:338889ms step_avg:68.31ms +[2025-07-07 17:32:35] [Rank 0] step:4981/10000 train_time:340264ms step_avg:68.31ms +[2025-07-07 17:32:35] [Rank 0] step:4981/10000 train_time:340264ms step_avg:68.31ms +[2025-07-07 17:32:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:32:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 17:32:37] [Rank 0] PRINT: step:5000/10000 train_loss:1.0956 val_loss:1.1006 train_time:342262ms step_avg:68.45ms +[2025-07-07 17:32:37] [Rank 0] PRINT: step:5000/10000 train_loss:1.0956 val_loss:1.1006 train_time:342262ms step_avg:68.45ms +[2025-07-07 17:32:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:32:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 17:32:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 17:32:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 17:32:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:38:01] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:38:01] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:38:01] [Rank 0] Total Loss: 5.0989
+[2025-07-07 17:38:01] [Rank 0] Total FTA: 0.4823
+[2025-07-07 17:38:01] [Rank 0] Group 0 Loss: 5.5177
+[2025-07-07 17:38:01] [Rank 0] Group 1 Loss: 5.0542
+[2025-07-07 17:38:01] [Rank 0] Group 2 Loss: 4.8082
+[2025-07-07 17:38:01] [Rank 0] Group 3 Loss: 5.0379
+[2025-07-07 17:38:01] [Rank 0] Group 4 Loss: 5.0359
+[2025-07-07 17:38:01] [Rank 0] Group 5 Loss: 5.0128
+[2025-07-07 17:38:01] [Rank 0] Group 6 Loss: 4.9901
+[2025-07-07 17:38:01] [Rank 0] Group 7 Loss: 5.0759
+[2025-07-07 17:38:01] [Rank 0] Group 8 Loss: 5.0096
+[2025-07-07 17:38:01] [Rank 0] Group 9 Loss: 5.0668
+[2025-07-07 17:38:01] [Rank 0] Group 10 Loss: 5.1137
+[2025-07-07 17:38:01] [Rank 0] Group 11 Loss: 5.0723
+[2025-07-07 17:38:01] [Rank 0] Group 0 FTA: 0.3446
+[2025-07-07 17:38:01] [Rank 0] Group 1 FTA: 0.4661
+[2025-07-07 17:38:01] [Rank 0] Group 2 FTA: 0.5625
+[2025-07-07 17:38:01] [Rank 0] Group 3 FTA: 0.5182
+[2025-07-07 17:38:01] [Rank 0] Group 4 FTA: 0.4870
+[2025-07-07 17:38:01] [Rank 0] Group 5 FTA: 0.5495
+[2025-07-07 17:38:01] [Rank 0] Group 6 FTA: 0.5130
+[2025-07-07 17:38:01] [Rank 0] Group 7 FTA: 0.4688
+[2025-07-07 17:38:01] [Rank 0] Group 8 FTA: 0.5104
+[2025-07-07 17:38:01] [Rank 0] Group 9 FTA: 0.5039
+[2025-07-07 17:38:01] [Rank 0] Group 10 FTA: 0.4941
+[2025-07-07 17:38:01] [Rank 0] Group 11 FTA: 0.4932
+[2025-07-07 17:38:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 17:38:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
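The log never expands the "FTA" abbreviation or shows the aggregation code; read as a per-group mean of a per-example metric (first-token accuracy is one plausible reading of FTA), the reduction behind the Group 0-11 block above is just a grouped average, sketched here with illustrative names:

    from collections import defaultdict

    def grouped_means(records):
        # records: iterable of dicts like {"group": 3, "loss": 5.04, "fta": 0.52}
        sums = defaultdict(lambda: {"loss": 0.0, "fta": 0.0, "n": 0})
        for r in records:
            g = sums[r["group"]]
            g["loss"] += r["loss"]
            g["fta"] += r["fta"]
            g["n"] += 1
        return {
            group: {"loss": g["loss"] / g["n"], "fta": g["fta"] / g["n"]}
            for group, g in sums.items()
        }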
+[2025-07-07 17:38:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 17:38:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 17:38:02] [Rank 0] step:5001/10000 train_time:342273ms step_avg:68.44ms
+[2025-07-07 17:38:04] [Rank 0] step:5021/10000 train_time:343026ms step_avg:68.32ms
+[2025-07-07 17:38:05] [Rank 0] step:5041/10000 train_time:344394ms step_avg:68.32ms
+[2025-07-07 17:38:07] [Rank 0] step:5061/10000 train_time:345762ms step_avg:68.32ms
+[2025-07-07 17:38:08] [Rank 0] step:5081/10000 train_time:347132ms step_avg:68.32ms
+[2025-07-07 17:38:09] [Rank 0] step:5101/10000 train_time:348501ms step_avg:68.32ms
+[2025-07-07 17:38:11] [Rank 0] step:5121/10000 train_time:349872ms step_avg:68.32ms
+[2025-07-07 17:38:12] [Rank 0] step:5141/10000 train_time:351242ms step_avg:68.32ms
+[2025-07-07 17:38:13] [Rank 0] step:5161/10000 train_time:352613ms step_avg:68.32ms
+[2025-07-07 17:38:15] [Rank 0] step:5181/10000 train_time:353986ms step_avg:68.32ms
+[2025-07-07 17:38:16] [Rank 0] step:5201/10000 train_time:355358ms step_avg:68.32ms
+[2025-07-07 17:38:18] [Rank 0] step:5221/10000 train_time:356983ms step_avg:68.37ms
+[2025-07-07 17:38:19] [Rank 0] step:5241/10000 train_time:358133ms step_avg:68.33ms
+[2025-07-07 17:38:20] [Rank 0] step:5261/10000 train_time:359505ms step_avg:68.33ms
+[2025-07-07 17:38:22] [Rank 0] step:5281/10000 train_time:360878ms step_avg:68.34ms
+[2025-07-07 17:38:23] [Rank 0] step:5301/10000 train_time:362251ms step_avg:68.34ms
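Each detailed evaluation finishes by rewriting the four PNG curves in place (the "[✓] ... updated and saved" lines above). The plotting code is not in this diff; a hedged matplotlib sketch, assuming a history of (step, value) points per group is kept in memory:

    import matplotlib
    matplotlib.use("Agg")  # headless backend for a training node
    import matplotlib.pyplot as plt

    def update_per_class_curve(history, out_path):
        # history: dict mapping group id -> list of (step, value) points.
        fig, ax = plt.subplots()
        for group, points in sorted(history.items()):
            steps, values = zip(*points)
            ax.plot(steps, values, label=f"Group {group}")
        ax.set_xlabel("step")
        ax.legend(fontsize="small", ncol=2)
        fig.savefig(out_path)  # overwrites the previous PNG in place
        plt.close(fig)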
+[2025-07-07 17:38:24] [Rank 0] step:5321/10000 train_time:363625ms step_avg:68.34ms
+[2025-07-07 17:38:26] [Rank 0] step:5341/10000 train_time:364998ms step_avg:68.34ms
+[2025-07-07 17:38:27] [Rank 0] step:5361/10000 train_time:366372ms step_avg:68.34ms
+[2025-07-07 17:38:29] [Rank 0] step:5381/10000 train_time:367746ms step_avg:68.34ms
+[2025-07-07 17:38:30] [Rank 0] step:5401/10000 train_time:369120ms step_avg:68.34ms
+[2025-07-07 17:38:31] [Rank 0] step:5421/10000 train_time:370518ms step_avg:68.35ms
+[2025-07-07 17:38:33] [Rank 0] step:5441/10000 train_time:371891ms step_avg:68.35ms
+[2025-07-07 17:38:34] [Rank 0] step:5461/10000 train_time:373265ms step_avg:68.35ms
+[2025-07-07 17:38:35] [Rank 0] step:5481/10000 train_time:374641ms step_avg:68.35ms
+[2025-07-07 17:38:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:38:38] [Rank 0] PRINT: step:5500/10000 train_loss:1.0559 val_loss:1.0726 train_time:376639ms step_avg:68.48ms
+[2025-07-07 17:38:38] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:38:38] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
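The warning repeated before every validation pass follows from integer division of the token budget: 1,966,080 / 262,144 leaves a remainder, so only 7 full batches run and 131,072 tokens go unevaluated each pass. A sketch of that check (variable names mirror the message, not the unseen script):

    val_tokens = 1966080
    val_batch_size = 262144

    num_val_batches, remainder = divmod(val_tokens, val_batch_size)
    if remainder != 0:
        print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible by "
              f"val_batch_size ({val_batch_size}). Some tokens might be missed.")
    # num_val_batches == 7; remainder == 131072 tokens are skipped each pass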
+[2025-07-07 17:38:38] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:44:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:44:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:44:02] [Rank 0] Total Loss: 5.2541
+[2025-07-07 17:44:02] [Rank 0] Total FTA: 0.5526
+[2025-07-07 17:44:02] [Rank 0] Group 0 Loss: 5.5415
+[2025-07-07 17:44:02] [Rank 0] Group 1 Loss: 5.3579
+[2025-07-07 17:44:02] [Rank 0] Group 2 Loss: 5.0559
+[2025-07-07 17:44:02] [Rank 0] Group 3 Loss: 5.1720
+[2025-07-07 17:44:02] [Rank 0] Group 4 Loss: 5.2348
+[2025-07-07 17:44:02] [Rank 0] Group 5 Loss: 5.0999
+[2025-07-07 17:44:02] [Rank 0] Group 6 Loss: 5.1278
+[2025-07-07 17:44:02] [Rank 0] Group 7 Loss: 5.2665
+[2025-07-07 17:44:02] [Rank 0] Group 8 Loss: 5.1930
+[2025-07-07 17:44:02] [Rank 0] Group 9 Loss: 5.2105
+[2025-07-07 17:44:02] [Rank 0] Group 10 Loss: 5.2368
+[2025-07-07 17:44:02] [Rank 0] Group 11 Loss: 5.2549
+[2025-07-07 17:44:02] [Rank 0] Group 0 FTA: 0.3186
+[2025-07-07 17:44:02] [Rank 0] Group 1 FTA: 0.6797
+[2025-07-07 17:44:02] [Rank 0] Group 2 FTA: 0.5911
+[2025-07-07 17:44:02] [Rank 0] Group 3 FTA: 0.5469
+[2025-07-07 17:44:02] [Rank 0] Group 4 FTA: 0.5807
+[2025-07-07 17:44:02] [Rank 0] Group 5 FTA: 0.5990
+[2025-07-07 17:44:02] [Rank 0] Group 6 FTA: 0.5755
+[2025-07-07 17:44:02] [Rank 0] Group 7 FTA: 0.5365
+[2025-07-07 17:44:02] [Rank 0] Group 8 FTA: 0.5807
+[2025-07-07 17:44:02] [Rank 0] Group 9 FTA: 0.6211
+[2025-07-07 17:44:02] [Rank 0] Group 10 FTA: 0.5918
+[2025-07-07 17:44:02] [Rank 0] Group 11 FTA: 0.5908
+[2025-07-07 17:44:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 17:44:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 17:44:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 17:44:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 17:44:03] [Rank 0] step:5501/10000 train_time:376652ms step_avg:68.47ms
+[2025-07-07 17:44:05] [Rank 0] step:5521/10000 train_time:377413ms step_avg:68.36ms
+[2025-07-07 17:44:06] [Rank 0] step:5541/10000 train_time:378780ms step_avg:68.36ms
+[2025-07-07 17:44:08] [Rank 0] step:5561/10000 train_time:380148ms step_avg:68.36ms
+[2025-07-07 17:44:09] [Rank 0] step:5581/10000 train_time:381517ms step_avg:68.36ms
+[2025-07-07 17:44:10] [Rank 0] step:5601/10000 train_time:382932ms step_avg:68.37ms
+[2025-07-07 17:44:12] [Rank 0] step:5621/10000 train_time:384305ms step_avg:68.37ms
+[2025-07-07 17:44:13] [Rank 0] step:5641/10000 train_time:385676ms step_avg:68.37ms
+[2025-07-07 17:44:14] [Rank 0] step:5661/10000 train_time:387046ms step_avg:68.37ms
+[2025-07-07 17:44:16] [Rank 0] step:5681/10000 train_time:388417ms step_avg:68.37ms
+[2025-07-07 17:44:17] [Rank 0] step:5701/10000 train_time:389789ms step_avg:68.37ms
+[2025-07-07 17:44:19] [Rank 0] step:5721/10000 train_time:391162ms step_avg:68.37ms
+[2025-07-07 17:44:20] [Rank 0] step:5741/10000 train_time:392535ms step_avg:68.37ms
+[2025-07-07 17:44:21] [Rank 0] step:5761/10000 train_time:393908ms step_avg:68.37ms
+[2025-07-07 17:44:23] [Rank 0] step:5781/10000 train_time:395318ms step_avg:68.38ms
+[2025-07-07 17:44:24] [Rank 0] step:5801/10000 train_time:396690ms step_avg:68.38ms
+[2025-07-07 17:44:26] [Rank 0] step:5821/10000 train_time:398065ms step_avg:68.38ms
+[2025-07-07 17:44:27] [Rank 0] step:5841/10000 train_time:399439ms step_avg:68.39ms
+[2025-07-07 17:44:28] [Rank 0] step:5861/10000 train_time:400812ms step_avg:68.39ms
+[2025-07-07 17:44:30] [Rank 0] step:5881/10000 train_time:402188ms step_avg:68.39ms
+[2025-07-07 17:44:31] [Rank 0] step:5901/10000 train_time:403564ms step_avg:68.39ms
+[2025-07-07 17:44:32] [Rank 0] step:5921/10000 train_time:404939ms step_avg:68.39ms
+[2025-07-07 17:44:34] [Rank 0] step:5941/10000 train_time:406359ms step_avg:68.40ms
+[2025-07-07 17:44:35] [Rank 0] step:5961/10000 train_time:407733ms step_avg:68.40ms
+[2025-07-07 17:44:37] [Rank 0] step:5981/10000 train_time:409109ms step_avg:68.40ms
+[2025-07-07 17:44:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:44:39] [Rank 0] PRINT: step:6000/10000 train_loss:1.0135 val_loss:1.0339 train_time:411109ms step_avg:68.52ms
+[2025-07-07 17:44:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:44:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:44:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:50:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:50:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:50:03] [Rank 0] Total Loss: 5.2353
+[2025-07-07 17:50:03] [Rank 0] Total FTA: 0.6581
+[2025-07-07 17:50:03] [Rank 0] Group 0 Loss: 5.3144
+[2025-07-07 17:50:03] [Rank 0] Group 1 Loss: 5.2493
+[2025-07-07 17:50:03] [Rank 0] Group 2 Loss: 5.0301
+[2025-07-07 17:50:03] [Rank 0] Group 3 Loss: 5.2175
+[2025-07-07 17:50:03] [Rank 0] Group 4 Loss: 5.2502
+[2025-07-07 17:50:03] [Rank 0] Group 5 Loss: 5.1570
+[2025-07-07 17:50:03] [Rank 0] Group 6 Loss: 5.1609
+[2025-07-07 17:50:03] [Rank 0] Group 7 Loss: 5.2615
+[2025-07-07 17:50:03] [Rank 0] Group 8 Loss: 5.2396
+[2025-07-07 17:50:03] [Rank 0] Group 9 Loss: 5.2158
+[2025-07-07 17:50:03] [Rank 0] Group 10 Loss: 5.3241
+[2025-07-07 17:50:03] [Rank 0] Group 11 Loss: 5.2548
+[2025-07-07 17:50:03] [Rank 0] Group 0 FTA: 0.8466
+[2025-07-07 17:50:03] [Rank 0] Group 1 FTA: 0.7031
+[2025-07-07 17:50:03] [Rank 0] Group 2 FTA: 0.5833
+[2025-07-07 17:50:03] [Rank 0] Group 3 FTA: 0.6250
+[2025-07-07 17:50:03] [Rank 0] Group 4 FTA: 0.6589
+[2025-07-07 17:50:03] [Rank 0] Group 5 FTA: 0.6406
+[2025-07-07 17:50:03] [Rank 0] Group 6 FTA: 0.6276
+[2025-07-07 17:50:03] [Rank 0] Group 7 FTA: 0.5677
+[2025-07-07 17:50:03] [Rank 0] Group 8 FTA: 0.6068
+[2025-07-07 17:50:03] [Rank 0] Group 9 FTA: 0.5664
+[2025-07-07 17:50:03] [Rank 0] Group 10 FTA: 0.6504
+[2025-07-07 17:50:03] [Rank 0] Group 11 FTA: 0.6377
+[2025-07-07 17:50:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 17:50:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 17:50:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 17:50:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 17:50:05] [Rank 0] step:6001/10000 train_time:411122ms step_avg:68.51ms
+[2025-07-07 17:50:06] [Rank 0] step:6021/10000 train_time:411900ms step_avg:68.41ms
+[2025-07-07 17:50:07] [Rank 0] step:6041/10000 train_time:413268ms step_avg:68.41ms
+[2025-07-07 17:50:09] [Rank 0] step:6061/10000 train_time:414637ms step_avg:68.41ms
+[2025-07-07 17:50:10] [Rank 0] step:6081/10000 train_time:416006ms step_avg:68.41ms
+[2025-07-07 17:50:12] [Rank 0] step:6101/10000 train_time:417376ms step_avg:68.41ms
+[2025-07-07 17:50:13] [Rank 0] step:6121/10000 train_time:418796ms step_avg:68.42ms
+[2025-07-07 17:50:14] [Rank 0] step:6141/10000 train_time:420152ms step_avg:68.42ms
+[2025-07-07 17:50:16] [Rank 0] step:6161/10000 train_time:421523ms step_avg:68.42ms
+[2025-07-07 17:50:17] [Rank 0] step:6181/10000 train_time:422896ms step_avg:68.42ms
+[2025-07-07 17:50:18] [Rank 0] step:6201/10000 train_time:424269ms step_avg:68.42ms
+[2025-07-07 17:50:20] [Rank 0] step:6221/10000 train_time:425642ms step_avg:68.42ms
+[2025-07-07 17:50:21] [Rank 0] step:6241/10000 train_time:427037ms step_avg:68.42ms
+[2025-07-07 17:50:23] [Rank 0] step:6261/10000 train_time:428410ms step_avg:68.43ms
+[2025-07-07 17:50:24] [Rank 0] step:6281/10000 train_time:429783ms step_avg:68.43ms
+[2025-07-07 17:50:25] [Rank 0] step:6301/10000 train_time:431204ms step_avg:68.43ms
+[2025-07-07 17:50:27] [Rank 0] step:6321/10000 train_time:432570ms step_avg:68.43ms
+[2025-07-07 17:50:28] [Rank 0] step:6341/10000 train_time:433943ms step_avg:68.43ms
+[2025-07-07 17:50:29] [Rank 0] step:6361/10000 train_time:435317ms step_avg:68.44ms
+[2025-07-07 17:50:31] [Rank 0] step:6381/10000 train_time:436691ms step_avg:68.44ms
+[2025-07-07 17:50:32] [Rank 0] step:6401/10000 train_time:438065ms step_avg:68.44ms
+[2025-07-07 17:50:34] [Rank 0] step:6421/10000 train_time:439440ms step_avg:68.44ms
+[2025-07-07 17:50:35] [Rank 0] step:6441/10000 train_time:440814ms step_avg:68.44ms
+[2025-07-07 17:50:36] [Rank 0] step:6461/10000 train_time:442189ms step_avg:68.44ms
+[2025-07-07 17:50:38] [Rank 0] step:6481/10000 train_time:444244ms step_avg:68.55ms
+[2025-07-07 17:50:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:50:40] [Rank 0] PRINT: step:6500/10000 train_loss:0.9730 val_loss:0.9980 train_time:445608ms step_avg:68.56ms
+[2025-07-07 17:50:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:50:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:50:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 17:56:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 17:56:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 17:56:05] [Rank 0] Total Loss: 5.4641
+[2025-07-07 17:56:05] [Rank 0] Total FTA: 0.7587
+[2025-07-07 17:56:05] [Rank 0] Group 0 Loss: 5.7236
+[2025-07-07 17:56:05] [Rank 0] Group 1 Loss: 5.5361
+[2025-07-07 17:56:05] [Rank 0] Group 2 Loss: 5.3737
+[2025-07-07 17:56:05] [Rank 0] Group 3 Loss: 5.3729
+[2025-07-07 17:56:05] [Rank 0] Group 4 Loss: 5.3305
+[2025-07-07 17:56:05] [Rank 0] Group 5 Loss: 5.3516
+[2025-07-07 17:56:05] [Rank 0] Group 6 Loss: 5.3973
+[2025-07-07 17:56:05] [Rank 0] Group 7 Loss: 5.4585
+[2025-07-07 17:56:05] [Rank 0] Group 8 Loss: 5.4282
+[2025-07-07 17:56:05] [Rank 0] Group 9 Loss: 5.4273
+[2025-07-07 17:56:05] [Rank 0] Group 10 Loss: 5.4764
+[2025-07-07 17:56:05] [Rank 0] Group 11 Loss: 5.4462
+[2025-07-07 17:56:05] [Rank 0] Group 0 FTA: 0.8492
+[2025-07-07 17:56:05] [Rank 0] Group 1 FTA: 0.8385
+[2025-07-07 17:56:05] [Rank 0] Group 2 FTA: 0.6120
+[2025-07-07 17:56:05] [Rank 0] Group 3 FTA: 0.8906
+[2025-07-07 17:56:05] [Rank 0] Group 4 FTA: 0.6979
+[2025-07-07 17:56:05] [Rank 0] Group 5 FTA: 0.7865
+[2025-07-07 17:56:05] [Rank 0] Group 6 FTA: 0.6771
+[2025-07-07 17:56:05] [Rank 0] Group 7 FTA: 0.7474
+[2025-07-07 17:56:05] [Rank 0] Group 8 FTA: 0.7682
+[2025-07-07 17:56:05] [Rank 0] Group 9 FTA: 0.7461
+[2025-07-07 17:56:05] [Rank 0] Group 10 FTA: 0.7402
+[2025-07-07 17:56:05] [Rank 0] Group 11 FTA: 0.7227
+[2025-07-07 17:56:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 17:56:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 17:56:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 17:56:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 17:56:06] [Rank 0] step:6501/10000 train_time:445619ms step_avg:68.55ms
+[2025-07-07 17:56:08] [Rank 0] step:6521/10000 train_time:446376ms step_avg:68.45ms
+[2025-07-07 17:56:09] [Rank 0] step:6541/10000 train_time:447742ms step_avg:68.45ms
+[2025-07-07 17:56:10] [Rank 0] step:6561/10000 train_time:449110ms step_avg:68.45ms
+[2025-07-07 17:56:12] [Rank 0] step:6581/10000 train_time:450478ms step_avg:68.45ms
+[2025-07-07 17:56:13] [Rank 0] step:6601/10000 train_time:451850ms step_avg:68.45ms
+[2025-07-07 17:56:15] [Rank 0] step:6621/10000 train_time:453219ms step_avg:68.45ms
+[2025-07-07 17:56:16] [Rank 0] step:6641/10000 train_time:454590ms step_avg:68.45ms
+[2025-07-07 17:56:17] [Rank 0] step:6661/10000 train_time:456213ms step_avg:68.49ms
+[2025-07-07 17:56:19] [Rank 0] step:6681/10000 train_time:457370ms step_avg:68.46ms
+[2025-07-07 17:56:20] [Rank 0] step:6701/10000 train_time:458742ms step_avg:68.46ms
+[2025-07-07 17:56:21] [Rank 0] step:6721/10000 train_time:460114ms step_avg:68.46ms
+[2025-07-07 17:56:23] [Rank 0] step:6741/10000 train_time:461488ms step_avg:68.46ms
+[2025-07-07 17:56:24] [Rank 0] step:6761/10000 train_time:462862ms step_avg:68.46ms
+[2025-07-07 17:56:26] [Rank 0] step:6781/10000 train_time:464235ms step_avg:68.46ms
+[2025-07-07 17:56:27] [Rank 0] step:6801/10000 train_time:465608ms step_avg:68.46ms
+[2025-07-07 17:56:28] [Rank 0] step:6821/10000 train_time:466982ms step_avg:68.46ms
+[2025-07-07 17:56:30] [Rank 0] step:6841/10000 train_time:468405ms step_avg:68.47ms
+[2025-07-07 17:56:31] [Rank 0] step:6861/10000 train_time:469756ms step_avg:68.47ms
+[2025-07-07 17:56:32] [Rank 0] step:6881/10000 train_time:471131ms step_avg:68.47ms
+[2025-07-07 17:56:34] [Rank 0] step:6901/10000 train_time:472505ms step_avg:68.47ms
+[2025-07-07 17:56:35] [Rank 0] step:6921/10000 train_time:473879ms step_avg:68.47ms
+[2025-07-07 17:56:37] [Rank 0] step:6941/10000 train_time:475254ms step_avg:68.47ms
+[2025-07-07 17:56:38] [Rank 0] step:6961/10000 train_time:476630ms step_avg:68.47ms
+[2025-07-07 17:56:39] [Rank 0] step:6981/10000 train_time:478005ms step_avg:68.47ms
+[2025-07-07 17:56:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 17:56:42] [Rank 0] PRINT: step:7000/10000 train_loss:0.9386 val_loss:0.9978 train_time:480007ms step_avg:68.57ms
+[2025-07-07 17:56:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 17:56:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 17:56:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:02:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:02:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:02:06] [Rank 0] Total Loss: 5.3849
+[2025-07-07 18:02:06] [Rank 0] Total FTA: 0.6590
+[2025-07-07 18:02:06] [Rank 0] Group 0 Loss: 5.6538
+[2025-07-07 18:02:06] [Rank 0] Group 1 Loss: 5.3039
+[2025-07-07 18:02:06] [Rank 0] Group 2 Loss: 5.1021
+[2025-07-07 18:02:06] [Rank 0] Group 3 Loss: 5.3501
+[2025-07-07 18:02:06] [Rank 0] Group 4 Loss: 5.2720
+[2025-07-07 18:02:06] [Rank 0] Group 5 Loss: 5.2649
+[2025-07-07 18:02:06] [Rank 0] Group 6 Loss: 5.3127
+[2025-07-07 18:02:06] [Rank 0] Group 7 Loss: 5.3953
+[2025-07-07 18:02:06] [Rank 0] Group 8 Loss: 5.3610
+[2025-07-07 18:02:06] [Rank 0] Group 9 Loss: 5.3777
+[2025-07-07 18:02:06] [Rank 0] Group 10 Loss: 5.4306
+[2025-07-07 18:02:06] [Rank 0] Group 11 Loss: 5.4309
+[2025-07-07 18:02:07] [Rank 0] Group 0 FTA: 0.5085
+[2025-07-07 18:02:07] [Rank 0] Group 1 FTA: 0.8411
+[2025-07-07 18:02:07] [Rank 0] Group 2 FTA: 0.4844
+[2025-07-07 18:02:07] [Rank 0] Group 3 FTA: 0.7839
+[2025-07-07 18:02:07] [Rank 0] Group 4 FTA: 0.6172
+[2025-07-07 18:02:07] [Rank 0] Group 5 FTA: 0.6771
+[2025-07-07 18:02:07] [Rank 0] Group 6 FTA: 0.6641
+[2025-07-07 18:02:07] [Rank 0] Group 7 FTA: 0.6354
+[2025-07-07 18:02:07] [Rank 0] Group 8 FTA: 0.7240
+[2025-07-07 18:02:07] [Rank 0] Group 9 FTA: 0.6914
+[2025-07-07 18:02:07] [Rank 0] Group 10 FTA: 0.6855
+[2025-07-07 18:02:07] [Rank 0] Group 11 FTA: 0.6924
+[2025-07-07 18:02:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 18:02:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 18:02:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 18:02:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 18:02:08] [Rank 0] step:7001/10000 train_time:480017ms step_avg:68.56ms
+[2025-07-07 18:02:09] [Rank 0] step:7021/10000 train_time:480845ms step_avg:68.49ms
+[2025-07-07 18:02:11] [Rank 0] step:7041/10000 train_time:482219ms step_avg:68.49ms
+[2025-07-07 18:02:12] [Rank 0] step:7061/10000 train_time:483586ms step_avg:68.49ms
+[2025-07-07 18:02:14] [Rank 0] step:7081/10000 train_time:484955ms step_avg:68.49ms
+[2025-07-07 18:02:15] [Rank 0] step:7101/10000 train_time:486324ms step_avg:68.49ms
+[2025-07-07 18:02:16] [Rank 0] step:7121/10000 train_time:487694ms step_avg:68.49ms
+[2025-07-07 18:02:18] [Rank 0] step:7141/10000 train_time:489064ms step_avg:68.49ms
+[2025-07-07 18:02:19] [Rank 0] step:7161/10000 train_time:490435ms step_avg:68.49ms
+[2025-07-07 18:02:20] [Rank 0] step:7181/10000 train_time:491806ms step_avg:68.49ms
+[2025-07-07 18:02:22] [Rank 0] step:7201/10000 train_time:493429ms step_avg:68.52ms
+[2025-07-07 18:02:23] [Rank 0] step:7221/10000 train_time:494574ms step_avg:68.49ms
+[2025-07-07 18:02:25] [Rank 0] step:7241/10000 train_time:495945ms step_avg:68.49ms
+[2025-07-07 18:02:26] [Rank 0] step:7261/10000 train_time:497317ms step_avg:68.49ms
+[2025-07-07 18:02:27] [Rank 0] step:7281/10000 train_time:498691ms step_avg:68.49ms
+[2025-07-07 18:02:29] [Rank 0] step:7301/10000 train_time:500063ms step_avg:68.49ms
+[2025-07-07 18:02:30] [Rank 0] step:7321/10000 train_time:501436ms step_avg:68.49ms
+[2025-07-07 18:02:31] [Rank 0] step:7341/10000 train_time:502809ms step_avg:68.49ms
+[2025-07-07 18:02:33] [Rank 0] step:7361/10000 train_time:504183ms step_avg:68.49ms
+[2025-07-07 18:02:34] [Rank 0] step:7381/10000 train_time:505556ms step_avg:68.49ms
+[2025-07-07 18:02:36] [Rank 0] step:7401/10000 train_time:506958ms step_avg:68.50ms
+[2025-07-07 18:02:37] [Rank 0] step:7421/10000 train_time:508331ms step_avg:68.50ms
+[2025-07-07 18:02:38] [Rank 0] step:7441/10000 train_time:509706ms step_avg:68.50ms
+[2025-07-07 18:02:40] [Rank 0] step:7461/10000 train_time:511081ms step_avg:68.50ms
+[2025-07-07 18:02:41] [Rank 0] step:7481/10000 train_time:512454ms step_avg:68.50ms
+[2025-07-07 18:02:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:02:43] [Rank 0] PRINT: step:7500/10000 train_loss:0.9100 val_loss:0.9773 train_time:514452ms step_avg:68.59ms
+[2025-07-07 18:02:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:02:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:02:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:08:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:08:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:08:06] [Rank 0] Total Loss: 5.5560
+[2025-07-07 18:08:06] [Rank 0] Total FTA: 0.8482
+[2025-07-07 18:08:06] [Rank 0] Group 0 Loss: 5.9316
+[2025-07-07 18:08:06] [Rank 0] Group 1 Loss: 5.6476
+[2025-07-07 18:08:06] [Rank 0] Group 2 Loss: 5.3218
+[2025-07-07 18:08:06] [Rank 0] Group 3 Loss: 5.4680
+[2025-07-07 18:08:06] [Rank 0] Group 4 Loss: 5.5081
+[2025-07-07 18:08:06] [Rank 0] Group 5 Loss: 5.3989
+[2025-07-07 18:08:06] [Rank 0] Group 6 Loss: 5.4029
+[2025-07-07 18:08:06] [Rank 0] Group 7 Loss: 5.5351
+[2025-07-07 18:08:06] [Rank 0] Group 8 Loss: 5.5143
+[2025-07-07 18:08:06] [Rank 0] Group 9 Loss: 5.5160
+[2025-07-07 18:08:06] [Rank 0] Group 10 Loss: 5.5311
+[2025-07-07 18:08:06] [Rank 0] Group 11 Loss: 5.5409
+[2025-07-07 18:08:06] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 18:08:06] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:08:06] [Rank 0] Group 2 FTA: 0.6484
+[2025-07-07 18:08:06] [Rank 0] Group 3 FTA: 0.8802
+[2025-07-07 18:08:06] [Rank 0] Group 4 FTA: 0.7865
+[2025-07-07 18:08:06] [Rank 0] Group 5 FTA: 0.8411
+[2025-07-07 18:08:06] [Rank 0] Group 6 FTA: 0.7786
+[2025-07-07 18:08:06] [Rank 0] Group 7 FTA: 0.8229
+[2025-07-07 18:08:07] [Rank 0] Group 8 FTA: 0.8099
+[2025-07-07 18:08:07] [Rank 0] Group 9 FTA: 0.7969
+[2025-07-07 18:08:07] [Rank 0] Group 10 FTA: 0.8398
+[2025-07-07 18:08:07] [Rank 0] Group 11 FTA: 0.8330
+[2025-07-07 18:08:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 18:08:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 18:08:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 18:08:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 18:08:08] [Rank 0] step:7501/10000 train_time:514464ms step_avg:68.59ms
+[2025-07-07 18:08:09] [Rank 0] step:7521/10000 train_time:515213ms step_avg:68.50ms
+[2025-07-07 18:08:11] [Rank 0] step:7541/10000 train_time:516582ms step_avg:68.50ms
+[2025-07-07 18:08:12] [Rank 0] step:7561/10000 train_time:517952ms step_avg:68.50ms
+[2025-07-07 18:08:13] [Rank 0] step:7581/10000 train_time:519352ms step_avg:68.51ms
+[2025-07-07 18:08:15] [Rank 0] step:7601/10000 train_time:520722ms step_avg:68.51ms
+[2025-07-07 18:08:16] [Rank 0] step:7621/10000 train_time:522093ms step_avg:68.51ms
+[2025-07-07 18:08:18] [Rank 0] step:7641/10000 train_time:523466ms step_avg:68.51ms
+[2025-07-07 18:08:19] [Rank 0] step:7661/10000 train_time:524837ms step_avg:68.51ms
+[2025-07-07 18:08:20] [Rank 0] step:7681/10000 train_time:526209ms step_avg:68.51ms
+[2025-07-07 18:08:22] [Rank 0] step:7701/10000 train_time:527581ms step_avg:68.51ms
+[2025-07-07 18:08:23] [Rank 0] step:7721/10000 train_time:528955ms step_avg:68.51ms
+[2025-07-07 18:08:25] [Rank 0] step:7741/10000 train_time:530996ms step_avg:68.60ms
+[2025-07-07 18:08:26] [Rank 0] step:7761/10000 train_time:531737ms step_avg:68.51ms
+[2025-07-07 18:08:27] [Rank 0] step:7781/10000 train_time:533111ms step_avg:68.51ms
+[2025-07-07 18:08:29] [Rank 0] step:7801/10000 train_time:534485ms step_avg:68.51ms
+[2025-07-07 18:08:30] [Rank 0] step:7821/10000 train_time:535858ms step_avg:68.52ms
+[2025-07-07 18:08:31] [Rank 0] step:7841/10000 train_time:537231ms step_avg:68.52ms
+[2025-07-07 18:08:33] [Rank 0] step:7861/10000 train_time:538605ms step_avg:68.52ms
+[2025-07-07 18:08:34] [Rank 0] step:7881/10000 train_time:539979ms step_avg:68.52ms
+[2025-07-07 18:08:35] [Rank 0] step:7901/10000 train_time:541354ms step_avg:68.52ms
+[2025-07-07 18:08:37] [Rank 0] step:7921/10000 train_time:542731ms step_avg:68.52ms
+[2025-07-07 18:08:38] [Rank 0] step:7941/10000 train_time:544155ms step_avg:68.52ms
+[2025-07-07 18:08:40] [Rank 0] step:7961/10000 train_time:545530ms step_avg:68.53ms
+[2025-07-07 18:08:41] [Rank 0] step:7981/10000 train_time:546904ms step_avg:68.53ms
+[2025-07-07 18:08:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:08:43] [Rank 0] PRINT: step:8000/10000 train_loss:0.8912 val_loss:0.9577 train_time:548904ms step_avg:68.61ms
+[2025-07-07 18:08:43] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:08:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:08:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:14:08] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:14:08] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:14:08] [Rank 0] Total Loss: 5.4398
+[2025-07-07 18:14:08] [Rank 0] Total FTA: 0.8766
+[2025-07-07 18:14:08] [Rank 0] Group 0 Loss: 5.5926
+[2025-07-07 18:14:08] [Rank 0] Group 1 Loss: 5.4746
+[2025-07-07 18:14:08] [Rank 0] Group 2 Loss: 5.1152
+[2025-07-07 18:14:08] [Rank 0] Group 3 Loss: 5.5263
+[2025-07-07 18:14:08] [Rank 0] Group 4 Loss: 5.3216
+[2025-07-07 18:14:08] [Rank 0] Group 5 Loss: 5.3755
+[2025-07-07 18:14:08] [Rank 0] Group 6 Loss: 5.3202
+[2025-07-07 18:14:08] [Rank 0] Group 7 Loss: 5.4652
+[2025-07-07 18:14:09] [Rank 0] Group 8 Loss: 5.4713
+[2025-07-07 18:14:09] [Rank 0] Group 9 Loss: 5.4918
+[2025-07-07 18:14:09] [Rank 0] Group 10 Loss: 5.4490
+[2025-07-07 18:14:09] [Rank 0] Group 11 Loss: 5.4759
+[2025-07-07 18:14:09] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 18:14:09] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:14:09] [Rank 0] Group 2 FTA: 0.9193
+[2025-07-07 18:14:09] [Rank 0] Group 3 FTA: 0.8646
+[2025-07-07 18:14:09] [Rank 0] Group 4 FTA: 0.8984
+[2025-07-07 18:14:09] [Rank 0] Group 5 FTA: 0.7943
+[2025-07-07 18:14:09] [Rank 0] Group 6 FTA: 0.8333
+[2025-07-07 18:14:09] [Rank 0] Group 7 FTA: 0.8542
+[2025-07-07 18:14:09] [Rank 0] Group 8 FTA: 0.8099
+[2025-07-07 18:14:09] [Rank 0] Group 9 FTA: 0.8398
+[2025-07-07 18:14:09] [Rank 0] Group 10 FTA: 0.8398
+[2025-07-07 18:14:09] [Rank 0] Group 11 FTA: 0.8262
+[2025-07-07 18:14:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 18:14:09] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
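For reference, FTA in these logs is first-token accuracy: a sample counts as correct when the argmax of the logits at the last prompt position equals the first token of the reference answer (see compute_first_token_accuracy in the training script logged further down in this diff). A hedged sketch of the per-group aggregation behind the "Group N FTA" lines above; the sample layout with "group" and "expected_token" keys is an assumption for illustration:

from collections import defaultdict

def fta_by_group(samples, predicted_tokens):
    # samples: dicts assumed to carry "group" (0-11 here) and "expected_token" ids
    correct, total = defaultdict(int), defaultdict(int)
    for sample, pred in zip(samples, predicted_tokens):
        g = sample["group"]
        total[g] += 1
        correct[g] += int(pred == sample["expected_token"])
    per_group = {g: correct[g] / total[g] for g in sorted(total)}
    overall = sum(correct.values()) / sum(total.values())
    return per_group, overall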
+[2025-07-07 18:14:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 18:14:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 18:14:10] [Rank 0] step:8001/10000 train_time:548915ms step_avg:68.61ms
+[2025-07-07 18:14:12] [Rank 0] step:8021/10000 train_time:549840ms step_avg:68.55ms
+[2025-07-07 18:14:13] [Rank 0] step:8041/10000 train_time:551205ms step_avg:68.55ms
+[2025-07-07 18:14:14] [Rank 0] step:8061/10000 train_time:552575ms step_avg:68.55ms
+[2025-07-07 18:14:16] [Rank 0] step:8081/10000 train_time:553945ms step_avg:68.55ms
+[2025-07-07 18:14:17] [Rank 0] step:8101/10000 train_time:555567ms step_avg:68.58ms
+[2025-07-07 18:14:19] [Rank 0] step:8121/10000 train_time:556739ms step_avg:68.56ms
+[2025-07-07 18:14:20] [Rank 0] step:8141/10000 train_time:558111ms step_avg:68.56ms
+[2025-07-07 18:14:21] [Rank 0] step:8161/10000 train_time:559483ms step_avg:68.56ms
+[2025-07-07 18:14:23] [Rank 0] step:8181/10000 train_time:560855ms step_avg:68.56ms
+[2025-07-07 18:14:24] [Rank 0] step:8201/10000 train_time:562227ms step_avg:68.56ms
+[2025-07-07 18:14:25] [Rank 0] step:8221/10000 train_time:563599ms step_avg:68.56ms
+[2025-07-07 18:14:27] [Rank 0] step:8241/10000 train_time:564971ms step_avg:68.56ms
+[2025-07-07 18:14:28] [Rank 0] step:8261/10000 train_time:566344ms step_avg:68.56ms
+[2025-07-07 18:14:30] [Rank 0] step:8281/10000 train_time:567769ms step_avg:68.56ms
+[2025-07-07 18:14:31] [Rank 0] step:8301/10000 train_time:569121ms step_avg:68.56ms
+[2025-07-07 18:14:32] [Rank 0] step:8321/10000 train_time:570494ms step_avg:68.56ms
+[2025-07-07 18:14:34] [Rank 0] step:8341/10000 train_time:571867ms step_avg:68.56ms
+[2025-07-07 18:14:35] [Rank 0] step:8361/10000 train_time:573240ms step_avg:68.56ms
+[2025-07-07 18:14:36] [Rank 0] step:8381/10000 train_time:574612ms step_avg:68.56ms
+[2025-07-07 18:14:38] [Rank 0] step:8401/10000 train_time:575986ms step_avg:68.56ms
+[2025-07-07 18:14:39] [Rank 0] step:8421/10000 train_time:577360ms step_avg:68.56ms
+[2025-07-07 18:14:41] [Rank 0] step:8441/10000 train_time:578736ms step_avg:68.56ms
+[2025-07-07 18:14:42] [Rank 0] step:8461/10000 train_time:580111ms step_avg:68.56ms
+[2025-07-07 18:14:43] [Rank 0] step:8481/10000 train_time:581504ms step_avg:68.57ms
+[2025-07-07 18:14:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:14:46] [Rank 0] PRINT: step:8500/10000 train_loss:0.8806 val_loss:0.9346 train_time:583501ms step_avg:68.65ms
+[2025-07-07 18:14:46] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:14:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:14:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:20:10] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:20:10] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:20:10] [Rank 0] Total Loss: 5.6015
+[2025-07-07 18:20:10] [Rank 0] Total FTA: 0.8974
+[2025-07-07 18:20:10] [Rank 0] Group 0 Loss: 5.9107
+[2025-07-07 18:20:10] [Rank 0] Group 1 Loss: 5.4603
+[2025-07-07 18:20:10] [Rank 0] Group 2 Loss: 5.2275
+[2025-07-07 18:20:10] [Rank 0] Group 3 Loss: 5.5824
+[2025-07-07 18:20:10] [Rank 0] Group 4 Loss: 5.6053
+[2025-07-07 18:20:10] [Rank 0] Group 5 Loss: 5.4982
+[2025-07-07 18:20:10] [Rank 0] Group 6 Loss: 5.5739
+[2025-07-07 18:20:10] [Rank 0] Group 7 Loss: 5.6156
+[2025-07-07 18:20:10] [Rank 0] Group 8 Loss: 5.5789
+[2025-07-07 18:20:10] [Rank 0] Group 9 Loss: 5.6120
+[2025-07-07 18:20:10] [Rank 0] Group 10 Loss: 5.6413
+[2025-07-07 18:20:10] [Rank 0] Group 11 Loss: 5.5979
+[2025-07-07 18:20:10] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 18:20:10] [Rank 0] Group 1 FTA: 0.8125
+[2025-07-07 18:20:10] [Rank 0] Group 2 FTA: 0.8490
+[2025-07-07 18:20:10] [Rank 0] Group 3 FTA: 0.8307
+[2025-07-07 18:20:10] [Rank 0] Group 4 FTA: 0.9766
+[2025-07-07 18:20:10] [Rank 0] Group 5 FTA: 0.9505
+[2025-07-07 18:20:10] [Rank 0] Group 6 FTA: 0.8568
+[2025-07-07 18:20:10] [Rank 0] Group 7 FTA: 0.8620
+[2025-07-07 18:20:10] [Rank 0] Group 8 FTA: 0.8828
+[2025-07-07 18:20:10] [Rank 0] Group 9 FTA: 0.8750
+[2025-07-07 18:20:10] [Rank 0] Group 10 FTA: 0.8789
+[2025-07-07 18:20:10] [Rank 0] Group 11 FTA: 0.8945
+[2025-07-07 18:20:11] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 18:20:11] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 18:20:11] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 18:20:12] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 18:20:12] [Rank 0] step:8501/10000 train_time:583512ms step_avg:68.64ms
+[2025-07-07 18:20:13] [Rank 0] step:8521/10000 train_time:584275ms step_avg:68.57ms
+[2025-07-07 18:20:14] [Rank 0] step:8541/10000 train_time:585644ms step_avg:68.57ms
+[2025-07-07 18:20:16] [Rank 0] step:8561/10000 train_time:587013ms step_avg:68.57ms
+[2025-07-07 18:20:17] [Rank 0] step:8581/10000 train_time:588381ms step_avg:68.57ms
+[2025-07-07 18:20:19] [Rank 0] step:8601/10000 train_time:589752ms step_avg:68.57ms
+[2025-07-07 18:20:20] [Rank 0] step:8621/10000 train_time:591122ms step_avg:68.57ms
+[2025-07-07 18:20:21] [Rank 0] step:8641/10000 train_time:593148ms step_avg:68.64ms
+[2025-07-07 18:20:23] [Rank 0] step:8661/10000 train_time:593887ms step_avg:68.57ms
+[2025-07-07 18:20:24] [Rank 0] step:8681/10000 train_time:595260ms step_avg:68.57ms
+[2025-07-07 18:20:25] [Rank 0] step:8701/10000 train_time:596633ms step_avg:68.57ms
+[2025-07-07 18:20:27] [Rank 0] step:8721/10000 train_time:598007ms step_avg:68.57ms
+[2025-07-07 18:20:28] [Rank 0] step:8741/10000 train_time:599381ms step_avg:68.57ms
+[2025-07-07 18:20:30] [Rank 0] step:8761/10000 train_time:600754ms step_avg:68.57ms
+[2025-07-07 18:20:31] [Rank 0] step:8781/10000 train_time:602128ms step_avg:68.57ms
+[2025-07-07 18:20:32] [Rank 0] step:8801/10000 train_time:603502ms step_avg:68.57ms
+[2025-07-07 18:20:34] [Rank 0] step:8821/10000 train_time:605532ms step_avg:68.65ms
+[2025-07-07 18:20:35] [Rank 0] step:8841/10000 train_time:606273ms step_avg:68.58ms
+[2025-07-07 18:20:36] [Rank 0] step:8861/10000 train_time:607648ms step_avg:68.58ms
+[2025-07-07 18:20:38] [Rank 0] step:8881/10000 train_time:609023ms step_avg:68.58ms
+[2025-07-07 18:20:39] [Rank 0] step:8901/10000 train_time:610397ms step_avg:68.58ms
+[2025-07-07 18:20:41] [Rank 0] step:8921/10000 train_time:611771ms step_avg:68.58ms
+[2025-07-07 18:20:42] [Rank 0] step:8941/10000 train_time:613147ms step_avg:68.58ms
+[2025-07-07 18:20:43] [Rank 0] step:8961/10000 train_time:614522ms step_avg:68.58ms
+[2025-07-07 18:20:45] [Rank 0] step:8981/10000 train_time:615898ms step_avg:68.58ms
+[2025-07-07 18:20:46] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:20:47] [Rank 0] PRINT: step:9000/10000 train_loss:0.8725 val_loss:0.9242 train_time:617897ms step_avg:68.66ms
+[2025-07-07 18:20:47] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:20:47] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
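A target of ~5000 samples that lands on exactly 5633 every time is consistent with fixed per-group quotas rounded up. A minimal sketch of that kind of stratified sampling (the grouping key and the rounding rule are assumptions, not taken from the script):

import math
import random
from collections import defaultdict

def stratified_sample(samples, target_total=5000, seed=0):
    by_group = defaultdict(list)
    for s in samples:
        by_group[s["group"]].append(s)
    rng = random.Random(seed)
    picked = []
    for group_samples in by_group.values():
        # proportional quota, rounded up, so the total can overshoot target_total
        quota = math.ceil(target_total * len(group_samples) / len(samples))
        picked.extend(rng.sample(group_samples, min(quota, len(group_samples))))
    return picked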
+[2025-07-07 18:20:47] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:26:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:26:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:26:15] [Rank 0] Total Loss: 5.4663
+[2025-07-07 18:26:15] [Rank 0] Total FTA: 0.9254
+[2025-07-07 18:26:15] [Rank 0] Group 0 Loss: 5.7165
+[2025-07-07 18:26:15] [Rank 0] Group 1 Loss: 5.4447
+[2025-07-07 18:26:15] [Rank 0] Group 2 Loss: 5.1947
+[2025-07-07 18:26:15] [Rank 0] Group 3 Loss: 5.5587
+[2025-07-07 18:26:15] [Rank 0] Group 4 Loss: 5.4369
+[2025-07-07 18:26:15] [Rank 0] Group 5 Loss: 5.3791
+[2025-07-07 18:26:15] [Rank 0] Group 6 Loss: 5.3686
+[2025-07-07 18:26:15] [Rank 0] Group 7 Loss: 5.4889
+[2025-07-07 18:26:15] [Rank 0] Group 8 Loss: 5.4788
+[2025-07-07 18:26:15] [Rank 0] Group 9 Loss: 5.4124
+[2025-07-07 18:26:15] [Rank 0] Group 10 Loss: 5.4526
+[2025-07-07 18:26:15] [Rank 0] Group 11 Loss: 5.4414
+[2025-07-07 18:26:15] [Rank 0] Group 0 FTA: 0.8401
+[2025-07-07 18:26:15] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:26:15] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 18:26:15] [Rank 0] Group 3 FTA: 0.8568
+[2025-07-07 18:26:15] [Rank 0] Group 4 FTA: 0.9583
+[2025-07-07 18:26:15] [Rank 0] Group 5 FTA: 0.9349
+[2025-07-07 18:26:15] [Rank 0] Group 6 FTA: 0.9115
+[2025-07-07 18:26:15] [Rank 0] Group 7 FTA: 0.9479
+[2025-07-07 18:26:15] [Rank 0] Group 8 FTA: 0.9401
+[2025-07-07 18:26:15] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-07 18:26:15] [Rank 0] Group 10 FTA: 0.9238
+[2025-07-07 18:26:15] [Rank 0] Group 11 FTA: 0.9297
+[2025-07-07 18:26:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 18:26:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 18:26:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 18:26:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 18:26:17] [Rank 0] step:9001/10000 train_time:617921ms step_avg:68.65ms
+[2025-07-07 18:26:18] [Rank 0] step:9021/10000 train_time:618757ms step_avg:68.59ms
+[2025-07-07 18:26:20] [Rank 0] step:9041/10000 train_time:620122ms step_avg:68.59ms
+[2025-07-07 18:26:21] [Rank 0] step:9061/10000 train_time:621489ms step_avg:68.59ms
+[2025-07-07 18:26:22] [Rank 0] step:9081/10000 train_time:622856ms step_avg:68.59ms
+[2025-07-07 18:26:24] [Rank 0] step:9101/10000 train_time:624224ms step_avg:68.59ms
+[2025-07-07 18:26:25] [Rank 0] step:9121/10000 train_time:625592ms step_avg:68.59ms
+[2025-07-07 18:26:26] [Rank 0] step:9141/10000 train_time:626960ms step_avg:68.59ms
+[2025-07-07 18:26:28] [Rank 0] step:9161/10000 train_time:628333ms step_avg:68.59ms
+[2025-07-07 18:26:29] [Rank 0] step:9181/10000 train_time:629703ms step_avg:68.59ms
+[2025-07-07 18:26:31] [Rank 0] step:9201/10000 train_time:631111ms step_avg:68.59ms
+[2025-07-07 18:26:32] [Rank 0] step:9221/10000 train_time:632482ms step_avg:68.59ms
+[2025-07-07 18:26:33] [Rank 0] step:9241/10000 train_time:633853ms step_avg:68.59ms
+[2025-07-07 18:26:35] [Rank 0] step:9261/10000 train_time:635223ms step_avg:68.59ms
+[2025-07-07 18:26:36] [Rank 0] step:9281/10000 train_time:636594ms step_avg:68.59ms
+[2025-07-07 18:26:37] [Rank 0] step:9301/10000 train_time:637964ms step_avg:68.59ms
+[2025-07-07 18:26:39] [Rank 0] step:9321/10000 train_time:639335ms step_avg:68.59ms
+[2025-07-07 18:26:40] [Rank 0] step:9341/10000 train_time:640706ms step_avg:68.59ms
+[2025-07-07 18:26:42] [Rank 0] step:9361/10000 train_time:642077ms step_avg:68.59ms
+[2025-07-07 18:26:43] [Rank 0] step:9381/10000 train_time:643497ms step_avg:68.60ms
+[2025-07-07 18:26:44] [Rank 0] step:9401/10000 train_time:644868ms step_avg:68.60ms
+[2025-07-07 18:26:46] [Rank 0] step:9421/10000 train_time:646241ms step_avg:68.60ms
+[2025-07-07 18:26:47] [Rank 0] step:9441/10000 train_time:647614ms step_avg:68.60ms
+[2025-07-07 18:26:48] [Rank 0] step:9461/10000 train_time:648986ms step_avg:68.60ms
+[2025-07-07 18:26:50] [Rank 0] step:9481/10000 train_time:650360ms step_avg:68.60ms
+[2025-07-07 18:26:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:26:52] [Rank 0] PRINT: step:9500/10000 train_loss:0.8656 val_loss:0.9180 train_time:652356ms step_avg:68.67ms
+[2025-07-07 18:26:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:26:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:26:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:32:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:32:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:32:19] [Rank 0] Total Loss: 5.6453
+[2025-07-07 18:32:19] [Rank 0] Total FTA: 0.9540
+[2025-07-07 18:32:19] [Rank 0] Group 0 Loss: 5.8691
+[2025-07-07 18:32:19] [Rank 0] Group 1 Loss: 5.6616
+[2025-07-07 18:32:19] [Rank 0] Group 2 Loss: 5.3514
+[2025-07-07 18:32:19] [Rank 0] Group 3 Loss: 5.6298
+[2025-07-07 18:32:19] [Rank 0] Group 4 Loss: 5.6356
+[2025-07-07 18:32:19] [Rank 0] Group 5 Loss: 5.5198
+[2025-07-07 18:32:19] [Rank 0] Group 6 Loss: 5.5625
+[2025-07-07 18:32:19] [Rank 0] Group 7 Loss: 5.6382
+[2025-07-07 18:32:19] [Rank 0] Group 8 Loss: 5.6420
+[2025-07-07 18:32:19] [Rank 0] Group 9 Loss: 5.6919
+[2025-07-07 18:32:19] [Rank 0] Group 10 Loss: 5.6611
+[2025-07-07 18:32:19] [Rank 0] Group 11 Loss: 5.6532
+[2025-07-07 18:32:19] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 18:32:19] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:32:19] [Rank 0] Group 2 FTA: 1.0000
+[2025-07-07 18:32:19] [Rank 0] Group 3 FTA: 1.0000
+[2025-07-07 18:32:19] [Rank 0] Group 4 FTA: 0.9375
+[2025-07-07 18:32:19] [Rank 0] Group 5 FTA: 0.9271
+[2025-07-07 18:32:19] [Rank 0] Group 6 FTA: 0.9141
+[2025-07-07 18:32:19] [Rank 0] Group 7 FTA: 0.9349
+[2025-07-07 18:32:19] [Rank 0] Group 8 FTA: 0.9427
+[2025-07-07 18:32:19] [Rank 0] Group 9 FTA: 0.9492
+[2025-07-07 18:32:19] [Rank 0] Group 10 FTA: 0.9277
+[2025-07-07 18:32:19] [Rank 0] Group 11 FTA: 0.9248
+[2025-07-07 18:32:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 18:32:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 18:32:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 18:32:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 18:32:21] [Rank 0] step:9501/10000 train_time:652368ms step_avg:68.66ms
+[2025-07-07 18:32:22] [Rank 0] step:9521/10000 train_time:653127ms step_avg:68.60ms
+[2025-07-07 18:32:23] [Rank 0] step:9541/10000 train_time:654745ms step_avg:68.62ms
+[2025-07-07 18:32:25] [Rank 0] step:9561/10000 train_time:655906ms step_avg:68.60ms
+[2025-07-07 18:32:26] [Rank 0] step:9581/10000 train_time:657271ms step_avg:68.60ms
+[2025-07-07 18:32:28] [Rank 0] step:9601/10000 train_time:658638ms step_avg:68.60ms
+[2025-07-07 18:32:29] [Rank 0] step:9621/10000 train_time:660007ms step_avg:68.60ms
+[2025-07-07 18:32:30] [Rank 0] step:9641/10000 train_time:661375ms step_avg:68.60ms
+[2025-07-07 18:32:32] [Rank 0] step:9661/10000 train_time:662745ms step_avg:68.60ms
+[2025-07-07 18:32:33] [Rank 0] step:9681/10000 train_time:664113ms step_avg:68.60ms
+[2025-07-07 18:32:34] [Rank 0] step:9701/10000 train_time:665482ms step_avg:68.60ms
+[2025-07-07 18:32:36] [Rank 0] step:9721/10000 train_time:667105ms step_avg:68.63ms
+[2025-07-07 18:32:37] [Rank 0] step:9741/10000 train_time:668259ms step_avg:68.60ms
+[2025-07-07 18:32:39] [Rank 0] step:9761/10000 train_time:669630ms step_avg:68.60ms
+[2025-07-07 18:32:40] [Rank 0] step:9781/10000 train_time:671001ms step_avg:68.60ms
+[2025-07-07 18:32:41] [Rank 0] step:9801/10000 train_time:672373ms step_avg:68.60ms
+[2025-07-07 18:32:43] [Rank 0] step:9821/10000 train_time:673745ms step_avg:68.60ms
+[2025-07-07 18:32:44] [Rank 0] step:9841/10000 train_time:675116ms step_avg:68.60ms
+[2025-07-07 18:32:45] [Rank 0] step:9861/10000 train_time:676487ms step_avg:68.60ms
+[2025-07-07 18:32:47] [Rank 0] step:9881/10000 train_time:677858ms step_avg:68.60ms
+[2025-07-07 18:32:48] [Rank 0] step:9901/10000 train_time:679480ms step_avg:68.63ms
+[2025-07-07 18:32:50] [Rank 0] step:9921/10000 train_time:680644ms step_avg:68.61ms
+[2025-07-07 18:32:51] [Rank 0] step:9941/10000 train_time:682014ms step_avg:68.61ms
+[2025-07-07 18:32:52] [Rank 0] step:9961/10000 train_time:683385ms step_avg:68.61ms
+[2025-07-07 18:32:54] [Rank 0] step:9981/10000 train_time:684758ms step_avg:68.61ms
+[2025-07-07 18:32:55] [Rank 0] step:10000/10000 train_time:686060ms step_avg:68.61ms
+[2025-07-07 18:32:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 18:32:56] [Rank 0] PRINT: step:10000/10000 train_loss:0.8579 val_loss:0.9146 train_time:686759ms step_avg:68.68ms
+[2025-07-07 18:32:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 18:32:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 18:32:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 18:38:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 18:38:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 18:38:22] [Rank 0] Total Loss: 5.5706
+[2025-07-07 18:38:22] [Rank 0] Total FTA: 0.9485
+[2025-07-07 18:38:22] [Rank 0] Group 0 Loss: 5.7655
+[2025-07-07 18:38:22] [Rank 0] Group 1 Loss: 5.5880
+[2025-07-07 18:38:22] [Rank 0] Group 2 Loss: 5.4005
+[2025-07-07 18:38:22] [Rank 0] Group 3 Loss: 5.7368
+[2025-07-07 18:38:22] [Rank 0] Group 4 Loss: 5.5127
+[2025-07-07 18:38:22] [Rank 0] Group 5 Loss: 5.4030
+[2025-07-07 18:38:22] [Rank 0] Group 6 Loss: 5.4776
+[2025-07-07 18:38:22] [Rank 0] Group 7 Loss: 5.5727
+[2025-07-07 18:38:22] [Rank 0] Group 8 Loss: 5.5330
+[2025-07-07 18:38:22] [Rank 0] Group 9 Loss: 5.5097
+[2025-07-07 18:38:23] [Rank 0] Group 10 Loss: 5.5670
+[2025-07-07 18:38:23] [Rank 0] Group 11 Loss: 5.5689
+[2025-07-07 18:38:23] [Rank 0] Group 0 FTA: 1.0000
+[2025-07-07 18:38:23] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-07 18:38:23] [Rank 0] Group 2 FTA: 0.8151
+[2025-07-07 18:38:23] [Rank 0] Group 3 FTA: 0.9062
+[2025-07-07 18:38:23] [Rank 0] Group 4 FTA: 1.0000
+[2025-07-07 18:38:23] [Rank 0] Group 5 FTA: 0.9609
+[2025-07-07 18:38:23] [Rank 0] Group 6 FTA: 0.9427
+[2025-07-07 18:38:23] [Rank 0] Group 7 FTA: 0.9635
+[2025-07-07 18:38:23] [Rank 0] Group 8 FTA: 0.9453
+[2025-07-07 18:38:23] [Rank 0] Group 9 FTA: 0.9453
+[2025-07-07 18:38:23] [Rank 0] Group 10 FTA: 0.9414
+[2025-07-07 18:38:23] [Rank 0] Group 11 FTA: 0.9346
+[2025-07-07 18:38:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_loss_curves.png
+[2025-07-07 18:38:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/per_class_acc_curves.png
+[2025-07-07 18:38:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_loss_curve.png
+[2025-07-07 18:38:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.001_seed_48/total_acc_curve.png
+[2025-07-07 18:38:24] [Rank 0] step:10001/10000 train_time:686771ms step_avg:68.67ms
+[2025-07-07 18:38:24] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 18:38:24 2025 ---
+[2025-07-07 18:38:24] [Rank 0] PRINT: Peak memory allocated: 9138 MiB reserved: 10596 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/config.json b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..4540c071223aa1a437ea5f419cc3a4bfc9809261
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "9a35b1da-6cd4-44f2-bc1d-978dcf83a0cd",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..161683b9f40a9fc3739fd0f2926c921d12ea837d
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:89d4717cf5874c648561e2787a9f0475e0dc3bdd87ebda167001dee923ec1dcc
+size 431980
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..a7db5b3b1d9de3ce67d4fcb3895b856f969231ae
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b627fbe2f76a5ca9fd840c8e51dd09046d15d74c072d62fb6ea6383637eb048d
+size 395167
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..4a584961591ed1b3b6cbb83e5f04f3166614918f
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8a9b02e62dd1ab20bfd98e6cf005d0ad446191533480509702d145af420257f
+size 106687
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..1abf0b5aca9d5dcd0bbcd1c185f8dd985d9074cd
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c29987cb70e88731429b037ae56e1a935cad0157a42c7eba7f2c9d3f3428e0ca
+size 119447
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/training_log_9a35b1da-6cd4-44f2-bc1d-978dcf83a0cd.txt b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/training_log_9a35b1da-6cd4-44f2-bc1d-978dcf83a0cd.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8e4564aaa89e5a281ea9dc85eab8a4734352009f
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/training_log_9a35b1da-6cd4-44f2-bc1d-978dcf83a0cd.txt
@@ -0,0 +1,5132 @@
+[2025-07-07 06:09:03] [Rank 0] PRINT: --- Script Start: Mon Jul 7 06:09:03 2025 ---
+[2025-07-07 06:09:03] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002)
+[2025-07-07 06:09:03] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 06:09:03] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-07 06:09:03] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42
+[2025-07-07 06:09:03] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # itertools.cycle, so multi-epoch training simply wraps around the shards
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
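+# Illustrative sketch (not part of the original script): each optimizer_mode
+# above amounts to routing weight matrices to either Muon or Adam. The
+# parameter-name filters here are assumptions for illustration only; the real
+# routing logic comes later in this script, beyond the logged excerpt.
+#
+# def build_param_groups(model, optimizer_mode):
+#     muon_params, adam_params = [], []
+#     for name, p in model.named_parameters():
+#         is_attn = any(k in name for k in ("q_w", "k_w", "v_w", "attn")) # assumed names
+#         is_mlp = "mlp" in name
+#         if optimizer_mode == 0 and (is_attn or is_mlp) and p.ndim == 2:
+#             muon_params.append(p) # Muon on all hidden attn+MLP matrices
+#         else: # e.g. optimizer_mode == 5, the runs in this directory: everything on Adam
+#             adam_params.append(p)
+#     return muon_params, adam_params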
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 10485760
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+    val_tokens = 1966080
+    train_seq_len = 12*1024 # FlexAttention sequence length
+    val_seq_len = 4*16*1024 # FlexAttention sequence length for validation
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
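+# Illustrative note (an assumption based on upstream modded-nanogpt, not on the
+# logged excerpt): cooldown_frac typically drives a constant-then-linear-decay
+# LR schedule, where the decay covers the final cooldown_frac of all steps.
+#
+# def get_lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
+#     x = step / num_iterations
+#     return 1.0 if x < 1 - cooldown_frac else (1 - x) / cooldown_frac
+#
+# With cooldown_frac = 0.8 the decay would start around step 2000 here; the
+# script's actual schedule code is not shown in the portion logged above.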
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the log file once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
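+    # Illustrative usage of the helpers above (hypothetical record, not from the
+    # dataset): process_qa_simple({"text": "Question: Where was Alice born? Answer: Paris"})
+    # would return prompt "Question Where was Alice born?" (the "Answer" marker and
+    # stray colons are stripped by clean_qa_text_completely), answer "Paris", and
+    # expected_token equal to the first GPT-2 token of " Paris" (the leading space
+    # matches the training context).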
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)  # the history dicts are keyed by training step, not epoch
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
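+# (together with train_step_count below, this accumulates the per-rank training
+#  loss between validation points; both are all-reduced and reset at each validation)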
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
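+# Usage sketch (hypothetical launch; the script file name and GPU count are
+# assumptions, but the flags are the ones defined by the argparse section above):
+#   torchrun --standalone --nproc_per_node=8 <this_script>.py \
+#       --model_parameterization qkvo --optimizer_mode 0 --adam_lr 1e-4 --seed 42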
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
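+# Worked example (for reference only; not executed as part of this run):
+# with this run's config (num_iterations=10000, cooldown_frac=0.8), the
+# stable-then-decay schedule above gives
+#   get_lr(0)     -> 1.0   (x=0.0, stable phase)
+#   get_lr(2000)  -> 1.0   (x=0.2, cooldown begins)
+#   get_lr(6000)  -> 0.55  (x=0.6, w=0.5, so 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) -> 0.1   (x=1.0, w=0.0)
+# and the attention window schedule grows from 1 block (128 tokens) at
+# step 0 to next_multiple_of_n(1728, n=128) // 128 = 14 blocks (1792
+# tokens) at the final step.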
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 06:09:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 06:09:04] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 06:09:04] [Rank 0] PRINT: Constructing model... +[2025-07-07 06:09:04] [Rank 0] PRINT: Constructing model... +[2025-07-07 06:09:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 06:09:06] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 06:09:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 06:09:06] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 06:09:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 06:09:06] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 06:09:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 06:09:07] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 06:09:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 06:09:07] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 06:09:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 06:09:07] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 06:09:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 06:09:07] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 06:09:07] [Rank 0] PRINT: Model returns: +[2025-07-07 06:09:07] [Rank 0] PRINT: Model returns: +[2025-07-07 06:09:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 06:09:07] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 06:09:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 06:09:07] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 06:09:07] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-07 06:09:07] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-07 06:09:07] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 06:09:07] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 06:09:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 06:09:07] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 06:09:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 06:09:07] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 06:09:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 06:09:07] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 06:09:07] [Rank 0] PRINT: Starting warmup... +[2025-07-07 06:09:07] [Rank 0] PRINT: Starting warmup... +[2025-07-07 06:10:41] [Rank 0] PRINT: Warmup complete. +[2025-07-07 06:10:41] [Rank 0] PRINT: Warmup complete. +[2025-07-07 06:10:42] [Rank 0] PRINT: Starting training... +[2025-07-07 06:10:42] [Rank 0] PRINT: Starting training... +[2025-07-07 06:10:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 06:10:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 06:10:49] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 06:10:49] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 06:10:50] [Rank 0] step:21/10000 train_time:810ms step_avg:38.59ms +[2025-07-07 06:10:50] [Rank 0] step:21/10000 train_time:810ms step_avg:38.59ms +[2025-07-07 06:10:52] [Rank 0] step:41/10000 train_time:2126ms step_avg:51.84ms +[2025-07-07 06:10:52] [Rank 0] step:41/10000 train_time:2126ms step_avg:51.84ms +[2025-07-07 06:10:53] [Rank 0] step:61/10000 train_time:3444ms step_avg:56.46ms +[2025-07-07 06:10:53] [Rank 0] step:61/10000 train_time:3444ms step_avg:56.46ms +[2025-07-07 06:10:54] [Rank 0] step:81/10000 train_time:4764ms step_avg:58.82ms +[2025-07-07 06:10:54] [Rank 0] step:81/10000 train_time:4764ms step_avg:58.82ms +[2025-07-07 06:10:56] [Rank 0] step:101/10000 train_time:6085ms step_avg:60.25ms +[2025-07-07 06:10:56] [Rank 0] step:101/10000 train_time:6085ms step_avg:60.25ms +[2025-07-07 06:10:57] [Rank 0] step:121/10000 train_time:7410ms step_avg:61.24ms +[2025-07-07 06:10:57] [Rank 0] step:121/10000 train_time:7410ms step_avg:61.24ms +[2025-07-07 06:10:58] [Rank 0] step:141/10000 train_time:8736ms step_avg:61.95ms +[2025-07-07 06:10:58] [Rank 0] step:141/10000 train_time:8736ms step_avg:61.95ms +[2025-07-07 06:11:00] [Rank 0] step:161/10000 train_time:10063ms step_avg:62.50ms +[2025-07-07 06:11:00] [Rank 0] step:161/10000 train_time:10063ms step_avg:62.50ms +[2025-07-07 06:11:01] [Rank 0] step:181/10000 train_time:12076ms step_avg:66.72ms +[2025-07-07 06:11:01] [Rank 0] step:181/10000 train_time:12076ms step_avg:66.72ms +[2025-07-07 06:11:02] [Rank 0] step:201/10000 train_time:12792ms step_avg:63.64ms +[2025-07-07 06:11:02] [Rank 0] step:201/10000 train_time:12792ms step_avg:63.64ms +[2025-07-07 06:11:04] [Rank 0] step:221/10000 train_time:14121ms step_avg:63.90ms +[2025-07-07 06:11:04] [Rank 0] step:221/10000 train_time:14121ms step_avg:63.90ms +[2025-07-07 06:11:05] [Rank 0] step:241/10000 train_time:15447ms step_avg:64.10ms +[2025-07-07 06:11:05] [Rank 0] step:241/10000 train_time:15447ms step_avg:64.10ms +[2025-07-07 06:11:06] [Rank 0] step:261/10000 train_time:16777ms step_avg:64.28ms +[2025-07-07 06:11:06] [Rank 0] step:261/10000 train_time:16777ms step_avg:64.28ms +[2025-07-07 06:11:08] [Rank 0] step:281/10000 train_time:18108ms step_avg:64.44ms +[2025-07-07 06:11:08] [Rank 0] step:281/10000 train_time:18108ms step_avg:64.44ms +[2025-07-07 06:11:09] [Rank 0] step:301/10000 train_time:19438ms step_avg:64.58ms +[2025-07-07 06:11:09] [Rank 0] step:301/10000 train_time:19438ms step_avg:64.58ms +[2025-07-07 06:11:10] [Rank 0] step:321/10000 train_time:20769ms step_avg:64.70ms +[2025-07-07 06:11:10] [Rank 0] step:321/10000 train_time:20769ms step_avg:64.70ms +[2025-07-07 06:11:12] [Rank 0] step:341/10000 train_time:22101ms step_avg:64.81ms +[2025-07-07 06:11:12] [Rank 0] step:341/10000 train_time:22101ms step_avg:64.81ms +[2025-07-07 06:11:13] [Rank 0] step:361/10000 train_time:23481ms step_avg:65.04ms +[2025-07-07 06:11:13] [Rank 0] step:361/10000 train_time:23481ms step_avg:65.04ms +[2025-07-07 06:11:14] [Rank 0] step:381/10000 train_time:24825ms step_avg:65.16ms +[2025-07-07 06:11:14] [Rank 0] step:381/10000 train_time:24825ms step_avg:65.16ms +[2025-07-07 06:11:16] [Rank 0] step:401/10000 train_time:26156ms step_avg:65.23ms +[2025-07-07 06:11:16] [Rank 0] step:401/10000 train_time:26156ms step_avg:65.23ms +[2025-07-07 06:11:17] [Rank 0] step:421/10000 train_time:27490ms step_avg:65.30ms 
+[2025-07-07 06:11:17] [Rank 0] step:421/10000 train_time:27490ms step_avg:65.30ms +[2025-07-07 06:11:18] [Rank 0] step:441/10000 train_time:28822ms step_avg:65.36ms +[2025-07-07 06:11:18] [Rank 0] step:441/10000 train_time:28822ms step_avg:65.36ms +[2025-07-07 06:11:20] [Rank 0] step:461/10000 train_time:30154ms step_avg:65.41ms +[2025-07-07 06:11:20] [Rank 0] step:461/10000 train_time:30154ms step_avg:65.41ms +[2025-07-07 06:11:21] [Rank 0] step:481/10000 train_time:31485ms step_avg:65.46ms +[2025-07-07 06:11:21] [Rank 0] step:481/10000 train_time:31485ms step_avg:65.46ms +[2025-07-07 06:11:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 06:11:22] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 06:11:23] [Rank 0] PRINT: step:500/10000 train_loss:4.0125 val_loss:1.9781 train_time:33423ms step_avg:66.85ms +[2025-07-07 06:11:23] [Rank 0] PRINT: step:500/10000 train_loss:4.0125 val_loss:1.9781 train_time:33423ms step_avg:66.85ms +[2025-07-07 06:11:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 06:11:23] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 06:11:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 06:11:24] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 06:11:24] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 06:11:24] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 06:16:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 06:16:40] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 06:16:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 06:16:40] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 06:16:40] [Rank 0] Total Loss: 4.7477 +[2025-07-07 06:16:40] [Rank 0] Total Loss: 4.7477 +[2025-07-07 06:16:40] [Rank 0] Total FTA: 0.0868 +[2025-07-07 06:16:40] [Rank 0] Total FTA: 0.0868 +[2025-07-07 06:16:40] [Rank 0] Group 0 Loss: 4.9155 +[2025-07-07 06:16:40] [Rank 0] Group 0 Loss: 4.9155 +[2025-07-07 06:16:40] [Rank 0] Group 1 Loss: 4.7679 +[2025-07-07 06:16:40] [Rank 0] Group 1 Loss: 4.7679 +[2025-07-07 06:16:40] [Rank 0] Group 2 Loss: 4.5705 +[2025-07-07 06:16:40] [Rank 0] Group 2 Loss: 4.5705 +[2025-07-07 06:16:40] [Rank 0] Group 3 Loss: 4.6590 +[2025-07-07 06:16:40] [Rank 0] Group 3 Loss: 4.6590 +[2025-07-07 06:16:40] [Rank 0] Group 4 Loss: 4.7141 +[2025-07-07 06:16:40] [Rank 0] Group 4 Loss: 4.7141 +[2025-07-07 06:16:40] [Rank 0] Group 5 Loss: 4.6928 +[2025-07-07 06:16:40] [Rank 0] Group 5 Loss: 4.6928 +[2025-07-07 06:16:40] [Rank 0] Group 6 Loss: 4.6703 +[2025-07-07 06:16:40] [Rank 0] Group 6 Loss: 4.6703 +[2025-07-07 06:16:40] [Rank 0] Group 7 Loss: 4.7812 +[2025-07-07 06:16:40] [Rank 0] Group 7 Loss: 4.7812 +[2025-07-07 06:16:40] [Rank 0] Group 8 Loss: 4.7276 +[2025-07-07 06:16:40] [Rank 0] Group 8 Loss: 4.7276 +[2025-07-07 06:16:40] [Rank 0] Group 9 Loss: 4.6764 +[2025-07-07 06:16:40] [Rank 0] Group 9 Loss: 4.6764 +[2025-07-07 06:16:40] [Rank 0] Group 10 Loss: 4.7244 +[2025-07-07 06:16:40] [Rank 0] Group 10 Loss: 4.7244 +[2025-07-07 06:16:40] [Rank 0] Group 11 Loss: 4.8007 +[2025-07-07 06:16:40] [Rank 0] Group 11 Loss: 4.8007 +[2025-07-07 06:16:40] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-07 06:16:40] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-07 
06:16:40] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 06:16:40] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 06:16:40] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-07 06:16:40] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-07 06:16:40] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-07 06:16:40] [Rank 0] Group 3 FTA: 0.0547 +[2025-07-07 06:16:40] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 06:16:40] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 06:16:40] [Rank 0] Group 5 FTA: 0.0911 +[2025-07-07 06:16:40] [Rank 0] Group 5 FTA: 0.0911 +[2025-07-07 06:16:40] [Rank 0] Group 6 FTA: 0.0859 +[2025-07-07 06:16:40] [Rank 0] Group 6 FTA: 0.0859 +[2025-07-07 06:16:40] [Rank 0] Group 7 FTA: 0.1224 +[2025-07-07 06:16:40] [Rank 0] Group 7 FTA: 0.1224 +[2025-07-07 06:16:40] [Rank 0] Group 8 FTA: 0.1120 +[2025-07-07 06:16:40] [Rank 0] Group 8 FTA: 0.1120 +[2025-07-07 06:16:40] [Rank 0] Group 9 FTA: 0.0938 +[2025-07-07 06:16:40] [Rank 0] Group 9 FTA: 0.0938 +[2025-07-07 06:16:40] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 06:16:40] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 06:16:40] [Rank 0] Group 11 FTA: 0.0811 +[2025-07-07 06:16:40] [Rank 0] Group 11 FTA: 0.0811 +[2025-07-07 06:16:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 06:16:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 06:16:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 06:16:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 06:16:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 06:16:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 06:16:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 06:16:42] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 06:16:42] [Rank 0] step:501/10000 train_time:33435ms step_avg:66.74ms +[2025-07-07 06:16:42] [Rank 0] step:501/10000 train_time:33435ms step_avg:66.74ms +[2025-07-07 06:16:43] [Rank 0] step:521/10000 train_time:34173ms step_avg:65.59ms +[2025-07-07 06:16:43] [Rank 0] step:521/10000 train_time:34173ms step_avg:65.59ms +[2025-07-07 06:16:44] [Rank 0] step:541/10000 train_time:35553ms step_avg:65.72ms +[2025-07-07 06:16:44] [Rank 0] step:541/10000 train_time:35553ms step_avg:65.72ms +[2025-07-07 06:16:46] [Rank 0] step:561/10000 train_time:36885ms step_avg:65.75ms +[2025-07-07 06:16:46] [Rank 0] step:561/10000 train_time:36885ms step_avg:65.75ms +[2025-07-07 06:16:47] [Rank 0] step:581/10000 train_time:38214ms step_avg:65.77ms +[2025-07-07 06:16:47] [Rank 0] step:581/10000 train_time:38214ms step_avg:65.77ms +[2025-07-07 06:16:48] [Rank 0] step:601/10000 train_time:39542ms step_avg:65.79ms +[2025-07-07 06:16:48] [Rank 0] step:601/10000 train_time:39542ms step_avg:65.79ms +[2025-07-07 06:16:50] [Rank 0] step:621/10000 train_time:40870ms step_avg:65.81ms +[2025-07-07 06:16:50] [Rank 0] step:621/10000 train_time:40870ms step_avg:65.81ms 
+[2025-07-07 06:16:51] [Rank 0] step:641/10000 train_time:42199ms step_avg:65.83ms +[2025-07-07 06:16:51] [Rank 0] step:641/10000 train_time:42199ms step_avg:65.83ms +[2025-07-07 06:16:52] [Rank 0] step:661/10000 train_time:43530ms step_avg:65.85ms +[2025-07-07 06:16:52] [Rank 0] step:661/10000 train_time:43530ms step_avg:65.85ms +[2025-07-07 06:16:54] [Rank 0] step:681/10000 train_time:44860ms step_avg:65.87ms +[2025-07-07 06:16:54] [Rank 0] step:681/10000 train_time:44860ms step_avg:65.87ms +[2025-07-07 06:16:55] [Rank 0] step:701/10000 train_time:46263ms step_avg:66.00ms +[2025-07-07 06:16:55] [Rank 0] step:701/10000 train_time:46263ms step_avg:66.00ms +[2025-07-07 06:16:57] [Rank 0] step:721/10000 train_time:48266ms step_avg:66.94ms +[2025-07-07 06:16:57] [Rank 0] step:721/10000 train_time:48266ms step_avg:66.94ms +[2025-07-07 06:16:58] [Rank 0] step:741/10000 train_time:48982ms step_avg:66.10ms +[2025-07-07 06:16:58] [Rank 0] step:741/10000 train_time:48982ms step_avg:66.10ms +[2025-07-07 06:16:59] [Rank 0] step:761/10000 train_time:50318ms step_avg:66.12ms +[2025-07-07 06:16:59] [Rank 0] step:761/10000 train_time:50318ms step_avg:66.12ms +[2025-07-07 06:17:01] [Rank 0] step:781/10000 train_time:51657ms step_avg:66.14ms +[2025-07-07 06:17:01] [Rank 0] step:781/10000 train_time:51657ms step_avg:66.14ms +[2025-07-07 06:17:02] [Rank 0] step:801/10000 train_time:52998ms step_avg:66.16ms +[2025-07-07 06:17:02] [Rank 0] step:801/10000 train_time:52998ms step_avg:66.16ms +[2025-07-07 06:17:03] [Rank 0] step:821/10000 train_time:54339ms step_avg:66.19ms +[2025-07-07 06:17:03] [Rank 0] step:821/10000 train_time:54339ms step_avg:66.19ms +[2025-07-07 06:17:05] [Rank 0] step:841/10000 train_time:55680ms step_avg:66.21ms +[2025-07-07 06:17:05] [Rank 0] step:841/10000 train_time:55680ms step_avg:66.21ms +[2025-07-07 06:17:06] [Rank 0] step:861/10000 train_time:57021ms step_avg:66.23ms +[2025-07-07 06:17:06] [Rank 0] step:861/10000 train_time:57021ms step_avg:66.23ms +[2025-07-07 06:17:07] [Rank 0] step:881/10000 train_time:58362ms step_avg:66.25ms +[2025-07-07 06:17:07] [Rank 0] step:881/10000 train_time:58362ms step_avg:66.25ms +[2025-07-07 06:17:09] [Rank 0] step:901/10000 train_time:59754ms step_avg:66.32ms +[2025-07-07 06:17:09] [Rank 0] step:901/10000 train_time:59754ms step_avg:66.32ms +[2025-07-07 06:17:10] [Rank 0] step:921/10000 train_time:61085ms step_avg:66.32ms +[2025-07-07 06:17:10] [Rank 0] step:921/10000 train_time:61085ms step_avg:66.32ms +[2025-07-07 06:17:11] [Rank 0] step:941/10000 train_time:62426ms step_avg:66.34ms +[2025-07-07 06:17:11] [Rank 0] step:941/10000 train_time:62426ms step_avg:66.34ms +[2025-07-07 06:17:13] [Rank 0] step:961/10000 train_time:63771ms step_avg:66.36ms +[2025-07-07 06:17:13] [Rank 0] step:961/10000 train_time:63771ms step_avg:66.36ms +[2025-07-07 06:17:14] [Rank 0] step:981/10000 train_time:65115ms step_avg:66.38ms +[2025-07-07 06:17:14] [Rank 0] step:981/10000 train_time:65115ms step_avg:66.38ms +[2025-07-07 06:17:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 06:17:15] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 06:17:16] [Rank 0] PRINT: step:1000/10000 train_loss:1.7843 val_loss:1.6682 train_time:67067ms step_avg:67.07ms +[2025-07-07 06:17:16] [Rank 0] PRINT: step:1000/10000 train_loss:1.7843 val_loss:1.6682 train_time:67067ms step_avg:67.07ms +[2025-07-07 06:17:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 06:17:16] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 06:17:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 06:17:16] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 06:17:16] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 06:17:16] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 06:22:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 06:22:33] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 06:22:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 06:22:33] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 06:22:33] [Rank 0] Total Loss: 5.3145 +[2025-07-07 06:22:33] [Rank 0] Total Loss: 5.3145 +[2025-07-07 06:22:33] [Rank 0] Total FTA: 0.0893 +[2025-07-07 06:22:33] [Rank 0] Total FTA: 0.0893 +[2025-07-07 06:22:33] [Rank 0] Group 0 Loss: 5.6618 +[2025-07-07 06:22:33] [Rank 0] Group 0 Loss: 5.6618 +[2025-07-07 06:22:33] [Rank 0] Group 1 Loss: 5.2346 +[2025-07-07 06:22:33] [Rank 0] Group 1 Loss: 5.2346 +[2025-07-07 06:22:33] [Rank 0] Group 2 Loss: 4.9439 +[2025-07-07 06:22:33] [Rank 0] Group 2 Loss: 4.9439 +[2025-07-07 06:22:33] [Rank 0] Group 3 Loss: 5.2389 +[2025-07-07 06:22:33] [Rank 0] Group 3 Loss: 5.2389 +[2025-07-07 06:22:33] [Rank 0] Group 4 Loss: 5.3339 +[2025-07-07 06:22:33] [Rank 0] Group 4 Loss: 5.3339 +[2025-07-07 06:22:33] [Rank 0] Group 5 Loss: 5.2544 +[2025-07-07 06:22:33] [Rank 0] Group 5 Loss: 5.2544 +[2025-07-07 06:22:33] [Rank 0] Group 6 Loss: 5.2094 +[2025-07-07 06:22:33] [Rank 0] Group 6 Loss: 5.2094 +[2025-07-07 06:22:33] [Rank 0] Group 7 Loss: 5.2896 +[2025-07-07 06:22:33] [Rank 0] Group 7 Loss: 5.2896 +[2025-07-07 06:22:33] [Rank 0] Group 8 Loss: 5.3356 +[2025-07-07 06:22:33] [Rank 0] Group 8 Loss: 5.3356 +[2025-07-07 06:22:33] [Rank 0] Group 9 Loss: 5.2059 +[2025-07-07 06:22:33] [Rank 0] Group 9 Loss: 5.2059 +[2025-07-07 06:22:33] [Rank 0] Group 10 Loss: 5.2913 +[2025-07-07 06:22:33] [Rank 0] Group 10 Loss: 5.2913 +[2025-07-07 06:22:33] [Rank 0] Group 11 Loss: 5.3456 +[2025-07-07 06:22:33] [Rank 0] Group 11 Loss: 5.3456 +[2025-07-07 06:22:33] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-07 06:22:33] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-07 06:22:33] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 06:22:33] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 06:22:33] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-07 06:22:33] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-07 06:22:33] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 06:22:33] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 06:22:33] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-07 06:22:33] [Rank 0] Group 4 FTA: 0.0495 +[2025-07-07 06:22:33] [Rank 0] Group 5 FTA: 0.0495 +[2025-07-07 06:22:33] [Rank 0] Group 5 FTA: 0.0495 +[2025-07-07 06:22:33] [Rank 0] Group 6 FTA: 0.0990 +[2025-07-07 06:22:33] [Rank 0] Group 6 FTA: 0.0990 +[2025-07-07 06:22:33] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-07 06:22:33] [Rank 0] Group 7 FTA: 0.0911 +[2025-07-07 06:22:33] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 06:22:33] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 06:22:33] [Rank 0] Group 9 FTA: 0.0938 
+[2025-07-07 06:22:33] [Rank 0] Group 9 FTA: 0.0938 +[2025-07-07 06:22:33] [Rank 0] Group 10 FTA: 0.0879 +[2025-07-07 06:22:33] [Rank 0] Group 10 FTA: 0.0879 +[2025-07-07 06:22:33] [Rank 0] Group 11 FTA: 0.1045 +[2025-07-07 06:22:33] [Rank 0] Group 11 FTA: 0.1045 +[2025-07-07 06:22:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 06:22:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 06:22:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 06:22:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 06:22:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 06:22:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 06:22:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 06:22:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 06:22:34] [Rank 0] step:1001/10000 train_time:67082ms step_avg:67.02ms +[2025-07-07 06:22:34] [Rank 0] step:1001/10000 train_time:67082ms step_avg:67.02ms +[2025-07-07 06:22:36] [Rank 0] step:1021/10000 train_time:67823ms step_avg:66.43ms +[2025-07-07 06:22:36] [Rank 0] step:1021/10000 train_time:67823ms step_avg:66.43ms +[2025-07-07 06:22:37] [Rank 0] step:1041/10000 train_time:69158ms step_avg:66.43ms +[2025-07-07 06:22:37] [Rank 0] step:1041/10000 train_time:69158ms step_avg:66.43ms +[2025-07-07 06:22:38] [Rank 0] step:1061/10000 train_time:70496ms step_avg:66.44ms +[2025-07-07 06:22:38] [Rank 0] step:1061/10000 train_time:70496ms step_avg:66.44ms +[2025-07-07 06:22:40] [Rank 0] step:1081/10000 train_time:71835ms step_avg:66.45ms +[2025-07-07 06:22:40] [Rank 0] step:1081/10000 train_time:71835ms step_avg:66.45ms +[2025-07-07 06:22:41] [Rank 0] step:1101/10000 train_time:73224ms step_avg:66.51ms +[2025-07-07 06:22:41] [Rank 0] step:1101/10000 train_time:73224ms step_avg:66.51ms +[2025-07-07 06:22:42] [Rank 0] step:1121/10000 train_time:74565ms step_avg:66.52ms +[2025-07-07 06:22:42] [Rank 0] step:1121/10000 train_time:74565ms step_avg:66.52ms +[2025-07-07 06:22:44] [Rank 0] step:1141/10000 train_time:75904ms step_avg:66.52ms +[2025-07-07 06:22:44] [Rank 0] step:1141/10000 train_time:75904ms step_avg:66.52ms +[2025-07-07 06:22:45] [Rank 0] step:1161/10000 train_time:77245ms step_avg:66.53ms +[2025-07-07 06:22:45] [Rank 0] step:1161/10000 train_time:77245ms step_avg:66.53ms +[2025-07-07 06:22:46] [Rank 0] step:1181/10000 train_time:78585ms step_avg:66.54ms +[2025-07-07 06:22:46] [Rank 0] step:1181/10000 train_time:78585ms step_avg:66.54ms +[2025-07-07 06:22:48] [Rank 0] step:1201/10000 train_time:79927ms step_avg:66.55ms +[2025-07-07 06:22:48] [Rank 0] step:1201/10000 train_time:79927ms step_avg:66.55ms +[2025-07-07 06:22:49] [Rank 0] step:1221/10000 train_time:81269ms step_avg:66.56ms +[2025-07-07 06:22:49] [Rank 0] step:1221/10000 train_time:81269ms step_avg:66.56ms +[2025-07-07 
+[2025-07-07 06:22:50] [Rank 0] step:1241/10000 train_time:82610ms step_avg:66.57ms
+[2025-07-07 06:22:52] [Rank 0] step:1261/10000 train_time:83954ms step_avg:66.58ms
+[2025-07-07 06:22:53] [Rank 0] step:1281/10000 train_time:85356ms step_avg:66.63ms
+[2025-07-07 06:22:55] [Rank 0] step:1301/10000 train_time:86700ms step_avg:66.64ms
+[2025-07-07 06:22:56] [Rank 0] step:1321/10000 train_time:88045ms step_avg:66.65ms
+[2025-07-07 06:22:57] [Rank 0] step:1341/10000 train_time:89389ms step_avg:66.66ms
+[2025-07-07 06:22:59] [Rank 0] step:1361/10000 train_time:90736ms step_avg:66.67ms
+[2025-07-07 06:23:00] [Rank 0] step:1381/10000 train_time:92080ms step_avg:66.68ms
+[2025-07-07 06:23:01] [Rank 0] step:1401/10000 train_time:93426ms step_avg:66.68ms
+[2025-07-07 06:23:03] [Rank 0] step:1421/10000 train_time:94771ms step_avg:66.69ms
+[2025-07-07 06:23:04] [Rank 0] step:1441/10000 train_time:96790ms step_avg:67.17ms
+[2025-07-07 06:23:05] [Rank 0] step:1461/10000 train_time:97517ms step_avg:66.75ms
+[2025-07-07 06:23:07] [Rank 0] step:1481/10000 train_time:98864ms step_avg:66.75ms
+[2025-07-07 06:23:08] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:23:09] [Rank 0] PRINT: step:1500/10000 train_loss:1.5536 val_loss:1.4098 train_time:100823ms step_avg:67.22ms
+[2025-07-07 06:23:09] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:23:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:23:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:28:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:28:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:28:25] [Rank 0] Total Loss: 5.2417
+[2025-07-07 06:28:25] [Rank 0] Total FTA: 0.0946
+[2025-07-07 06:28:25] [Rank 0] Group 0 Loss: 5.5985
+[2025-07-07 06:28:25] [Rank 0] Group 1 Loss: 5.2372
+[2025-07-07 06:28:25] [Rank 0] Group 2 Loss: 4.8726
+[2025-07-07 06:28:25] [Rank 0] Group 3 Loss: 5.2837
+[2025-07-07 06:28:25] [Rank 0] Group 4 Loss: 5.2495
+[2025-07-07 06:28:25] [Rank 0] Group 5 Loss: 5.1786
+[2025-07-07 06:28:25] [Rank 0] Group 6 Loss: 5.1236
+[2025-07-07 06:28:25] [Rank 0] Group 7 Loss: 5.2054
+[2025-07-07 06:28:25] [Rank 0] Group 8 Loss: 5.2132
+[2025-07-07 06:28:25] [Rank 0] Group 9 Loss: 5.1453
+[2025-07-07 06:28:25] [Rank 0] Group 10 Loss: 5.2162
+[2025-07-07 06:28:25] [Rank 0] Group 11 Loss: 5.2243
+[2025-07-07 06:28:25] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-07 06:28:25] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:28:25] [Rank 0] Group 2 FTA: 0.1797
+[2025-07-07 06:28:25] [Rank 0] Group 3 FTA: 0.0521
+[2025-07-07 06:28:25] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 06:28:25] [Rank 0] Group 5 FTA: 0.0573
+[2025-07-07 06:28:25] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-07 06:28:25] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-07 06:28:25] [Rank 0] Group 8 FTA: 0.1068
+[2025-07-07 06:28:25] [Rank 0] Group 9 FTA: 0.0625
+[2025-07-07 06:28:25] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-07 06:28:26] [Rank 0] Group 11 FTA: 0.0977
+[2025-07-07 06:28:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 06:28:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 06:28:27] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 06:28:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 06:28:27] [Rank 0] step:1501/10000 train_time:100833ms step_avg:67.18ms
+[2025-07-07 06:28:28] [Rank 0] step:1521/10000 train_time:101583ms step_avg:66.79ms
+[2025-07-07 06:28:30] [Rank 0] step:1541/10000 train_time:102924ms step_avg:66.79ms
+[2025-07-07 06:28:31] [Rank 0] step:1561/10000 train_time:104276ms step_avg:66.80ms
+[2025-07-07 06:28:32] [Rank 0] step:1581/10000 train_time:105618ms step_avg:66.80ms
+[2025-07-07 06:28:34] [Rank 0] step:1601/10000 train_time:106960ms step_avg:66.81ms
+[2025-07-07 06:28:35] [Rank 0] step:1621/10000 train_time:108307ms step_avg:66.81ms
+[2025-07-07 06:28:36] [Rank 0] step:1641/10000 train_time:109696ms step_avg:66.85ms
+[2025-07-07 06:28:38] [Rank 0] step:1661/10000 train_time:111040ms step_avg:66.85ms
+[2025-07-07 06:28:39] [Rank 0] step:1681/10000 train_time:112385ms step_avg:66.86ms
+[2025-07-07 06:28:40] [Rank 0] step:1701/10000 train_time:113730ms step_avg:66.86ms
+[2025-07-07 06:28:42] [Rank 0] step:1721/10000 train_time:115076ms step_avg:66.87ms
+[2025-07-07 06:28:43] [Rank 0] step:1741/10000 train_time:116421ms step_avg:66.87ms
+[2025-07-07 06:28:44] [Rank 0] step:1761/10000 train_time:117768ms step_avg:66.88ms
+[2025-07-07 06:28:46] [Rank 0] step:1781/10000 train_time:119114ms step_avg:66.88ms
+[2025-07-07 06:28:47] [Rank 0] step:1801/10000 train_time:120461ms step_avg:66.89ms
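The recurring val_tokens warning above is plain arithmetic: 1966080 / 262144 = 7.5, so an evaluation loop that consumes whole batches runs floor(7.5) = 7 times and the trailing half batch (131072 tokens) is never scored. A minimal reproduction of that check, using only the numbers from the message (how the actual script derives val_batch_size is not shown in this log):

val_tokens = 1966080
val_batch_size = 262144

full_batches, remainder = divmod(val_tokens, val_batch_size)  # 7, 131072
if remainder != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")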
+[2025-07-07 06:28:48] [Rank 0] step:1821/10000 train_time:121856ms step_avg:66.92ms
+[2025-07-07 06:28:50] [Rank 0] step:1841/10000 train_time:123202ms step_avg:66.92ms
+[2025-07-07 06:28:51] [Rank 0] step:1861/10000 train_time:124548ms step_avg:66.93ms
+[2025-07-07 06:28:53] [Rank 0] step:1881/10000 train_time:125896ms step_avg:66.93ms
+[2025-07-07 06:28:54] [Rank 0] step:1901/10000 train_time:127242ms step_avg:66.93ms
+[2025-07-07 06:28:55] [Rank 0] step:1921/10000 train_time:128590ms step_avg:66.94ms
+[2025-07-07 06:28:57] [Rank 0] step:1941/10000 train_time:129936ms step_avg:66.94ms
+[2025-07-07 06:28:58] [Rank 0] step:1961/10000 train_time:131282ms step_avg:66.95ms
+[2025-07-07 06:28:59] [Rank 0] step:1981/10000 train_time:132628ms step_avg:66.95ms
+[2025-07-07 06:29:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:29:02] [Rank 0] PRINT: step:2000/10000 train_loss:1.3426 val_loss:1.3090 train_time:134629ms step_avg:67.31ms
+[2025-07-07 06:29:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:29:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:29:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:34:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:34:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:34:19] [Rank 0] Total Loss: 5.3140
+[2025-07-07 06:34:19] [Rank 0] Total FTA: 0.1008
+[2025-07-07 06:34:19] [Rank 0] Group 0 Loss: 5.6189
+[2025-07-07 06:34:19] [Rank 0] Group 1 Loss: 5.2115
+[2025-07-07 06:34:19] [Rank 0] Group 2 Loss: 4.9222
+[2025-07-07 06:34:19] [Rank 0] Group 3 Loss: 5.4911
+[2025-07-07 06:34:19] [Rank 0] Group 4 Loss: 5.2926
+[2025-07-07 06:34:19] [Rank 0] Group 5 Loss: 5.2311
+[2025-07-07 06:34:19] [Rank 0] Group 6 Loss: 5.2126
+[2025-07-07 06:34:19] [Rank 0] Group 7 Loss: 5.3183
+[2025-07-07 06:34:19] [Rank 0] Group 8 Loss: 5.2891
+[2025-07-07 06:34:19] [Rank 0] Group 9 Loss: 5.2736
+[2025-07-07 06:34:19] [Rank 0] Group 10 Loss: 5.2922
+[2025-07-07 06:34:19] [Rank 0] Group 11 Loss: 5.3097
+[2025-07-07 06:34:19] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 06:34:19] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 06:34:19] [Rank 0] Group 2 FTA: 0.0964
+[2025-07-07 06:34:19] [Rank 0] Group 3 FTA: 0.0833
+[2025-07-07 06:34:19] [Rank 0] Group 4 FTA: 0.0208
+[2025-07-07 06:34:19] [Rank 0] Group 5 FTA: 0.0807
+[2025-07-07 06:34:19] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-07 06:34:19] [Rank 0] Group 7 FTA: 0.0781
+[2025-07-07 06:34:19] [Rank 0] Group 8 FTA: 0.0807
+[2025-07-07 06:34:19] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 06:34:19] [Rank 0] Group 10 FTA: 0.1035
+[2025-07-07 06:34:20] [Rank 0] Group 11 FTA: 0.0967
+[2025-07-07 06:34:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 06:34:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 06:34:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 06:34:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 06:34:21] [Rank 0] step:2001/10000 train_time:134639ms step_avg:67.29ms
+[2025-07-07 06:34:22] [Rank 0] step:2021/10000 train_time:135392ms step_avg:66.99ms
+[2025-07-07 06:34:24] [Rank 0] step:2041/10000 train_time:136732ms step_avg:66.99ms
+[2025-07-07 06:34:25] [Rank 0] step:2061/10000 train_time:138073ms step_avg:66.99ms
+[2025-07-07 06:34:26] [Rank 0] step:2081/10000 train_time:139415ms step_avg:66.99ms
+[2025-07-07 06:34:28] [Rank 0] step:2101/10000 train_time:140757ms step_avg:67.00ms
+[2025-07-07 06:34:29] [Rank 0] step:2121/10000 train_time:142104ms step_avg:67.00ms
+[2025-07-07 06:34:30] [Rank 0] step:2141/10000 train_time:143448ms step_avg:67.00ms
+[2025-07-07 06:34:32] [Rank 0] step:2161/10000 train_time:145474ms step_avg:67.32ms
+[2025-07-07 06:34:33] [Rank 0] step:2181/10000 train_time:146197ms step_avg:67.03ms
+[2025-07-07 06:34:34] [Rank 0] step:2201/10000 train_time:147541ms step_avg:67.03ms
+[2025-07-07 06:34:36] [Rank 0] step:2221/10000 train_time:148987ms step_avg:67.08ms
+[2025-07-07 06:34:37] [Rank 0] step:2241/10000 train_time:150342ms step_avg:67.09ms
+[2025-07-07 06:34:39] [Rank 0] step:2261/10000 train_time:151811ms step_avg:67.14ms
+[2025-07-07 06:34:40] [Rank 0] step:2281/10000 train_time:153182ms step_avg:67.16ms
+[2025-07-07 06:34:41] [Rank 0] step:2301/10000 train_time:154552ms step_avg:67.17ms
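The step_avg column in these lines is simply cumulative training wall-clock divided by the step index, which is why a single slow step (e.g. step:2161 above, where train_time jumps by about 2 s over 20 steps instead of the usual ~1.34 s) only nudges the average briefly. Checking one line by hand:

train_time_ms, step = 154552, 2301  # values from the step:2301 line above
print(f"step_avg:{train_time_ms / step:.2f}ms")  # -> step_avg:67.17ms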
+[2025-07-07 06:34:43] [Rank 0] step:2321/10000 train_time:155923ms step_avg:67.18ms
+[2025-07-07 06:34:44] [Rank 0] step:2341/10000 train_time:157294ms step_avg:67.19ms
+[2025-07-07 06:34:46] [Rank 0] step:2361/10000 train_time:158694ms step_avg:67.21ms
+[2025-07-07 06:34:47] [Rank 0] step:2381/10000 train_time:160066ms step_avg:67.23ms
+[2025-07-07 06:34:48] [Rank 0] step:2401/10000 train_time:161436ms step_avg:67.24ms
+[2025-07-07 06:34:50] [Rank 0] step:2421/10000 train_time:162808ms step_avg:67.25ms
+[2025-07-07 06:34:51] [Rank 0] step:2441/10000 train_time:164181ms step_avg:67.26ms
+[2025-07-07 06:34:52] [Rank 0] step:2461/10000 train_time:165554ms step_avg:67.27ms
+[2025-07-07 06:34:54] [Rank 0] step:2481/10000 train_time:166926ms step_avg:67.28ms
+[2025-07-07 06:34:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:34:56] [Rank 0] PRINT: step:2500/10000 train_loss:1.2958 val_loss:1.2830 train_time:168923ms step_avg:67.57ms
+[2025-07-07 06:34:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:34:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:34:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:40:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:40:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:40:13] [Rank 0] Total Loss: 5.4746
+[2025-07-07 06:40:13] [Rank 0] Total FTA: 0.1074
+[2025-07-07 06:40:13] [Rank 0] Group 0 Loss: 5.7897
+[2025-07-07 06:40:13] [Rank 0] Group 1 Loss: 5.1800
+[2025-07-07 06:40:13] [Rank 0] Group 2 Loss: 5.1146
+[2025-07-07 06:40:13] [Rank 0] Group 3 Loss: 5.5755
+[2025-07-07 06:40:13] [Rank 0] Group 4 Loss: 5.4646
+[2025-07-07 06:40:13] [Rank 0] Group 5 Loss: 5.4089
+[2025-07-07 06:40:13] [Rank 0] Group 6 Loss: 5.3778
+[2025-07-07 06:40:14] [Rank 0] Group 7 Loss: 5.4536
+[2025-07-07 06:40:14] [Rank 0] Group 8 Loss: 5.5354
+[2025-07-07 06:40:14] [Rank 0] Group 9 Loss: 5.4716
+[2025-07-07 06:40:14] [Rank 0] Group 10 Loss: 5.4680
+[2025-07-07 06:40:14] [Rank 0] Group 11 Loss: 5.4997
+[2025-07-07 06:40:14] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-07 06:40:14] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-07 06:40:14] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-07 06:40:14] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 06:40:14] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-07 06:40:14] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 06:40:14] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-07 06:40:14] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-07 06:40:14] [Rank 0] Group 8 FTA: 0.1120
+[2025-07-07 06:40:14] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 06:40:14] [Rank 0] Group 10 FTA: 0.1406
+[2025-07-07 06:40:14] [Rank 0] Group 11 FTA: 0.1094
+[2025-07-07 06:40:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 06:40:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 06:40:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 06:40:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 06:40:15] [Rank 0] step:2501/10000 train_time:168936ms step_avg:67.55ms
+[2025-07-07 06:40:17] [Rank 0] step:2521/10000 train_time:169697ms step_avg:67.31ms
+[2025-07-07 06:40:18] [Rank 0] step:2541/10000 train_time:171060ms step_avg:67.32ms
+[2025-07-07 06:40:19] [Rank 0] step:2561/10000 train_time:172426ms step_avg:67.33ms
+[2025-07-07 06:40:21] [Rank 0] step:2581/10000 train_time:173793ms step_avg:67.34ms
+[2025-07-07 06:40:22] [Rank 0] step:2601/10000 train_time:175159ms step_avg:67.34ms
+[2025-07-07 06:40:23] [Rank 0] step:2621/10000 train_time:176527ms step_avg:67.35ms
+[2025-07-07 06:40:25] [Rank 0] step:2641/10000 train_time:177895ms step_avg:67.36ms
+[2025-07-07 06:40:26] [Rank 0] step:2661/10000 train_time:179264ms step_avg:67.37ms
+[2025-07-07 06:40:27] [Rank 0] step:2681/10000 train_time:180634ms step_avg:67.38ms
+[2025-07-07 06:40:29] [Rank 0] step:2701/10000 train_time:182669ms step_avg:67.63ms
+[2025-07-07 06:40:30] [Rank 0] step:2721/10000 train_time:183406ms step_avg:67.40ms
+[2025-07-07 06:40:32] [Rank 0] step:2741/10000 train_time:184777ms step_avg:67.41ms
+[2025-07-07 06:40:33] [Rank 0] step:2761/10000 train_time:186149ms step_avg:67.42ms
+[2025-07-07 06:40:34] [Rank 0] step:2781/10000 train_time:187520ms step_avg:67.43ms
+[2025-07-07 06:40:36] [Rank 0] step:2801/10000 train_time:188891ms step_avg:67.44ms
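Every evaluation round targets "~5000 samples" but lands on exactly 5633, which would be consistent with per-class quotas that round up (or enforce a minimum per class) and therefore overshoot the target; that mechanism is a guess, not something this log states. A sketch of a stratified draw of that kind, with an assumed `group_of` keying function:

import math
import random
from collections import defaultdict

def stratified_sample(samples, group_of, target=5000, seed=42):
    rng = random.Random(seed)
    by_group = defaultdict(list)
    for s in samples:
        by_group[group_of(s)].append(s)
    frac = target / len(samples)
    picked = []
    for members in by_group.values():
        # Ceiling per group keeps every class represented but overshoots the target.
        k = min(len(members), max(1, math.ceil(frac * len(members))))
        picked.extend(rng.sample(members, k))
    return picked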
+[2025-07-07 06:40:37] [Rank 0] step:2821/10000 train_time:190261ms step_avg:67.44ms
+[2025-07-07 06:40:38] [Rank 0] step:2841/10000 train_time:191633ms step_avg:67.45ms
+[2025-07-07 06:40:40] [Rank 0] step:2861/10000 train_time:193005ms step_avg:67.46ms
+[2025-07-07 06:40:41] [Rank 0] step:2881/10000 train_time:194378ms step_avg:67.47ms
+[2025-07-07 06:40:43] [Rank 0] step:2901/10000 train_time:195793ms step_avg:67.49ms
+[2025-07-07 06:40:44] [Rank 0] step:2921/10000 train_time:197165ms step_avg:67.50ms
+[2025-07-07 06:40:45] [Rank 0] step:2941/10000 train_time:198537ms step_avg:67.51ms
+[2025-07-07 06:40:47] [Rank 0] step:2961/10000 train_time:199910ms step_avg:67.51ms
+[2025-07-07 06:40:48] [Rank 0] step:2981/10000 train_time:201283ms step_avg:67.52ms
+[2025-07-07 06:40:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:40:50] [Rank 0] PRINT: step:3000/10000 train_loss:1.2756 val_loss:1.2697 train_time:203277ms step_avg:67.76ms
+[2025-07-07 06:40:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:40:51] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:40:51] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:46:09] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:46:09] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:46:09] [Rank 0] Total Loss: 5.6254
+[2025-07-07 06:46:09] [Rank 0] Total FTA: 0.1166
+[2025-07-07 06:46:09] [Rank 0] Group 0 Loss: 5.9734
+[2025-07-07 06:46:09] [Rank 0] Group 1 Loss: 5.6140
+[2025-07-07 06:46:09] [Rank 0] Group 2 Loss: 5.3778
+[2025-07-07 06:46:09] [Rank 0] Group 3 Loss: 5.6303
+[2025-07-07 06:46:09] [Rank 0] Group 4 Loss: 5.5973
+[2025-07-07 06:46:09] [Rank 0] Group 5 Loss: 5.5408
+[2025-07-07 06:46:09] [Rank 0] Group 6 Loss: 5.5151
+[2025-07-07 06:46:09] [Rank 0] Group 7 Loss: 5.6050
+[2025-07-07 06:46:09] [Rank 0] Group 8 Loss: 5.5820
+[2025-07-07 06:46:09] [Rank 0] Group 9 Loss: 5.5982
+[2025-07-07 06:46:09] [Rank 0] Group 10 Loss: 5.5629
+[2025-07-07 06:46:09] [Rank 0] Group 11 Loss: 5.6048
+[2025-07-07 06:46:09] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 06:46:09] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 06:46:09] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 06:46:09] [Rank 0] Group 3 FTA: 0.0703
+[2025-07-07 06:46:09] [Rank 0] Group 4 FTA: 0.0547
+[2025-07-07 06:46:09] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-07 06:46:09] [Rank 0] Group 6 FTA: 0.1198
+[2025-07-07 06:46:09] [Rank 0] Group 7 FTA: 0.1094
+[2025-07-07 06:46:09] [Rank 0] Group 8 FTA: 0.1068
+[2025-07-07 06:46:09] [Rank 0] Group 9 FTA: 0.1250
+[2025-07-07 06:46:09] [Rank 0] Group 10 FTA: 0.1191
+[2025-07-07 06:46:09] [Rank 0] Group 11 FTA: 0.1113
+[2025-07-07 06:46:09] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 06:46:10] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 06:46:10] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 06:46:10] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 06:46:10] [Rank 0] step:3001/10000 train_time:203289ms step_avg:67.74ms
+[2025-07-07 06:46:12] [Rank 0] step:3021/10000 train_time:204066ms step_avg:67.55ms
+[2025-07-07 06:46:13] [Rank 0] step:3041/10000 train_time:205429ms step_avg:67.55ms
+[2025-07-07 06:46:15] [Rank 0] step:3061/10000 train_time:207485ms step_avg:67.78ms
+[2025-07-07 06:46:16] [Rank 0] step:3081/10000 train_time:208222ms step_avg:67.58ms
+[2025-07-07 06:46:17] [Rank 0] step:3101/10000 train_time:209589ms step_avg:67.59ms
+[2025-07-07 06:46:19] [Rank 0] step:3121/10000 train_time:210958ms step_avg:67.59ms
+[2025-07-07 06:46:20] [Rank 0] step:3141/10000 train_time:212325ms step_avg:67.60ms
+[2025-07-07 06:46:21] [Rank 0] step:3161/10000 train_time:213694ms step_avg:67.60ms
+[2025-07-07 06:46:23] [Rank 0] step:3181/10000 train_time:215064ms step_avg:67.61ms
+[2025-07-07 06:46:24] [Rank 0] step:3201/10000 train_time:216433ms step_avg:67.61ms
+[2025-07-07 06:46:26] [Rank 0] step:3221/10000 train_time:217855ms step_avg:67.64ms
+[2025-07-07 06:46:27] [Rank 0] step:3241/10000 train_time:219276ms step_avg:67.66ms
+[2025-07-07 06:46:28] [Rank 0] step:3261/10000 train_time:220596ms step_avg:67.65ms
+[2025-07-07 06:46:30] [Rank 0] step:3281/10000 train_time:221966ms step_avg:67.65ms
+[2025-07-07 06:46:31] [Rank 0] step:3301/10000 train_time:223337ms step_avg:67.66ms
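The "[✓] ... curve updated and saved" lines show the script redrawing all four PNGs after each detailed evaluation. A minimal matplotlib pattern for that kind of overwrite-in-place plotting (the function name, history layout, and output path are illustrative, not taken from the script):

import matplotlib
matplotlib.use("Agg")  # headless; we only write image files
import matplotlib.pyplot as plt

def update_curve(steps, values, ylabel, out_png):
    fig, ax = plt.subplots()
    ax.plot(steps, values, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    fig.savefig(out_png)  # overwrites the previous PNG in place
    plt.close(fig)  # avoid accumulating figures when called every 500 steps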
+[2025-07-07 06:46:32] [Rank 0] step:3321/10000 train_time:224707ms step_avg:67.66ms
+[2025-07-07 06:46:34] [Rank 0] step:3341/10000 train_time:226077ms step_avg:67.67ms
+[2025-07-07 06:46:35] [Rank 0] step:3361/10000 train_time:227449ms step_avg:67.67ms
+[2025-07-07 06:46:37] [Rank 0] step:3381/10000 train_time:228820ms step_avg:67.68ms
+[2025-07-07 06:46:38] [Rank 0] step:3401/10000 train_time:230192ms step_avg:67.68ms
+[2025-07-07 06:46:39] [Rank 0] step:3421/10000 train_time:231817ms step_avg:67.76ms
+[2025-07-07 06:46:41] [Rank 0] step:3441/10000 train_time:232959ms step_avg:67.70ms
+[2025-07-07 06:46:42] [Rank 0] step:3461/10000 train_time:234331ms step_avg:67.71ms
+[2025-07-07 06:46:43] [Rank 0] step:3481/10000 train_time:235703ms step_avg:67.71ms
+[2025-07-07 06:46:45] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:46:46] [Rank 0] PRINT: step:3500/10000 train_loss:1.2541 val_loss:1.2552 train_time:237697ms step_avg:67.91ms
+[2025-07-07 06:46:46] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:46:46] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:46:46] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:52:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:52:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:52:06] [Rank 0] Total Loss: 5.3969
+[2025-07-07 06:52:06] [Rank 0] Total FTA: 0.1470
+[2025-07-07 06:52:06] [Rank 0] Group 0 Loss: 5.6902
+[2025-07-07 06:52:06] [Rank 0] Group 1 Loss: 5.3911
+[2025-07-07 06:52:06] [Rank 0] Group 2 Loss: 4.9639
+[2025-07-07 06:52:06] [Rank 0] Group 3 Loss: 5.4710
+[2025-07-07 06:52:06] [Rank 0] Group 4 Loss: 5.4198
+[2025-07-07 06:52:06] [Rank 0] Group 5 Loss: 5.3394
+[2025-07-07 06:52:06] [Rank 0] Group 6 Loss: 5.3208
+[2025-07-07 06:52:06] [Rank 0] Group 7 Loss: 5.3888
+[2025-07-07 06:52:06] [Rank 0] Group 8 Loss: 5.3950
+[2025-07-07 06:52:06] [Rank 0] Group 9 Loss: 5.3186
+[2025-07-07 06:52:06] [Rank 0] Group 10 Loss: 5.3706
+[2025-07-07 06:52:06] [Rank 0] Group 11 Loss: 5.3912
+[2025-07-07 06:52:06] [Rank 0] Group 0 FTA: 0.1821
+[2025-07-07 06:52:06] [Rank 0] Group 1 FTA: 0.2031
+[2025-07-07 06:52:06] [Rank 0] Group 2 FTA: 0.1745
+[2025-07-07 06:52:06] [Rank 0] Group 3 FTA: 0.1042
+[2025-07-07 06:52:06] [Rank 0] Group 4 FTA: 0.0911
+[2025-07-07 06:52:06] [Rank 0] Group 5 FTA: 0.0885
+[2025-07-07 06:52:06] [Rank 0] Group 6 FTA: 0.1406
+[2025-07-07 06:52:06] [Rank 0] Group 7 FTA: 0.1380
+[2025-07-07 06:52:06] [Rank 0] Group 8 FTA: 0.1536
+[2025-07-07 06:52:06] [Rank 0] Group 9 FTA: 0.1719
+[2025-07-07 06:52:06] [Rank 0] Group 10 FTA: 0.1289
+[2025-07-07 06:52:06] [Rank 0] Group 11 FTA: 0.1543
+[2025-07-07 06:52:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 06:52:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 06:52:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 06:52:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 06:52:08] [Rank 0] step:3501/10000 train_time:237708ms step_avg:67.90ms
+[2025-07-07 06:52:09] [Rank 0] step:3521/10000 train_time:238483ms step_avg:67.73ms
+[2025-07-07 06:52:11] [Rank 0] step:3541/10000 train_time:239849ms step_avg:67.73ms
+[2025-07-07 06:52:12] [Rank 0] step:3561/10000 train_time:241215ms step_avg:67.74ms
+[2025-07-07 06:52:13] [Rank 0] step:3581/10000 train_time:242582ms step_avg:67.74ms
+[2025-07-07 06:52:15] [Rank 0] step:3601/10000 train_time:244206ms step_avg:67.82ms
+[2025-07-07 06:52:16] [Rank 0] step:3621/10000 train_time:245375ms step_avg:67.76ms
+[2025-07-07 06:52:18] [Rank 0] step:3641/10000 train_time:246845ms step_avg:67.80ms
+[2025-07-07 06:52:19] [Rank 0] step:3661/10000 train_time:248215ms step_avg:67.80ms
+[2025-07-07 06:52:20] [Rank 0] step:3681/10000 train_time:249583ms step_avg:67.80ms
+[2025-07-07 06:52:22] [Rank 0] step:3701/10000 train_time:250952ms step_avg:67.81ms
+[2025-07-07 06:52:23] [Rank 0] step:3721/10000 train_time:252322ms step_avg:67.81ms
+[2025-07-07 06:52:24] [Rank 0] step:3741/10000 train_time:253692ms step_avg:67.81ms
+[2025-07-07 06:52:26] [Rank 0] step:3761/10000 train_time:255062ms step_avg:67.82ms
+[2025-07-07 06:52:27] [Rank 0] step:3781/10000 train_time:256433ms step_avg:67.82ms
+[2025-07-07 06:52:29] [Rank 0] step:3801/10000 train_time:257857ms step_avg:67.84ms
+[2025-07-07 06:52:30] [Rank 0] step:3821/10000 train_time:259228ms step_avg:67.84ms
+[2025-07-07 06:52:31] [Rank 0] step:3841/10000 train_time:260600ms step_avg:67.85ms
+[2025-07-07 06:52:33] [Rank 0] step:3861/10000 train_time:261973ms step_avg:67.85ms
+[2025-07-07 06:52:34] [Rank 0] step:3881/10000 train_time:263345ms step_avg:67.85ms
+[2025-07-07 06:52:35] [Rank 0] step:3901/10000 train_time:264717ms step_avg:67.86ms
+[2025-07-07 06:52:37] [Rank 0] step:3921/10000 train_time:266091ms step_avg:67.86ms
+[2025-07-07 06:52:38] [Rank 0] step:3941/10000 train_time:267564ms step_avg:67.89ms
+[2025-07-07 06:52:40] [Rank 0] step:3961/10000 train_time:269189ms step_avg:67.96ms
+[2025-07-07 06:52:41] [Rank 0] step:3981/10000 train_time:270335ms step_avg:67.91ms
+[2025-07-07 06:52:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:52:43] [Rank 0] PRINT: step:4000/10000 train_loss:1.2273 val_loss:1.2317 train_time:272333ms step_avg:68.08ms
+[2025-07-07 06:52:43] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:52:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:52:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:58:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:58:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:58:02] [Rank 0] Total Loss: 5.3716
+[2025-07-07 06:58:02] [Rank 0] Total FTA: 0.1552
+[2025-07-07 06:58:02] [Rank 0] Group 0 Loss: 5.6912
+[2025-07-07 06:58:02] [Rank 0] Group 1 Loss: 5.2617
+[2025-07-07 06:58:02] [Rank 0] Group 2 Loss: 5.0126
+[2025-07-07 06:58:02] [Rank 0] Group 3 Loss: 5.3815
+[2025-07-07 06:58:02] [Rank 0] Group 4 Loss: 5.4304
+[2025-07-07 06:58:02] [Rank 0] Group 5 Loss: 5.3005
+[2025-07-07 06:58:02] [Rank 0] Group 6 Loss: 5.2721
+[2025-07-07 06:58:02] [Rank 0] Group 7 Loss: 5.3586
+[2025-07-07 06:58:02] [Rank 0] Group 8 Loss: 5.3585
+[2025-07-07 06:58:02] [Rank 0] Group 9 Loss: 5.3588
+[2025-07-07 06:58:02] [Rank 0] Group 10 Loss: 5.3644
+[2025-07-07 06:58:02] [Rank 0] Group 11 Loss: 5.3622
+[2025-07-07 06:58:02] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 06:58:02] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:58:02] [Rank 0] Group 2 FTA: 0.1719
+[2025-07-07 06:58:02] [Rank 0] Group 3 FTA: 0.1979
+[2025-07-07 06:58:02] [Rank 0] Group 4 FTA: 0.0990
+[2025-07-07 06:58:02] [Rank 0] Group 5 FTA: 0.1328
+[2025-07-07 06:58:02] [Rank 0] Group 6 FTA: 0.1745
+[2025-07-07 06:58:02] [Rank 0] Group 7 FTA: 0.1771
+[2025-07-07 06:58:02] [Rank 0] Group 8 FTA: 0.1823
+[2025-07-07 06:58:02] [Rank 0] Group 9 FTA: 0.1680
+[2025-07-07 06:58:02] [Rank 0] Group 10 FTA: 0.2070
+[2025-07-07 06:58:02] [Rank 0] Group 11 FTA: 0.1719
+[2025-07-07 06:58:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 06:58:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 06:58:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 06:58:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 06:58:04] [Rank 0] step:4001/10000 train_time:272347ms step_avg:68.07ms
+[2025-07-07 06:58:05] [Rank 0] step:4021/10000 train_time:273099ms step_avg:67.92ms
+[2025-07-07 06:58:07] [Rank 0] step:4041/10000 train_time:274463ms step_avg:67.92ms
+[2025-07-07 06:58:08] [Rank 0] step:4061/10000 train_time:275829ms step_avg:67.92ms
+[2025-07-07 06:58:09] [Rank 0] step:4081/10000 train_time:277198ms step_avg:67.92ms
+[2025-07-07 06:58:11] [Rank 0] step:4101/10000 train_time:278565ms step_avg:67.93ms
+[2025-07-07 06:58:12] [Rank 0] step:4121/10000 train_time:279934ms step_avg:67.93ms
+[2025-07-07 06:58:13] [Rank 0] step:4141/10000 train_time:281553ms step_avg:67.99ms
+[2025-07-07 06:58:15] [Rank 0] step:4161/10000 train_time:282702ms step_avg:67.94ms
+[2025-07-07 06:58:16] [Rank 0] step:4181/10000 train_time:284072ms step_avg:67.94ms
+[2025-07-07 06:58:18] [Rank 0] step:4201/10000 train_time:285443ms step_avg:67.95ms
+[2025-07-07 06:58:19] [Rank 0] step:4221/10000 train_time:286814ms step_avg:67.95ms
+[2025-07-07 06:58:20] [Rank 0] step:4241/10000 train_time:288184ms step_avg:67.95ms
+[2025-07-07 06:58:22] [Rank 0] step:4261/10000 train_time:289556ms step_avg:67.95ms
+[2025-07-07 06:58:23] [Rank 0] step:4281/10000 train_time:290927ms step_avg:67.96ms
+[2025-07-07 06:58:24] [Rank 0] step:4301/10000 train_time:292299ms step_avg:67.96ms
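Since this log file is the artifact of record for the run, the headline series can be recovered with a short regex over the PRINT lines above; the file path below is illustrative:

import re

pat = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")
points = []
with open("training_log.txt") as fh:  # illustrative path to this log
    for line in fh:
        m = pat.search(line)
        if m:
            points.append((int(m[1]), float(m[2]), float(m[3])))
# e.g. the step:4500 PRINT line below yields (4500, 1.193, 1.2146)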
step:4301/10000 train_time:292299ms step_avg:67.96ms +[2025-07-07 06:58:26] [Rank 0] step:4321/10000 train_time:293924ms step_avg:68.02ms +[2025-07-07 06:58:26] [Rank 0] step:4321/10000 train_time:293924ms step_avg:68.02ms +[2025-07-07 06:58:27] [Rank 0] step:4341/10000 train_time:295089ms step_avg:67.98ms +[2025-07-07 06:58:27] [Rank 0] step:4341/10000 train_time:295089ms step_avg:67.98ms +[2025-07-07 06:58:29] [Rank 0] step:4361/10000 train_time:296461ms step_avg:67.98ms +[2025-07-07 06:58:29] [Rank 0] step:4361/10000 train_time:296461ms step_avg:67.98ms +[2025-07-07 06:58:30] [Rank 0] step:4381/10000 train_time:297834ms step_avg:67.98ms +[2025-07-07 06:58:30] [Rank 0] step:4381/10000 train_time:297834ms step_avg:67.98ms +[2025-07-07 06:58:31] [Rank 0] step:4401/10000 train_time:299206ms step_avg:67.99ms +[2025-07-07 06:58:31] [Rank 0] step:4401/10000 train_time:299206ms step_avg:67.99ms +[2025-07-07 06:58:33] [Rank 0] step:4421/10000 train_time:300578ms step_avg:67.99ms +[2025-07-07 06:58:33] [Rank 0] step:4421/10000 train_time:300578ms step_avg:67.99ms +[2025-07-07 06:58:34] [Rank 0] step:4441/10000 train_time:301949ms step_avg:67.99ms +[2025-07-07 06:58:34] [Rank 0] step:4441/10000 train_time:301949ms step_avg:67.99ms +[2025-07-07 06:58:35] [Rank 0] step:4461/10000 train_time:303321ms step_avg:67.99ms +[2025-07-07 06:58:35] [Rank 0] step:4461/10000 train_time:303321ms step_avg:67.99ms +[2025-07-07 06:58:37] [Rank 0] step:4481/10000 train_time:304693ms step_avg:68.00ms +[2025-07-07 06:58:37] [Rank 0] step:4481/10000 train_time:304693ms step_avg:68.00ms +[2025-07-07 06:58:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 06:58:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 06:58:39] [Rank 0] PRINT: step:4500/10000 train_loss:1.1930 val_loss:1.2146 train_time:306689ms step_avg:68.15ms +[2025-07-07 06:58:39] [Rank 0] PRINT: step:4500/10000 train_loss:1.1930 val_loss:1.2146 train_time:306689ms step_avg:68.15ms +[2025-07-07 06:58:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 06:58:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 06:58:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 06:58:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 06:58:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:03:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:03:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:03:57] [Rank 0] Total Loss: 5.4352
+[2025-07-07 07:03:57] [Rank 0] Total FTA: 0.2192
+[2025-07-07 07:03:57] [Rank 0] Group 0 Loss: 5.7108
+[2025-07-07 07:03:57] [Rank 0] Group 1 Loss: 5.1725
+[2025-07-07 07:03:57] [Rank 0] Group 2 Loss: 5.1927
+[2025-07-07 07:03:57] [Rank 0] Group 3 Loss: 5.5079
+[2025-07-07 07:03:57] [Rank 0] Group 4 Loss: 5.4010
+[2025-07-07 07:03:57] [Rank 0] Group 5 Loss: 5.3845
+[2025-07-07 07:03:57] [Rank 0] Group 6 Loss: 5.3545
+[2025-07-07 07:03:57] [Rank 0] Group 7 Loss: 5.4496
+[2025-07-07 07:03:57] [Rank 0] Group 8 Loss: 5.4575
+[2025-07-07 07:03:57] [Rank 0] Group 9 Loss: 5.4126
+[2025-07-07 07:03:57] [Rank 0] Group 10 Loss: 5.4207
+[2025-07-07 07:03:57] [Rank 0] Group 11 Loss: 5.4518
+[2025-07-07 07:03:57] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 07:03:57] [Rank 0] Group 1 FTA: 0.2005
+[2025-07-07 07:03:57] [Rank 0] Group 2 FTA: 0.3177
+[2025-07-07 07:03:57] [Rank 0] Group 3 FTA: 0.1562
+[2025-07-07 07:03:57] [Rank 0] Group 4 FTA: 0.1354
+[2025-07-07 07:03:57] [Rank 0] Group 5 FTA: 0.2292
+[2025-07-07 07:03:57] [Rank 0] Group 6 FTA: 0.2214
+[2025-07-07 07:03:57] [Rank 0] Group 7 FTA: 0.2396
+[2025-07-07 07:03:57] [Rank 0] Group 8 FTA: 0.2943
+[2025-07-07 07:03:57] [Rank 0] Group 9 FTA: 0.2695
+[2025-07-07 07:03:57] [Rank 0] Group 10 FTA: 0.2090
+[2025-07-07 07:03:57] [Rank 0] Group 11 FTA: 0.2393
+[2025-07-07 07:03:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 07:03:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 07:03:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 07:03:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 07:03:59] [Rank 0] step:4501/10000 train_time:306710ms step_avg:68.14ms
+[2025-07-07 07:04:01] [Rank 0] step:4521/10000 train_time:308162ms step_avg:68.16ms
+[2025-07-07 07:04:02] [Rank 0] step:4541/10000 train_time:309528ms step_avg:68.16ms
+[2025-07-07 07:04:03] [Rank 0] step:4561/10000 train_time:310895ms step_avg:68.16ms
+[2025-07-07 07:04:05] [Rank 0] step:4581/10000 train_time:312264ms step_avg:68.17ms
+[2025-07-07 07:04:06] [Rank 0] step:4601/10000 train_time:313632ms step_avg:68.17ms
+[2025-07-07 07:04:08] [Rank 0] step:4621/10000 train_time:315001ms step_avg:68.17ms
+[2025-07-07 07:04:09] [Rank 0] step:4641/10000 train_time:316370ms step_avg:68.17ms
+[2025-07-07 07:04:10] [Rank 0] step:4661/10000 train_time:317740ms step_avg:68.17ms
+[2025-07-07 07:04:12] [Rank 0] step:4681/10000 train_time:319109ms step_avg:68.17ms
+[2025-07-07 07:04:13] [Rank 0] step:4701/10000 train_time:320516ms step_avg:68.18ms
+[2025-07-07 07:04:14] [Rank 0] step:4721/10000 train_time:321888ms step_avg:68.18ms
+[2025-07-07 07:04:16] [Rank 0] step:4741/10000 train_time:323260ms step_avg:68.18ms
+[2025-07-07 07:04:17] [Rank 0] step:4761/10000 train_time:324630ms step_avg:68.19ms
+[2025-07-07 07:04:19] [Rank 0] step:4781/10000 train_time:326000ms step_avg:68.19ms
+[2025-07-07 07:04:20] [Rank 0] step:4801/10000 train_time:327374ms step_avg:68.19ms
+[2025-07-07 07:04:21] [Rank 0] step:4821/10000 train_time:328746ms step_avg:68.19ms
+[2025-07-07 07:04:23] [Rank 0] step:4841/10000 train_time:330220ms step_avg:68.21ms
+[2025-07-07 07:04:24] [Rank 0] step:4861/10000 train_time:331939ms step_avg:68.29ms
+[2025-07-07 07:04:26] [Rank 0] step:4881/10000 train_time:333195ms step_avg:68.26ms
+[2025-07-07 07:04:27] [Rank 0] step:4901/10000 train_time:334669ms step_avg:68.29ms
+[2025-07-07 07:04:29] [Rank 0] step:4921/10000 train_time:336042ms step_avg:68.29ms
+[2025-07-07 07:04:30] [Rank 0] step:4941/10000 train_time:337516ms step_avg:68.31ms
+[2025-07-07 07:04:31] [Rank 0] step:4961/10000 train_time:338892ms step_avg:68.31ms
+[2025-07-07 07:04:33] [Rank 0] step:4981/10000 train_time:340267ms step_avg:68.31ms
+[2025-07-07 07:04:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:04:35] [Rank 0] PRINT: step:5000/10000 train_loss:1.1486 val_loss:1.1785 train_time:342268ms step_avg:68.45ms
+[2025-07-07 07:04:35] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:04:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:04:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:09:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:09:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:09:53] [Rank 0] Total Loss: 5.4035
+[2025-07-07 07:09:53] [Rank 0] Total FTA: 0.2578
+[2025-07-07 07:09:53] [Rank 0] Group 0 Loss: 5.7028
+[2025-07-07 07:09:53] [Rank 0] Group 1 Loss: 5.0057
+[2025-07-07 07:09:53] [Rank 0] Group 2 Loss: 5.2739
+[2025-07-07 07:09:53] [Rank 0] Group 3 Loss: 5.3167
+[2025-07-07 07:09:53] [Rank 0] Group 4 Loss: 5.4574
+[2025-07-07 07:09:53] [Rank 0] Group 5 Loss: 5.4101
+[2025-07-07 07:09:53] [Rank 0] Group 6 Loss: 5.3267
+[2025-07-07 07:09:53] [Rank 0] Group 7 Loss: 5.3598
+[2025-07-07 07:09:53] [Rank 0] Group 8 Loss: 5.3981
+[2025-07-07 07:09:53] [Rank 0] Group 9 Loss: 5.3844
+[2025-07-07 07:09:53] [Rank 0] Group 10 Loss: 5.4207
+[2025-07-07 07:09:53] [Rank 0] Group 11 Loss: 5.4299
+[2025-07-07 07:09:53] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 07:09:53] [Rank 0] Group 1 FTA: 0.1432
+[2025-07-07 07:09:53] [Rank 0] Group 2 FTA: 0.3411
+[2025-07-07 07:09:53] [Rank 0] Group 3 FTA: 0.2604
+[2025-07-07 07:09:53] [Rank 0] Group 4 FTA: 0.1849
+[2025-07-07 07:09:53] [Rank 0] Group 5 FTA: 0.2969
+[2025-07-07 07:09:53] [Rank 0] Group 6 FTA: 0.2943
+[2025-07-07 07:09:53] [Rank 0] Group 7 FTA: 0.2682
+[2025-07-07 07:09:53] [Rank 0] Group 8 FTA: 0.2812
+[2025-07-07 07:09:53] [Rank 0] Group 9 FTA: 0.2969
+[2025-07-07 07:09:53] [Rank 0] Group 10 FTA: 0.3086
+[2025-07-07 07:09:53] [Rank 0] Group 11 FTA: 0.2891
+[2025-07-07 07:09:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 07:09:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 07:09:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 07:09:55] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 07:09:55] [Rank 0] step:5001/10000 train_time:342279ms step_avg:68.44ms
+[2025-07-07 07:09:56] [Rank 0] step:5021/10000 train_time:343029ms step_avg:68.32ms
+[2025-07-07 07:09:58] [Rank 0] step:5041/10000 train_time:344446ms step_avg:68.33ms
+[2025-07-07 07:09:59] [Rank 0] step:5061/10000 train_time:345818ms step_avg:68.33ms
+[2025-07-07 07:10:00] [Rank 0] step:5081/10000 train_time:347187ms step_avg:68.33ms
+[2025-07-07 07:10:02] [Rank 0] step:5101/10000 train_time:348556ms step_avg:68.33ms
+[2025-07-07 07:10:03] [Rank 0] step:5121/10000 train_time:349925ms step_avg:68.33ms
+[2025-07-07 07:10:04] [Rank 0] step:5141/10000 train_time:351296ms step_avg:68.33ms
+[2025-07-07 07:10:06] [Rank 0] step:5161/10000 train_time:352666ms step_avg:68.33ms
+[2025-07-07 07:10:07] [Rank 0] step:5181/10000 train_time:354038ms step_avg:68.33ms
+[2025-07-07 07:10:09] [Rank 0] step:5201/10000 train_time:355408ms step_avg:68.33ms
+[2025-07-07 07:10:10] [Rank 0] step:5221/10000 train_time:356779ms step_avg:68.34ms
+[2025-07-07 07:10:11] [Rank 0] step:5241/10000 train_time:358203ms step_avg:68.35ms
+[2025-07-07 07:10:13] [Rank 0] step:5261/10000 train_time:359576ms step_avg:68.35ms
+[2025-07-07 07:10:14] [Rank 0] step:5281/10000 train_time:360948ms step_avg:68.35ms
+[2025-07-07 07:10:15] [Rank 0] step:5301/10000 train_time:362321ms step_avg:68.35ms
+[2025-07-07 07:10:17] [Rank 0] step:5321/10000 train_time:363694ms step_avg:68.35ms
+[2025-07-07 07:10:18] [Rank 0] step:5341/10000 train_time:365069ms step_avg:68.35ms
+[2025-07-07 07:10:20] [Rank 0] step:5361/10000 train_time:366443ms step_avg:68.35ms
+[2025-07-07 07:10:21] [Rank 0] step:5381/10000 train_time:367816ms step_avg:68.35ms
+[2025-07-07 07:10:22] [Rank 0] step:5401/10000 train_time:369847ms step_avg:68.48ms
+[2025-07-07 07:10:24] [Rank 0] step:5421/10000 train_time:370587ms step_avg:68.36ms
+[2025-07-07 07:10:25] [Rank 0] step:5441/10000 train_time:371961ms step_avg:68.36ms
+[2025-07-07 07:10:27] [Rank 0] step:5461/10000 train_time:373334ms step_avg:68.36ms
+[2025-07-07 07:10:28] [Rank 0] step:5481/10000 train_time:374707ms step_avg:68.36ms
+[2025-07-07 07:10:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:10:30] [Rank 0] PRINT: step:5500/10000 train_loss:1.0996 val_loss:1.1496 train_time:376706ms step_avg:68.49ms
+[2025-07-07 07:10:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:10:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:10:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:15:48] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:15:48] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:15:48] [Rank 0] Total Loss: 5.4623
+[2025-07-07 07:15:48] [Rank 0] Total FTA: 0.3650
+[2025-07-07 07:15:48] [Rank 0] Group 0 Loss: 5.7094
+[2025-07-07 07:15:48] [Rank 0] Group 1 Loss: 5.0356
+[2025-07-07 07:15:48] [Rank 0] Group 2 Loss: 5.2292
+[2025-07-07 07:15:48] [Rank 0] Group 3 Loss: 5.5240
+[2025-07-07 07:15:48] [Rank 0] Group 4 Loss: 5.4794
+[2025-07-07 07:15:48] [Rank 0] Group 5 Loss: 5.4906
+[2025-07-07 07:15:48] [Rank 0] Group 6 Loss: 5.4314
+[2025-07-07 07:15:48] [Rank 0] Group 7 Loss: 5.5165
+[2025-07-07 07:15:48] [Rank 0] Group 8 Loss: 5.5077
+[2025-07-07 07:15:48] [Rank 0] Group 9 Loss: 5.4054
+[2025-07-07 07:15:48] [Rank 0] Group 10 Loss: 5.4295
+[2025-07-07 07:15:48] [Rank 0] Group 11 Loss: 5.4890
+[2025-07-07 07:15:48] [Rank 0] Group 0 FTA: 0.3316
+[2025-07-07 07:15:48] [Rank 0] Group 1 FTA: 0.5443
+[2025-07-07 07:15:48] [Rank 0] Group 2 FTA: 0.4427
+[2025-07-07 07:15:48] [Rank 0] Group 3 FTA: 0.4401
+[2025-07-07 07:15:48] [Rank 0] Group 4 FTA: 0.3047
+[2025-07-07 07:15:48] [Rank 0] Group 5 FTA: 0.3438
+[2025-07-07 07:15:48] [Rank 0] Group 6 FTA: 0.3099
+[2025-07-07 07:15:48] [Rank 0] Group 7 FTA: 0.3698
+[2025-07-07 07:15:48] [Rank 0] Group 8 FTA: 0.3516
+[2025-07-07 07:15:48] [Rank 0] Group 9 FTA: 0.3008
+[2025-07-07 07:15:48] [Rank 0] Group 10 FTA: 0.3535
+[2025-07-07 07:15:48] [Rank 0] Group 11 FTA: 0.3418
+[2025-07-07 07:15:48] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 07:15:49] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 07:15:49] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 07:15:49] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 07:15:49] [Rank 0] step:5501/10000 train_time:376715ms step_avg:68.48ms
+[2025-07-07 07:15:51] [Rank 0] step:5521/10000 train_time:377474ms step_avg:68.37ms
+[2025-07-07 07:15:52] [Rank 0] step:5541/10000 train_time:378841ms step_avg:68.37ms
+[2025-07-07 07:15:53] [Rank 0] step:5561/10000 train_time:380209ms step_avg:68.37ms
+[2025-07-07 07:15:55] [Rank 0] step:5581/10000 train_time:381832ms step_avg:68.42ms
+[2025-07-07 07:15:56] [Rank 0] step:5601/10000 train_time:382997ms step_avg:68.38ms
+[2025-07-07 07:15:58] [Rank 0] step:5621/10000 train_time:384368ms step_avg:68.38ms
+[2025-07-07 07:15:59] [Rank 0] step:5641/10000 train_time:385738ms step_avg:68.38ms
+[2025-07-07 07:16:00] [Rank 0] step:5661/10000 train_time:387109ms step_avg:68.38ms
+[2025-07-07 07:16:02] [Rank 0] step:5681/10000 train_time:388480ms step_avg:68.38ms
+[2025-07-07 07:16:03] [Rank 0] step:5701/10000 train_time:389854ms step_avg:68.38ms
+[2025-07-07 07:16:04] [Rank 0] step:5721/10000 train_time:391225ms step_avg:68.38ms
+[2025-07-07 07:16:06] [Rank 0] step:5741/10000 train_time:392598ms step_avg:68.39ms
+[2025-07-07 07:16:07] [Rank 0] step:5761/10000 train_time:393969ms step_avg:68.39ms
+[2025-07-07 07:16:09] [Rank 0] step:5781/10000 train_time:395381ms step_avg:68.39ms
+[2025-07-07 07:16:10] [Rank 0] step:5801/10000 train_time:396753ms step_avg:68.39ms
+[2025-07-07 07:16:11] [Rank 0] step:5821/10000 train_time:398126ms step_avg:68.39ms
+[2025-07-07 07:16:13] [Rank 0] step:5841/10000 train_time:399498ms step_avg:68.40ms
+[2025-07-07 07:16:14] [Rank 0] step:5861/10000 train_time:400869ms step_avg:68.40ms
+[2025-07-07 07:16:15] [Rank 0] step:5881/10000 train_time:402243ms step_avg:68.40ms
+[2025-07-07 07:16:17] [Rank 0] step:5901/10000 train_time:403615ms step_avg:68.40ms
+[2025-07-07 07:16:18] [Rank 0] step:5921/10000 train_time:404988ms step_avg:68.40ms
+[2025-07-07 07:16:20] [Rank 0] step:5941/10000 train_time:407024ms step_avg:68.51ms
+[2025-07-07 07:16:21] [Rank 0] step:5961/10000 train_time:407766ms step_avg:68.41ms
+[2025-07-07 07:16:22] [Rank 0] step:5981/10000 train_time:409140ms step_avg:68.41ms
+[2025-07-07 07:16:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:16:25] [Rank 0] PRINT: step:6000/10000 train_loss:1.0451 val_loss:1.1173 train_time:411137ms step_avg:68.52ms
+[2025-07-07 07:16:25] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:16:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:16:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:21:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:21:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:21:42] [Rank 0] Total Loss: 5.4918
+[2025-07-07 07:21:42] [Rank 0] Total FTA: 0.4415
+[2025-07-07 07:21:42] [Rank 0] Group 0 Loss: 5.5535
+[2025-07-07 07:21:42] [Rank 0] Group 1 Loss: 5.3529
+[2025-07-07 07:21:42] [Rank 0] Group 2 Loss: 5.1926
+[2025-07-07 07:21:42] [Rank 0] Group 3 Loss: 5.5329
+[2025-07-07 07:21:42] [Rank 0] Group 4 Loss: 5.5504
+[2025-07-07 07:21:42] [Rank 0] Group 5 Loss: 5.4812
+[2025-07-07 07:21:42] [Rank 0] Group 6 Loss: 5.4752
+[2025-07-07 07:21:42] [Rank 0] Group 7 Loss: 5.5225
+[2025-07-07 07:21:42] [Rank 0] Group 8 Loss: 5.6092
+[2025-07-07 07:21:42] [Rank 0] Group 9 Loss: 5.4576
+[2025-07-07 07:21:42] [Rank 0] Group 10 Loss: 5.5109
+[2025-07-07 07:21:42] [Rank 0] Group 11 Loss: 5.5262
+[2025-07-07 07:21:42] [Rank 0] Group 0 FTA: 0.4954
+[2025-07-07 07:21:42] [Rank 0] Group 1 FTA: 0.4740
+[2025-07-07 07:21:42] [Rank 0] Group 2 FTA: 0.4922
+[2025-07-07 07:21:42] [Rank 0] Group 3 FTA: 0.4740
+[2025-07-07 07:21:42] [Rank 0] Group 4 FTA: 0.4479
+[2025-07-07 07:21:42] [Rank 0] Group 5 FTA: 0.4948
+[2025-07-07 07:21:42] [Rank 0] Group 6 FTA: 0.4036
+[2025-07-07 07:21:42] [Rank 0] Group 7 FTA: 0.4010
+[2025-07-07 07:21:42] [Rank 0] Group 8 FTA: 0.4141
+[2025-07-07 07:21:42] [Rank 0] Group 9 FTA: 0.3828
+[2025-07-07 07:21:42] [Rank 0] Group 10 FTA: 0.4336
+[2025-07-07 07:21:42] [Rank 0] Group 11 FTA: 0.3936
+[2025-07-07 07:21:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 07:21:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 07:21:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 07:21:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 07:21:44] [Rank 0] step:6001/10000 train_time:411146ms step_avg:68.51ms
+[2025-07-07 07:21:45] [Rank 0] step:6021/10000 train_time:411916ms step_avg:68.41ms
+[2025-07-07 07:21:46] [Rank 0] step:6041/10000 train_time:413282ms step_avg:68.41ms
+[2025-07-07 07:21:48] [Rank 0] step:6061/10000 train_time:414650ms step_avg:68.41ms
+[2025-07-07 07:21:49] [Rank 0] step:6081/10000 train_time:416018ms step_avg:68.41ms
+[2025-07-07 07:21:51] [Rank 0] step:6101/10000 train_time:417388ms step_avg:68.41ms
+[2025-07-07 07:21:52] [Rank 0] step:6121/10000 train_time:419423ms step_avg:68.52ms
+[2025-07-07 07:21:53] [Rank 0] step:6141/10000 train_time:420161ms step_avg:68.42ms
+[2025-07-07 07:21:55] [Rank 0] step:6161/10000 train_time:421535ms step_avg:68.42ms
+[2025-07-07 07:21:56] [Rank 0] step:6181/10000 train_time:422906ms step_avg:68.42ms
+[2025-07-07 07:21:57] [Rank 0] step:6201/10000 train_time:424278ms step_avg:68.42ms
+[2025-07-07 07:21:59] [Rank 0] step:6221/10000 train_time:425647ms step_avg:68.42ms
+[2025-07-07 07:22:00] [Rank 0] step:6241/10000 train_time:427019ms step_avg:68.42ms
+[2025-07-07 07:22:02] [Rank 0] step:6261/10000 train_time:428392ms step_avg:68.42ms
+[2025-07-07 07:22:03] [Rank 0] step:6281/10000 train_time:429768ms step_avg:68.42ms
+[2025-07-07 07:22:04] [Rank 0] step:6301/10000 train_time:431140ms step_avg:68.42ms
+[2025-07-07 07:22:06] [Rank 0] step:6321/10000 train_time:432615ms step_avg:68.44ms
+[2025-07-07 07:22:07] [Rank 0] step:6341/10000 train_time:433989ms step_avg:68.44ms
+[2025-07-07 07:22:09] [Rank 0] step:6361/10000 train_time:435362ms step_avg:68.44ms
+[2025-07-07 07:22:10] [Rank 0] step:6381/10000 train_time:436837ms step_avg:68.46ms
+[2025-07-07 07:22:11] [Rank 0] step:6401/10000 train_time:438211ms step_avg:68.46ms
+[2025-07-07 07:22:13] [Rank 0] step:6421/10000 train_time:439688ms step_avg:68.48ms
+[2025-07-07 07:22:14] [Rank 0] step:6441/10000 train_time:441062ms step_avg:68.48ms
+[2025-07-07 07:22:16] [Rank 0] step:6461/10000 train_time:442450ms step_avg:68.48ms
+[2025-07-07 07:22:17] [Rank 0] step:6481/10000 train_time:444178ms step_avg:68.54ms
+[2025-07-07 07:22:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:22:20] [Rank 0] PRINT: step:6500/10000 train_loss:0.9930 val_loss:1.0959 train_time:446075ms step_avg:68.63ms
+[2025-07-07 07:22:20] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:22:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:22:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:27:38] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:27:38] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:27:38] [Rank 0] Total Loss: 5.6122
+[2025-07-07 07:27:38] [Rank 0] Total FTA: 0.4328
+[2025-07-07 07:27:38] [Rank 0] Group 0 Loss: 5.7317
+[2025-07-07 07:27:38] [Rank 0] Group 1 Loss: 5.4706
+[2025-07-07 07:27:38] [Rank 0] Group 2 Loss: 5.2585
+[2025-07-07 07:27:38] [Rank 0] Group 3 Loss: 5.6293
+[2025-07-07 07:27:38] [Rank 0] Group 4 Loss: 5.6939
+[2025-07-07 07:27:38] [Rank 0] Group 5 Loss: 5.6417
+[2025-07-07 07:27:38] [Rank 0] Group 6 Loss: 5.6139
+[2025-07-07 07:27:38] [Rank 0] Group 7 Loss: 5.6020
+[2025-07-07 07:27:38] [Rank 0] Group 8 Loss: 5.6463
+[2025-07-07 07:27:38] [Rank 0] Group 9 Loss: 5.6094
+[2025-07-07 07:27:38] [Rank 0] Group 10 Loss: 5.6351
+[2025-07-07 07:27:38] [Rank 0] Group 11 Loss: 5.6400
+[2025-07-07 07:27:38] [Rank 0] Group 0 FTA: 0.2965
+[2025-07-07 07:27:38] [Rank 0] Group 1 FTA: 0.3333
+[2025-07-07 07:27:38] [Rank 0] Group 2 FTA: 0.4922
+[2025-07-07 07:27:38] [Rank 0] Group 3 FTA: 0.5469
+[2025-07-07 07:27:38] [Rank 0] Group 4 FTA: 0.4792
+[2025-07-07 07:27:38] [Rank 0] Group 5 FTA: 0.4193
+[2025-07-07 07:27:38] [Rank 0] Group 6 FTA: 0.4089
+[2025-07-07 07:27:38] [Rank 0] Group 7 FTA: 0.4453
+[2025-07-07 07:27:38] [Rank 0] Group 8 FTA: 0.4479
+[2025-07-07 07:27:38] [Rank 0] Group 9 FTA: 0.4688
+[2025-07-07 07:27:38] [Rank 0] Group 10 FTA: 0.4277
+[2025-07-07 07:27:38] [Rank 0] Group 11 FTA: 0.4873
+[2025-07-07 07:27:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png
+[2025-07-07 07:27:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png
+[2025-07-07 07:27:40] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png
+[2025-07-07 07:27:40] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png
+[2025-07-07 07:27:40] [Rank 0] step:6501/10000 train_time:446085ms step_avg:68.62ms
+[2025-07-07 07:27:41] [Rank 0] step:6521/10000 train_time:446843ms step_avg:68.52ms
+[2025-07-07 07:27:43] [Rank 0] step:6541/10000 train_time:448210ms step_avg:68.52ms
+[2025-07-07 07:27:44] [Rank 0] step:6561/10000 train_time:449578ms step_avg:68.52ms
+[2025-07-07 07:27:45] [Rank 0] step:6581/10000 train_time:450947ms step_avg:68.52ms
+[2025-07-07 07:27:47] [Rank 0] step:6601/10000 train_time:452316ms step_avg:68.52ms
+[2025-07-07 07:27:48] [Rank 0] step:6621/10000 train_time:453685ms step_avg:68.52ms
+[2025-07-07 07:27:49] [Rank 0] step:6641/10000 train_time:455053ms step_avg:68.52ms
+[2025-07-07 07:27:51] [Rank 0] step:6661/10000 train_time:456473ms step_avg:68.53ms
+[2025-07-07 07:27:52] [Rank 0] step:6681/10000 train_time:457835ms step_avg:68.53ms
+[2025-07-07 07:27:54] [Rank 0] step:6701/10000 train_time:459230ms step_avg:68.53ms
+[2025-07-07 07:27:55] [Rank 0] step:6721/10000 train_time:460600ms step_avg:68.53ms
+[2025-07-07 07:27:56] [Rank 0] step:6741/10000 train_time:461973ms step_avg:68.53ms
+[2025-07-07 07:27:58] [Rank 0] step:6761/10000 train_time:463347ms step_avg:68.53ms
+[2025-07-07 07:27:59] [Rank 0] step:6781/10000 train_time:464720ms step_avg:68.53ms
+[2025-07-07 07:28:00] [Rank 0] step:6801/10000 train_time:466094ms step_avg:68.53ms
+[2025-07-07 07:28:02] [Rank 0] step:6821/10000 train_time:467468ms step_avg:68.53ms
+[2025-07-07 07:28:03] [Rank 0] step:6841/10000 train_time:469091ms step_avg:68.57ms
+[2025-07-07 07:28:05] [Rank 0] step:6861/10000 train_time:470261ms step_avg:68.54ms
+[2025-07-07 07:28:06] [Rank 0] step:6881/10000 train_time:471635ms step_avg:68.54ms
+[2025-07-07 07:28:07] [Rank 0] step:6901/10000 train_time:473009ms step_avg:68.54ms
+[2025-07-07 07:28:09] [Rank 0] step:6921/10000 train_time:474384ms step_avg:68.54ms
+[2025-07-07 07:28:10] [Rank 0] step:6941/10000 train_time:475757ms step_avg:68.54ms
+[2025-07-07 07:28:12] [Rank 0] step:6961/10000 train_time:477135ms step_avg:68.54ms
+[2025-07-07 07:28:13] [Rank 0] step:6981/10000 train_time:478510ms step_avg:68.54ms
+[2025-07-07 07:28:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:28:15] [Rank 0] PRINT: step:7000/10000 train_loss:0.9568 val_loss:1.0682 train_time:480509ms step_avg:68.64ms
+[2025-07-07 07:28:15] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:28:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:28:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 07:28:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 07:33:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 07:33:38] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 07:33:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 07:33:38] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 07:33:38] [Rank 0] Total Loss: 5.6280 +[2025-07-07 07:33:38] [Rank 0] Total Loss: 5.6280 +[2025-07-07 07:33:38] [Rank 0] Total FTA: 0.5194 +[2025-07-07 07:33:38] [Rank 0] Total FTA: 0.5194 +[2025-07-07 07:33:38] [Rank 0] Group 0 Loss: 5.9876 +[2025-07-07 07:33:38] [Rank 0] Group 0 Loss: 5.9876 +[2025-07-07 07:33:38] [Rank 0] Group 1 Loss: 5.7381 +[2025-07-07 07:33:38] [Rank 0] Group 1 Loss: 5.7381 +[2025-07-07 07:33:38] [Rank 0] Group 2 Loss: 5.2424 +[2025-07-07 07:33:38] [Rank 0] Group 2 Loss: 5.2424 +[2025-07-07 07:33:38] [Rank 0] Group 3 Loss: 5.5756 +[2025-07-07 07:33:38] [Rank 0] Group 3 Loss: 5.5756 +[2025-07-07 07:33:38] [Rank 0] Group 4 Loss: 5.6359 +[2025-07-07 07:33:38] [Rank 0] Group 4 Loss: 5.6359 +[2025-07-07 07:33:38] [Rank 0] Group 5 Loss: 5.5255 +[2025-07-07 07:33:38] [Rank 0] Group 5 Loss: 5.5255 +[2025-07-07 07:33:38] [Rank 0] Group 6 Loss: 5.5052 +[2025-07-07 07:33:38] [Rank 0] Group 6 Loss: 5.5052 +[2025-07-07 07:33:38] [Rank 0] Group 7 Loss: 5.6453 +[2025-07-07 07:33:38] [Rank 0] Group 7 Loss: 5.6453 +[2025-07-07 07:33:38] [Rank 0] Group 8 Loss: 5.5712 +[2025-07-07 07:33:38] [Rank 0] Group 8 Loss: 5.5712 +[2025-07-07 07:33:38] [Rank 0] Group 9 Loss: 5.5764 +[2025-07-07 07:33:38] [Rank 0] Group 9 Loss: 5.5764 +[2025-07-07 07:33:38] [Rank 0] Group 10 Loss: 5.5802 +[2025-07-07 07:33:38] [Rank 0] Group 10 Loss: 5.5802 +[2025-07-07 07:33:38] [Rank 0] Group 11 Loss: 5.6142 +[2025-07-07 07:33:38] [Rank 0] Group 11 Loss: 5.6142 +[2025-07-07 07:33:38] [Rank 0] Group 0 FTA: 0.4967 +[2025-07-07 07:33:38] [Rank 0] Group 0 FTA: 0.4967 +[2025-07-07 07:33:38] [Rank 0] Group 1 FTA: 0.5182 +[2025-07-07 07:33:38] [Rank 0] Group 1 FTA: 0.5182 +[2025-07-07 07:33:38] [Rank 0] Group 2 FTA: 0.5599 +[2025-07-07 07:33:38] [Rank 0] Group 2 FTA: 0.5599 +[2025-07-07 07:33:38] [Rank 0] Group 3 FTA: 0.6224 +[2025-07-07 07:33:38] [Rank 0] Group 3 FTA: 0.6224 +[2025-07-07 07:33:38] [Rank 0] Group 4 FTA: 0.5052 +[2025-07-07 07:33:38] [Rank 0] Group 4 FTA: 0.5052 +[2025-07-07 07:33:38] [Rank 0] Group 5 FTA: 0.5130 +[2025-07-07 07:33:38] [Rank 0] Group 5 FTA: 0.5130 +[2025-07-07 07:33:38] [Rank 0] Group 6 FTA: 0.4818 +[2025-07-07 07:33:38] [Rank 0] Group 6 FTA: 0.4818 +[2025-07-07 07:33:38] [Rank 0] Group 7 FTA: 0.5026 +[2025-07-07 07:33:38] [Rank 0] Group 7 FTA: 0.5026 +[2025-07-07 07:33:38] [Rank 0] Group 8 FTA: 0.4948 +[2025-07-07 07:33:38] [Rank 0] Group 8 FTA: 0.4948 +[2025-07-07 07:33:38] [Rank 0] Group 9 FTA: 0.5273 +[2025-07-07 07:33:38] [Rank 0] Group 9 FTA: 0.5273 +[2025-07-07 07:33:38] [Rank 0] Group 10 FTA: 0.5469 +[2025-07-07 07:33:38] [Rank 0] Group 10 FTA: 0.5469 +[2025-07-07 07:33:38] [Rank 0] Group 11 FTA: 0.5049 +[2025-07-07 07:33:38] [Rank 0] Group 11 FTA: 0.5049 +[2025-07-07 07:33:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 07:33:39] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 07:33:39] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 07:33:39] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 07:33:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 07:33:39] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 07:33:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 07:33:39] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 07:33:40] [Rank 0] step:7001/10000 train_time:480519ms step_avg:68.64ms +[2025-07-07 07:33:40] [Rank 0] step:7001/10000 train_time:480519ms step_avg:68.64ms +[2025-07-07 07:33:41] [Rank 0] step:7021/10000 train_time:481540ms step_avg:68.59ms +[2025-07-07 07:33:41] [Rank 0] step:7021/10000 train_time:481540ms step_avg:68.59ms +[2025-07-07 07:33:42] [Rank 0] step:7041/10000 train_time:482682ms step_avg:68.55ms +[2025-07-07 07:33:42] [Rank 0] step:7041/10000 train_time:482682ms step_avg:68.55ms +[2025-07-07 07:33:44] [Rank 0] step:7061/10000 train_time:484052ms step_avg:68.55ms +[2025-07-07 07:33:44] [Rank 0] step:7061/10000 train_time:484052ms step_avg:68.55ms +[2025-07-07 07:33:45] [Rank 0] step:7081/10000 train_time:485421ms step_avg:68.55ms +[2025-07-07 07:33:45] [Rank 0] step:7081/10000 train_time:485421ms step_avg:68.55ms +[2025-07-07 07:33:46] [Rank 0] step:7101/10000 train_time:486789ms step_avg:68.55ms +[2025-07-07 07:33:46] [Rank 0] step:7101/10000 train_time:486789ms step_avg:68.55ms +[2025-07-07 07:33:48] [Rank 0] step:7121/10000 train_time:488158ms step_avg:68.55ms +[2025-07-07 07:33:48] [Rank 0] step:7121/10000 train_time:488158ms step_avg:68.55ms +[2025-07-07 07:33:49] [Rank 0] step:7141/10000 train_time:489529ms step_avg:68.55ms +[2025-07-07 07:33:49] [Rank 0] step:7141/10000 train_time:489529ms step_avg:68.55ms +[2025-07-07 07:33:51] [Rank 0] step:7161/10000 train_time:490940ms step_avg:68.56ms +[2025-07-07 07:33:51] [Rank 0] step:7161/10000 train_time:490940ms step_avg:68.56ms +[2025-07-07 07:33:52] [Rank 0] step:7181/10000 train_time:492310ms step_avg:68.56ms +[2025-07-07 07:33:52] [Rank 0] step:7181/10000 train_time:492310ms step_avg:68.56ms +[2025-07-07 07:33:53] [Rank 0] step:7201/10000 train_time:493964ms step_avg:68.60ms +[2025-07-07 07:33:53] [Rank 0] step:7201/10000 train_time:493964ms step_avg:68.60ms +[2025-07-07 07:33:55] [Rank 0] step:7221/10000 train_time:495134ms step_avg:68.57ms +[2025-07-07 07:33:55] [Rank 0] step:7221/10000 train_time:495134ms step_avg:68.57ms +[2025-07-07 07:33:56] [Rank 0] step:7241/10000 train_time:496506ms step_avg:68.57ms +[2025-07-07 07:33:56] [Rank 0] step:7241/10000 train_time:496506ms step_avg:68.57ms +[2025-07-07 07:33:58] [Rank 0] step:7261/10000 train_time:497879ms step_avg:68.57ms +[2025-07-07 07:33:58] [Rank 0] step:7261/10000 train_time:497879ms step_avg:68.57ms +[2025-07-07 07:33:59] [Rank 0] step:7281/10000 train_time:499252ms step_avg:68.57ms +[2025-07-07 07:33:59] [Rank 0] step:7281/10000 train_time:499252ms step_avg:68.57ms +[2025-07-07 07:34:00] [Rank 0] step:7301/10000 train_time:500626ms step_avg:68.57ms +[2025-07-07 07:34:00] [Rank 0] 
+[2025-07-07 07:34:02] [Rank 0] step:7321/10000 train_time:502000ms step_avg:68.57ms +[2025-07-07 07:34:03] [Rank 0] step:7341/10000 train_time:503374ms step_avg:68.57ms +[2025-07-07 07:34:04] [Rank 0] step:7361/10000 train_time:504751ms step_avg:68.57ms +[2025-07-07 07:34:06] [Rank 0] step:7381/10000 train_time:506124ms step_avg:68.57ms +[2025-07-07 07:34:07] [Rank 0] step:7401/10000 train_time:507549ms step_avg:68.58ms +[2025-07-07 07:34:09] [Rank 0] step:7421/10000 train_time:508922ms step_avg:68.58ms +[2025-07-07 07:34:10] [Rank 0] step:7441/10000 train_time:510297ms step_avg:68.58ms +[2025-07-07 07:34:11] [Rank 0] step:7461/10000 train_time:511671ms step_avg:68.58ms +[2025-07-07 07:34:13] [Rank 0] step:7481/10000 train_time:513045ms step_avg:68.58ms +[2025-07-07 07:34:14] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:34:15] [Rank 0] PRINT: step:7500/10000 train_loss:0.9257 val_loss:1.0432 train_time:515045ms step_avg:68.67ms +[2025-07-07 07:34:15] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:34:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
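The sampling routine itself is not reproduced in this log. As a hedged sketch only: a stratified draw over the 12 question groups that rounds each stratum up can overshoot its target, which would explain an evaluation set larger than the ~5000 requested; all names below are illustrative, not taken from the training script, and the exact rule that yields 5633 is not recoverable from the log.

import math, random
from collections import defaultdict

def stratified_sample(examples, target=5000):
    # examples: list of (group_id, sample) pairs -- assumed structure
    by_group = defaultdict(list)
    for gid, ex in examples:
        by_group[gid].append(ex)
    quota = math.ceil(target / len(by_group))  # ceil per group can push the total past target
    picked = []
    for pool in by_group.values():
        picked.extend(random.sample(pool, min(quota, len(pool))))
    return picked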
+[2025-07-07 07:34:15] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 07:39:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 07:39:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 07:39:35] [Rank 0] Total Loss: 5.7897 +[2025-07-07 07:39:35] [Rank 0] Total FTA: 0.5651 +[2025-07-07 07:39:35] [Rank 0] Group 0 Loss: 6.0918 +[2025-07-07 07:39:35] [Rank 0] Group 1 Loss: 5.4701 +[2025-07-07 07:39:35] [Rank 0] Group 2 Loss: 5.4486 +[2025-07-07 07:39:35] [Rank 0] Group 3 Loss: 5.8580 +[2025-07-07 07:39:35] [Rank 0] Group 4 Loss: 5.8726 +[2025-07-07 07:39:35] [Rank 0] Group 5 Loss: 5.7158 +[2025-07-07 07:39:35] [Rank 0] Group 6 Loss: 5.7413 +[2025-07-07 07:39:35] [Rank 0] Group 7 Loss: 5.7628 +[2025-07-07 07:39:35] [Rank 0] Group 8 Loss: 5.7964 +[2025-07-07 07:39:35] [Rank 0] Group 9 Loss: 5.8068 +[2025-07-07 07:39:35] [Rank 0] Group 10 Loss: 5.7886 +[2025-07-07 07:39:35] [Rank 0] Group 11 Loss: 5.8037 +[2025-07-07 07:39:35] [Rank 0] Group 0 FTA: 0.4928 +[2025-07-07 07:39:35] [Rank 0] Group 1 FTA: 0.6615 +[2025-07-07 07:39:35] [Rank 0] Group 2 FTA: 0.5521 +[2025-07-07 07:39:35] [Rank 0] Group 3 FTA: 0.7318 +[2025-07-07 07:39:35] [Rank 0] Group 4 FTA: 0.4844 +[2025-07-07 07:39:35] [Rank 0] Group 5 FTA: 0.6615 +[2025-07-07 07:39:35] [Rank 0] Group 6 FTA: 0.5182 +[2025-07-07 07:39:35] [Rank 0] Group 7 FTA: 0.5469 +[2025-07-07 07:39:35] [Rank 0] Group 8 FTA: 0.5443 +[2025-07-07 07:39:35] [Rank 0] Group 9 FTA: 0.5664 +[2025-07-07 07:39:35] [Rank 0] Group 10 FTA: 0.5840 +[2025-07-07 07:39:35] [Rank 0] Group 11 FTA: 0.5420 +[2025-07-07 07:39:35] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 07:39:36] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 07:39:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 07:39:36] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 07:39:36] [Rank 0] step:7501/10000 train_time:515056ms step_avg:68.67ms +[2025-07-07 07:39:38] [Rank 0] step:7521/10000 train_time:515828ms step_avg:68.59ms +[2025-07-07 07:39:39] [Rank 0] step:7541/10000 train_time:517193ms step_avg:68.58ms +[2025-07-07 07:39:40] [Rank 0] step:7561/10000 train_time:518561ms step_avg:68.58ms +[2025-07-07 07:39:42] [Rank 0] step:7581/10000 train_time:519982ms step_avg:68.59ms +[2025-07-07 07:39:43] [Rank 0] step:7601/10000 train_time:521351ms step_avg:68.59ms +[2025-07-07 07:39:45] [Rank 0] step:7621/10000 train_time:522720ms step_avg:68.59ms +[2025-07-07 07:39:46] [Rank 0] step:7641/10000 train_time:524090ms step_avg:68.59ms +[2025-07-07 07:39:47] [Rank 0] step:7661/10000 train_time:525461ms step_avg:68.59ms +[2025-07-07 07:39:49] [Rank 0] step:7681/10000 train_time:526832ms step_avg:68.59ms +[2025-07-07 07:39:50] [Rank 0] step:7701/10000 train_time:528203ms step_avg:68.59ms +[2025-07-07 07:39:51] [Rank 0] step:7721/10000 train_time:529575ms step_avg:68.59ms +[2025-07-07 07:39:53] [Rank 0] step:7741/10000 train_time:531200ms step_avg:68.62ms +[2025-07-07 07:39:54] [Rank 0] step:7761/10000 train_time:532369ms step_avg:68.60ms +[2025-07-07 07:39:56] [Rank 0] step:7781/10000 train_time:533742ms step_avg:68.60ms +[2025-07-07 07:39:57] [Rank 0] step:7801/10000 train_time:535115ms step_avg:68.60ms
+[2025-07-07 07:39:58] [Rank 0] step:7821/10000 train_time:536490ms step_avg:68.60ms +[2025-07-07 07:40:00] [Rank 0] step:7841/10000 train_time:537863ms step_avg:68.60ms +[2025-07-07 07:40:01] [Rank 0] step:7861/10000 train_time:539237ms step_avg:68.60ms +[2025-07-07 07:40:02] [Rank 0] step:7881/10000 train_time:540611ms step_avg:68.60ms +[2025-07-07 07:40:04] [Rank 0] step:7901/10000 train_time:541983ms step_avg:68.60ms +[2025-07-07 07:40:05] [Rank 0] step:7921/10000 train_time:543356ms step_avg:68.60ms +[2025-07-07 07:40:07] [Rank 0] step:7941/10000 train_time:544753ms step_avg:68.60ms +[2025-07-07 07:40:08] [Rank 0] step:7961/10000 train_time:546125ms step_avg:68.60ms +[2025-07-07 07:40:09] [Rank 0] step:7981/10000 train_time:547499ms step_avg:68.60ms +[2025-07-07 07:40:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:40:12] [Rank 0] PRINT: step:8000/10000 train_loss:0.9051 val_loss:1.0180 train_time:549540ms step_avg:68.69ms +[2025-07-07 07:40:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:40:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
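The recurring divisibility warning is plain integer arithmetic: 1966080 / 262144 = 7.5, so a validation loop that floors the step count runs 7 full batches and leaves 131072 tokens unread. A minimal reproduction of the check (variable names assumed, not taken from the script):

val_tokens, val_batch_size = 1966080, 262144
steps, leftover = divmod(val_tokens, val_batch_size)
assert (steps, leftover) == (7, 131072)
if leftover:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible by "
          f"val_batch_size ({val_batch_size}). Some tokens might be missed.")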
+[2025-07-07 07:40:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 07:45:35] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 07:45:35] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 07:45:35] [Rank 0] Total Loss: 5.7624 +[2025-07-07 07:45:35] [Rank 0] Total FTA: 0.6304 +[2025-07-07 07:45:35] [Rank 0] Group 0 Loss: 5.9540 +[2025-07-07 07:45:35] [Rank 0] Group 1 Loss: 5.6734 +[2025-07-07 07:45:35] [Rank 0] Group 2 Loss: 5.3697 +[2025-07-07 07:45:35] [Rank 0] Group 3 Loss: 5.8415 +[2025-07-07 07:45:35] [Rank 0] Group 4 Loss: 5.7697 +[2025-07-07 07:45:35] [Rank 0] Group 5 Loss: 5.7274 +[2025-07-07 07:45:35] [Rank 0] Group 6 Loss: 5.6964 +[2025-07-07 07:45:35] [Rank 0] Group 7 Loss: 5.7636 +[2025-07-07 07:45:35] [Rank 0] Group 8 Loss: 5.8209 +[2025-07-07 07:45:35] [Rank 0] Group 9 Loss: 5.7597 +[2025-07-07 07:45:35] [Rank 0] Group 10 Loss: 5.7639 +[2025-07-07 07:45:35] [Rank 0] Group 11 Loss: 5.7820 +[2025-07-07 07:45:35] [Rank 0] Group 0 FTA: 0.6502 +[2025-07-07 07:45:35] [Rank 0] Group 1 FTA: 0.6615 +[2025-07-07 07:45:35] [Rank 0] Group 2 FTA: 0.8646 +[2025-07-07 07:45:35] [Rank 0] Group 3 FTA: 0.6120 +[2025-07-07 07:45:35] [Rank 0] Group 4 FTA: 0.5807 +[2025-07-07 07:45:35] [Rank 0] Group 5 FTA: 0.6198 +[2025-07-07 07:45:35] [Rank 0] Group 6 FTA: 0.6016 +[2025-07-07 07:45:35] [Rank 0] Group 7 FTA: 0.5807 +[2025-07-07 07:45:35] [Rank 0] Group 8 FTA: 0.5911 +[2025-07-07 07:45:35] [Rank 0] Group 9 FTA: 0.6211 +[2025-07-07 07:45:35] [Rank 0] Group 10 FTA: 0.6211 +[2025-07-07 07:45:35] [Rank 0] Group 11 FTA: 0.5967 +[2025-07-07 07:45:36] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 07:45:36] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 07:45:36] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 07:45:37] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 07:45:37] [Rank 0] step:8001/10000 train_time:549550ms step_avg:68.69ms +[2025-07-07 07:45:38] [Rank 0] step:8021/10000 train_time:550312ms step_avg:68.61ms +[2025-07-07 07:45:39] [Rank 0] step:8041/10000 train_time:551678ms step_avg:68.61ms +[2025-07-07 07:45:41] [Rank 0] step:8061/10000 train_time:553048ms step_avg:68.61ms +[2025-07-07 07:45:42] [Rank 0] step:8081/10000 train_time:554416ms step_avg:68.61ms +[2025-07-07 07:45:44] [Rank 0] step:8101/10000 train_time:555832ms step_avg:68.61ms +[2025-07-07 07:45:45] [Rank 0] step:8121/10000 train_time:557184ms step_avg:68.61ms +[2025-07-07 07:45:46] [Rank 0] step:8141/10000 train_time:558554ms step_avg:68.61ms +[2025-07-07 07:45:48] [Rank 0] step:8161/10000 train_time:559925ms step_avg:68.61ms +[2025-07-07 07:45:49] [Rank 0] step:8181/10000 train_time:561295ms step_avg:68.61ms +[2025-07-07 07:45:50] [Rank 0] step:8201/10000 train_time:562666ms step_avg:68.61ms +[2025-07-07 07:45:52] [Rank 0] step:8221/10000 train_time:564038ms step_avg:68.61ms +[2025-07-07 07:45:53] [Rank 0] step:8241/10000 train_time:565410ms step_avg:68.61ms +[2025-07-07 07:45:55] [Rank 0] step:8261/10000 train_time:566783ms step_avg:68.61ms +[2025-07-07 07:45:56] [Rank 0] step:8281/10000 train_time:568203ms step_avg:68.62ms +[2025-07-07 07:45:57] [Rank 0] step:8301/10000 train_time:569571ms step_avg:68.61ms
+[2025-07-07 07:45:59] [Rank 0] step:8321/10000 train_time:570945ms step_avg:68.62ms +[2025-07-07 07:46:00] [Rank 0] step:8341/10000 train_time:572321ms step_avg:68.62ms +[2025-07-07 07:46:02] [Rank 0] step:8361/10000 train_time:573696ms step_avg:68.62ms +[2025-07-07 07:46:03] [Rank 0] step:8381/10000 train_time:575071ms step_avg:68.62ms +[2025-07-07 07:46:04] [Rank 0] step:8401/10000 train_time:576446ms step_avg:68.62ms +[2025-07-07 07:46:06] [Rank 0] step:8421/10000 train_time:577822ms step_avg:68.62ms +[2025-07-07 07:46:07] [Rank 0] step:8441/10000 train_time:579198ms step_avg:68.62ms +[2025-07-07 07:46:08] [Rank 0] step:8461/10000 train_time:580826ms step_avg:68.65ms +[2025-07-07 07:46:10] [Rank 0] step:8481/10000 train_time:581986ms step_avg:68.62ms +[2025-07-07 07:46:11] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:46:12] [Rank 0] PRINT: step:8500/10000 train_loss:0.8842 val_loss:0.9899 train_time:583986ms step_avg:68.70ms +[2025-07-07 07:46:12] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:46:12] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
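The step_avg field in these lines is simply cumulative wall-clock training time divided by the step index; for the step 8500 summary above, 583986 ms / 8500 ≈ 68.70 ms, matching the printed value. A sketch of the computation (names assumed):

train_time_ms, step, total_steps = 583986, 8500, 10000
step_avg = train_time_ms / step  # cumulative average, not a per-step timing
print(f"step:{step}/{total_steps} train_time:{train_time_ms}ms step_avg:{step_avg:.2f}ms")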
+[2025-07-07 07:46:12] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 07:51:31] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 07:51:31] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 07:51:31] [Rank 0] Total Loss: 5.8006 +[2025-07-07 07:51:31] [Rank 0] Total FTA: 0.6963 +[2025-07-07 07:51:31] [Rank 0] Group 0 Loss: 5.9728 +[2025-07-07 07:51:31] [Rank 0] Group 1 Loss: 5.6697 +[2025-07-07 07:51:31] [Rank 0] Group 2 Loss: 5.3996 +[2025-07-07 07:51:31] [Rank 0] Group 3 Loss: 5.8594 +[2025-07-07 07:51:31] [Rank 0] Group 4 Loss: 5.8210 +[2025-07-07 07:51:31] [Rank 0] Group 5 Loss: 5.8463 +[2025-07-07 07:51:31] [Rank 0] Group 6 Loss: 5.7091 +[2025-07-07 07:51:31] [Rank 0] Group 7 Loss: 5.8639 +[2025-07-07 07:51:31] [Rank 0] Group 8 Loss: 5.8328 +[2025-07-07 07:51:31] [Rank 0] Group 9 Loss: 5.8394 +[2025-07-07 07:51:31] [Rank 0] Group 10 Loss: 5.7716 +[2025-07-07 07:51:31] [Rank 0] Group 11 Loss: 5.8273 +[2025-07-07 07:51:31] [Rank 0] Group 0 FTA: 0.8505 +[2025-07-07 07:51:31] [Rank 0] Group 1 FTA: 0.6641 +[2025-07-07 07:51:31] [Rank 0] Group 2 FTA: 0.7344 +[2025-07-07 07:51:31] [Rank 0] Group 3 FTA: 0.7188 +[2025-07-07 07:51:31] [Rank 0] Group 4 FTA: 0.6432 +[2025-07-07 07:51:31] [Rank 0] Group 5 FTA: 0.7266 +[2025-07-07 07:51:31] [Rank 0] Group 6 FTA: 0.6328 +[2025-07-07 07:51:31] [Rank 0] Group 7 FTA: 0.6406 +[2025-07-07 07:51:31] [Rank 0] Group 8 FTA: 0.6979 +[2025-07-07 07:51:31] [Rank 0] Group 9 FTA: 0.6641 +[2025-07-07 07:51:31] [Rank 0] Group 10 FTA: 0.6953 +[2025-07-07 07:51:31] [Rank 0] Group 11 FTA: 0.6309 +[2025-07-07 07:51:32] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 07:51:32] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 07:51:32] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 07:51:33] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 07:51:33] [Rank 0] step:8501/10000 train_time:583996ms step_avg:68.70ms +[2025-07-07 07:51:34] [Rank 0] step:8521/10000 train_time:584770ms step_avg:68.63ms +[2025-07-07 07:51:36] [Rank 0] step:8541/10000 train_time:586137ms step_avg:68.63ms +[2025-07-07 07:51:37] [Rank 0] step:8561/10000 train_time:587503ms step_avg:68.63ms +[2025-07-07 07:51:38] [Rank 0] step:8581/10000 train_time:588871ms step_avg:68.63ms +[2025-07-07 07:51:40] [Rank 0] step:8601/10000 train_time:590240ms step_avg:68.62ms +[2025-07-07 07:51:41] [Rank 0] step:8621/10000 train_time:591608ms step_avg:68.62ms +[2025-07-07 07:51:42] [Rank 0] step:8641/10000 train_time:592979ms step_avg:68.62ms +[2025-07-07 07:51:44] [Rank 0] step:8661/10000 train_time:594392ms step_avg:68.63ms +[2025-07-07 07:51:45] [Rank 0] step:8681/10000 train_time:595764ms step_avg:68.63ms +[2025-07-07 07:51:47] [Rank 0] step:8701/10000 train_time:597135ms step_avg:68.63ms +[2025-07-07 07:51:48] [Rank 0] step:8721/10000 train_time:598507ms step_avg:68.63ms +[2025-07-07 07:51:49] [Rank 0] step:8741/10000 train_time:599881ms step_avg:68.63ms +[2025-07-07 07:51:51] [Rank 0] step:8761/10000 train_time:601254ms step_avg:68.63ms +[2025-07-07 07:51:52] [Rank 0] step:8781/10000 train_time:602632ms step_avg:68.63ms +[2025-07-07 07:51:53] [Rank 0] step:8801/10000 train_time:604005ms step_avg:68.63ms
+[2025-07-07 07:51:55] [Rank 0] step:8821/10000 train_time:605378ms step_avg:68.63ms +[2025-07-07 07:51:56] [Rank 0] step:8841/10000 train_time:606777ms step_avg:68.63ms +[2025-07-07 07:51:58] [Rank 0] step:8861/10000 train_time:608149ms step_avg:68.63ms +[2025-07-07 07:51:59] [Rank 0] step:8881/10000 train_time:609522ms step_avg:68.63ms +[2025-07-07 07:52:00] [Rank 0] step:8901/10000 train_time:610896ms step_avg:68.63ms +[2025-07-07 07:52:02] [Rank 0] step:8921/10000 train_time:612269ms step_avg:68.63ms +[2025-07-07 07:52:03] [Rank 0] step:8941/10000 train_time:613642ms step_avg:68.63ms +[2025-07-07 07:52:04] [Rank 0] step:8961/10000 train_time:615015ms step_avg:68.63ms +[2025-07-07 07:52:06] [Rank 0] step:8981/10000 train_time:616388ms step_avg:68.63ms +[2025-07-07 07:52:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:52:08] [Rank 0] PRINT: step:9000/10000 train_loss:0.8677 val_loss:0.9669 train_time:618381ms step_avg:68.71ms +[2025-07-07 07:52:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:52:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
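The log never expands the abbreviation FTA. If it denotes first-token accuracy -- whether the model's top-1 prediction at each answer's first token matches the target -- the per-group numbers below could be tallied roughly as in this sketch; every name here is an assumption rather than code from the script:

import torch

def first_token_accuracy(logits, targets, answer_pos, group_ids, num_groups=12):
    # logits: (batch, seq, vocab); answer_pos: first answer-token index per sample
    rows = torch.arange(logits.size(0))
    hits = logits[rows, answer_pos].argmax(-1) == targets[rows, answer_pos]
    per_group = [hits[group_ids == g].float().mean().item() for g in range(num_groups)]
    return hits.float().mean().item(), per_group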
+[2025-07-07 07:52:08] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 07:57:26] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 07:57:26] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 07:57:26] [Rank 0] Total Loss: 5.8121 +[2025-07-07 07:57:26] [Rank 0] Total FTA: 0.7128 +[2025-07-07 07:57:26] [Rank 0] Group 0 Loss: 6.1167 +[2025-07-07 07:57:26] [Rank 0] Group 1 Loss: 5.6968 +[2025-07-07 07:57:26] [Rank 0] Group 2 Loss: 5.3237 +[2025-07-07 07:57:26] [Rank 0] Group 3 Loss: 6.0178 +[2025-07-07 07:57:26] [Rank 0] Group 4 Loss: 5.8029 +[2025-07-07 07:57:26] [Rank 0] Group 5 Loss: 5.7466 +[2025-07-07 07:57:26] [Rank 0] Group 6 Loss: 5.6961 +[2025-07-07 07:57:26] [Rank 0] Group 7 Loss: 5.7237 +[2025-07-07 07:57:26] [Rank 0] Group 8 Loss: 5.8555 +[2025-07-07 07:57:26] [Rank 0] Group 9 Loss: 5.7603 +[2025-07-07 07:57:26] [Rank 0] Group 10 Loss: 5.8165 +[2025-07-07 07:57:26] [Rank 0] Group 11 Loss: 5.8319 +[2025-07-07 07:57:26] [Rank 0] Group 0 FTA: 0.8166 +[2025-07-07 07:57:26] [Rank 0] Group 1 FTA: 0.4661 +[2025-07-07 07:57:26] [Rank 0] Group 2 FTA: 0.7552 +[2025-07-07 07:57:26] [Rank 0] Group 3 FTA: 0.7708 +[2025-07-07 07:57:26] [Rank 0] Group 4 FTA: 0.6562 +[2025-07-07 07:57:26] [Rank 0] Group 5 FTA: 0.7240 +[2025-07-07 07:57:26] [Rank 0] Group 6 FTA: 0.7031 +[2025-07-07 07:57:26] [Rank 0] Group 7 FTA: 0.7318 +[2025-07-07 07:57:26] [Rank 0] Group 8 FTA: 0.7188 +[2025-07-07 07:57:26] [Rank 0] Group 9 FTA: 0.7266 +[2025-07-07 07:57:26] [Rank 0] Group 10 FTA: 0.7363 +[2025-07-07 07:57:26] [Rank 0] Group 11 FTA: 0.6855 +[2025-07-07 07:57:27] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 07:57:27] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 07:57:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 07:57:28] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 07:57:29] [Rank 0] step:9001/10000 train_time:618707ms step_avg:68.74ms +[2025-07-07 07:57:30] [Rank 0] step:9021/10000 train_time:619873ms step_avg:68.71ms +[2025-07-07 07:57:31] [Rank 0] step:9041/10000 train_time:621240ms step_avg:68.71ms +[2025-07-07 07:57:33] [Rank 0] step:9061/10000 train_time:622609ms step_avg:68.71ms +[2025-07-07 07:57:34] [Rank 0] step:9081/10000 train_time:623977ms step_avg:68.71ms +[2025-07-07 07:57:35] [Rank 0] step:9101/10000 train_time:625346ms step_avg:68.71ms +[2025-07-07 07:57:37] [Rank 0] step:9121/10000 train_time:626715ms step_avg:68.71ms +[2025-07-07 07:57:38] [Rank 0] step:9141/10000 train_time:628085ms step_avg:68.71ms +[2025-07-07 07:57:40] [Rank 0] step:9161/10000 train_time:629559ms step_avg:68.72ms +[2025-07-07 07:57:41] [Rank 0] step:9181/10000 train_time:631613ms step_avg:68.80ms +[2025-07-07 07:57:42] [Rank 0] step:9201/10000 train_time:632352ms step_avg:68.73ms +[2025-07-07 07:57:44] [Rank 0] step:9221/10000 train_time:633724ms step_avg:68.73ms +[2025-07-07 07:57:45] [Rank 0] step:9241/10000 train_time:635095ms step_avg:68.73ms +[2025-07-07 07:57:47] [Rank 0] step:9261/10000 train_time:636468ms step_avg:68.73ms +[2025-07-07 07:57:48] [Rank 0] step:9281/10000 train_time:637840ms step_avg:68.73ms +[2025-07-07 07:57:49] [Rank 0] step:9301/10000 train_time:639212ms step_avg:68.73ms
+[2025-07-07 07:57:51] [Rank 0] step:9321/10000 train_time:640585ms step_avg:68.72ms +[2025-07-07 07:57:52] [Rank 0] step:9341/10000 train_time:641960ms step_avg:68.72ms +[2025-07-07 07:57:53] [Rank 0] step:9361/10000 train_time:643584ms step_avg:68.75ms +[2025-07-07 07:57:55] [Rank 0] step:9381/10000 train_time:644739ms step_avg:68.73ms +[2025-07-07 07:57:56] [Rank 0] step:9401/10000 train_time:646112ms step_avg:68.73ms +[2025-07-07 07:57:58] [Rank 0] step:9421/10000 train_time:647486ms step_avg:68.73ms +[2025-07-07 07:57:59] [Rank 0] step:9441/10000 train_time:648863ms step_avg:68.73ms +[2025-07-07 07:58:00] [Rank 0] step:9461/10000 train_time:650238ms step_avg:68.73ms +[2025-07-07 07:58:02] [Rank 0] step:9481/10000 train_time:651613ms step_avg:68.73ms +[2025-07-07 07:58:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:58:04] [Rank 0] PRINT: step:9500/10000 train_loss:0.8534 val_loss:0.9499 train_time:653612ms step_avg:68.80ms +[2025-07-07 07:58:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:58:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
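The four PNGs referenced after each evaluation are re-rendered from the accumulated history every 500 steps. A minimal matplotlib version of the per-class loss plot, with an assumed history layout, might look like:

import matplotlib.pyplot as plt

def save_per_class_curves(steps, group_losses, out_path):
    # group_losses: dict of group id -> list of losses, one entry per detailed eval
    fig, ax = plt.subplots()
    for gid in sorted(group_losses):
        ax.plot(steps, group_losses[gid], label=f"Group {gid}")
    ax.set_xlabel("step"); ax.set_ylabel("loss"); ax.legend(ncol=3, fontsize=6)
    fig.savefig(out_path, dpi=150)
    plt.close(fig)  # release the figure between evals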
+[2025-07-07 07:58:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 08:03:23] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 08:03:23] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 08:03:23] [Rank 0] Total Loss: 5.8663 +[2025-07-07 08:03:23] [Rank 0] Total FTA: 0.7847 +[2025-07-07 08:03:23] [Rank 0] Group 0 Loss: 6.3798 +[2025-07-07 08:03:23] [Rank 0] Group 1 Loss: 5.5231 +[2025-07-07 08:03:23] [Rank 0] Group 2 Loss: 5.4476 +[2025-07-07 08:03:23] [Rank 0] Group 3 Loss: 5.8280 +[2025-07-07 08:03:23] [Rank 0] Group 4 Loss: 5.8622 +[2025-07-07 08:03:23] [Rank 0] Group 5 Loss: 5.8285 +[2025-07-07 08:03:23] [Rank 0] Group 6 Loss: 5.7012 +[2025-07-07 08:03:23] [Rank 0] Group 7 Loss: 5.8166 +[2025-07-07 08:03:23] [Rank 0] Group 8 Loss: 5.9005 +[2025-07-07 08:03:23] [Rank 0] Group 9 Loss: 5.8668 +[2025-07-07 08:03:23] [Rank 0] Group 10 Loss: 5.8593 +[2025-07-07 08:03:23] [Rank 0] Group 11 Loss: 5.8678 +[2025-07-07 08:03:23] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-07 08:03:23] [Rank 0] Group 1 FTA: 0.6510 +[2025-07-07 08:03:23] [Rank 0] Group 2 FTA: 0.8516 +[2025-07-07 08:03:23] [Rank 0] Group 3 FTA: 0.7656 +[2025-07-07 08:03:23] [Rank 0] Group 4 FTA: 0.8229 +[2025-07-07 08:03:23] [Rank 0] Group 5 FTA: 0.8021 +[2025-07-07 08:03:23] [Rank 0] Group 6 FTA: 0.6693 +[2025-07-07 08:03:23] [Rank 0] Group 7 FTA: 0.7474 +[2025-07-07 08:03:23] [Rank 0] Group 8 FTA: 0.7370 +[2025-07-07 08:03:23] [Rank 0] Group 9 FTA: 0.6875 +[2025-07-07 08:03:23] [Rank 0] Group 10 FTA: 0.7559 +[2025-07-07 08:03:23] [Rank 0] Group 11 FTA: 0.7480 +[2025-07-07 08:03:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 08:03:24] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 08:03:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 08:03:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 08:03:24] [Rank 0] step:9501/10000 train_time:653623ms step_avg:68.80ms +[2025-07-07 08:03:26] [Rank 0] step:9521/10000 train_time:654379ms step_avg:68.73ms +[2025-07-07 08:03:27] [Rank 0] step:9541/10000 train_time:655744ms step_avg:68.73ms +[2025-07-07 08:03:28] [Rank 0] step:9561/10000 train_time:657256ms step_avg:68.74ms +[2025-07-07 08:03:30] [Rank 0] step:9581/10000 train_time:658623ms step_avg:68.74ms +[2025-07-07 08:03:31] [Rank 0] step:9601/10000 train_time:659991ms step_avg:68.74ms +[2025-07-07 08:03:33] [Rank 0] step:9621/10000 train_time:661361ms step_avg:68.74ms +[2025-07-07 08:03:34] [Rank 0] step:9641/10000 train_time:662731ms step_avg:68.74ms +[2025-07-07 08:03:35] [Rank 0] step:9661/10000 train_time:664102ms step_avg:68.74ms +[2025-07-07 08:03:37] [Rank 0] step:9681/10000 train_time:665471ms step_avg:68.74ms +[2025-07-07 08:03:38] [Rank 0] step:9701/10000 train_time:666845ms step_avg:68.74ms +[2025-07-07 08:03:40] [Rank 0] step:9721/10000 train_time:668262ms step_avg:68.74ms +[2025-07-07 08:03:41] [Rank 0] step:9741/10000 train_time:669640ms step_avg:68.74ms +[2025-07-07 08:03:42] [Rank 0] step:9761/10000 train_time:671012ms step_avg:68.74ms +[2025-07-07 08:03:44] [Rank 0] step:9781/10000 train_time:672384ms step_avg:68.74ms +[2025-07-07 08:03:45] [Rank 0] step:9801/10000 train_time:673856ms step_avg:68.75ms
+[2025-07-07 08:03:46] [Rank 0] step:9821/10000 train_time:675228ms step_avg:68.75ms +[2025-07-07 08:03:48] [Rank 0] step:9841/10000 train_time:676603ms step_avg:68.75ms +[2025-07-07 08:03:49] [Rank 0] step:9861/10000 train_time:677977ms step_avg:68.75ms +[2025-07-07 08:03:51] [Rank 0] step:9881/10000 train_time:679350ms step_avg:68.75ms +[2025-07-07 08:03:52] [Rank 0] step:9901/10000 train_time:680723ms step_avg:68.75ms +[2025-07-07 08:03:53] [Rank 0] step:9921/10000 train_time:682121ms step_avg:68.76ms +[2025-07-07 08:03:55] [Rank 0] step:9941/10000 train_time:683495ms step_avg:68.76ms +[2025-07-07 08:03:56] [Rank 0] step:9961/10000 train_time:684869ms step_avg:68.76ms +[2025-07-07 08:03:57] [Rank 0] step:9981/10000 train_time:686243ms step_avg:68.75ms +[2025-07-07 08:03:59] [Rank 0] step:10000/10000 train_time:687549ms step_avg:68.75ms +[2025-07-07 08:03:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:04:00] [Rank 0] PRINT: step:10000/10000 train_loss:0.8482 val_loss:0.9442 train_time:688247ms step_avg:68.82ms +[2025-07-07 08:04:00] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 08:04:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
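The peak-memory line at the very end of this log below is presumably read from CUDA's allocator statistics, which PyTorch exposes directly; a sketch of how such a line can be produced:

import torch
alloc_mib = torch.cuda.max_memory_allocated() // (1024 * 1024)
reserved_mib = torch.cuda.max_memory_reserved() // (1024 * 1024)
print(f"Peak memory allocated: {alloc_mib} MiB reserved: {reserved_mib} MiB")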
+[2025-07-07 08:04:00] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 08:09:21] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 08:09:21] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 08:09:21] [Rank 0] Total Loss: 5.8522 +[2025-07-07 08:09:21] [Rank 0] Total FTA: 0.8022 +[2025-07-07 08:09:21] [Rank 0] Group 0 Loss: 6.2253 +[2025-07-07 08:09:21] [Rank 0] Group 1 Loss: 5.5811 +[2025-07-07 08:09:21] [Rank 0] Group 2 Loss: 5.4555 +[2025-07-07 08:09:21] [Rank 0] Group 3 Loss: 5.8160 +[2025-07-07 08:09:21] [Rank 0] Group 4 Loss: 5.8544 +[2025-07-07 08:09:21] [Rank 0] Group 5 Loss: 5.7764 +[2025-07-07 08:09:21] [Rank 0] Group 6 Loss: 5.7613 +[2025-07-07 08:09:21] [Rank 0] Group 7 Loss: 5.8949 +[2025-07-07 08:09:21] [Rank 0] Group 8 Loss: 5.8942 +[2025-07-07 08:09:21] [Rank 0] Group 9 Loss: 5.7725 +[2025-07-07 08:09:21] [Rank 0] Group 10 Loss: 5.8572 +[2025-07-07 08:09:21] [Rank 0] Group 11 Loss: 5.8833 +[2025-07-07 08:09:21] [Rank 0] Group 0 FTA: 1.0000 +[2025-07-07 08:09:21] [Rank 0] Group 1 FTA: 0.6615 +[2025-07-07 08:09:21] [Rank 0] Group 2 FTA: 0.6771 +[2025-07-07 08:09:21] [Rank 0] Group 3 FTA: 0.8802 +[2025-07-07 08:09:21] [Rank 0] Group 4 FTA: 0.7812 +[2025-07-07 08:09:22] [Rank 0] Group 5 FTA: 0.8047 +[2025-07-07 08:09:22] [Rank 0] Group 6 FTA: 0.7734 +[2025-07-07 08:09:22] [Rank 0] Group 7 FTA: 0.7943 +[2025-07-07 08:09:22] [Rank 0] Group 8 FTA: 0.7500 +[2025-07-07 08:09:22] [Rank 0] Group 9 FTA: 0.7852 +[2025-07-07 08:09:22] [Rank 0] Group 10 FTA: 0.8066 +[2025-07-07 08:09:22] [Rank 0] Group 11 FTA: 0.7666 +[2025-07-07 08:09:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_loss_curves.png +[2025-07-07 08:09:22] [Rank 0] [✓] Per-Class FTA curve updated
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/per_class_acc_curves.png +[2025-07-07 08:09:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_loss_curve.png +[2025-07-07 08:09:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_42/total_acc_curve.png +[2025-07-07 08:09:23] [Rank 0] step:10001/10000 train_time:688258ms step_avg:68.82ms +[2025-07-07 08:09:23] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 08:09:23 2025 --- +[2025-07-07 08:09:23] [Rank 0] PRINT: Peak memory allocated: 9064 MiB reserved: 10316 MiB diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/config.json b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/config.json new file mode 100644 index 0000000000000000000000000000000000000000..498f4d8cbd3a8e157d799d0d999662c54fe5eb90 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/config.json @@ -0,0 +1,23 @@ +{ + "cli_args": { + "unet": false, + "seed": 45, + "optimizer_mode": 5, + "model_parameterization": "qkvo", + "adam_lr": 0.002 + }, + "hyperparameters": { + "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin", + "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin", + "val_tokens": 1966080, + "train_seq_len": 12288, + "val_seq_len": 65536, + "num_iterations": 10000, + "cooldown_frac": 0.8, + "vocab_size": 50257, + "val_loss_every": 500, + "save_checkpoint": false + }, + "run_uuid_for_log": "c92a03d1-97d5-4e6a-806a-4d1fe09a516e", + "script_code_logged_at_start": true +} \ No newline at end of file diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..fb26d6b1761cf50db3081cd05e8277d2564f2203 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc730ad3352a6f9e761bc0ac3b6a5c9c13b81fa8b9b84876fc9f52b6b3c48629 +size 358303 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..8a1a71892ddbaf4316dd6bb6b2cfe6a9b0e75dfb --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d86d4fd460bc8965eaf1524e7ac015ca54a7ea0f20b89a1d017b2ec55c2009af +size 382415 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..eb7ca9e49d754109976f5b1a3f69a082faddb48a --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e22858de6f2dbd09570353b33e4a7659e693f6d14a6153ae9ad38692ca1e95b9 +size 94427 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png new file mode 100644 index 0000000000000000000000000000000000000000..6e0bfbddda8ac7ac180b1853765177b7671f2a27 --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6dbf3f7a58d5b0ca50e5f7f2ea4f7ba000e5d1ad1c660cc2531e9548d3546b3a +size 106646 diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/training_log_c92a03d1-97d5-4e6a-806a-4d1fe09a516e.txt b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/training_log_c92a03d1-97d5-4e6a-806a-4d1fe09a516e.txt new file mode 100644 index 0000000000000000000000000000000000000000..52b510af82aac8bec21de052d853a8748c022d4c --- /dev/null +++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/training_log_c92a03d1-97d5-4e6a-806a-4d1fe09a516e.txt @@ -0,0 +1,4052 @@ +[2025-07-07 08:08:53] [Rank 0] PRINT: --- Script Start: Mon Jul 7 08:08:53 2025 --- +[2025-07-07 08:08:53] [Rank 0] PRINT: --- Script Start: Mon Jul 7 08:08:53 2025 --- +[2025-07-07 08:08:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-07 08:08:53] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002) +[2025-07-07 08:08:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 08:08:53] [Rank 0] PRINT: Hyperparameters: Hyperparameters() +[2025-07-07 08:08:53] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-07 08:08:53] [Rank 0] PRINT: Using fixed seed: 45 +[2025-07-07 08:08:53] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45 +[2025-07-07 08:08:53] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45 +[2025-07-07 08:08:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as 
mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
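+                    # A hedged worked example of the mode table above (the
+                    # authoritative grouping is the optimizer setup code further
+                    # down): mode 2 hands attn_vo_group (= attn_v_params +
+                    # attn_o_params) to Muon, while QK attention and both MLP
+                    # matrices fall through to Adam; mode 5, used in this run,
+                    # sends every matrix to Adam and creates no Muon optimizer.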
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
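+# Worked example (hedged, but consistent with the config.json and the
+# "Run directory" line logged at the top of this file): with optimizer_mode=5,
+# model_parameterization="qkvo", adam_lr=0.002 and seed=45, this path resolves
+# to logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45.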
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the per-run logfile exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
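+        # (Hedged note on the encoding step below: with the GPT-2 BPE used here,
+        # "Paris" and " Paris" encode to different tokens, and mid-sequence the
+        # model predicts the leading-space form. That is why the answer is
+        # encoded as ' ' + answer before answer_tokens[0] is taken as the
+        # expected first token.)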
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
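+# Hedged sanity check of get_lr above (assuming num_iterations=10000 and
+# cooldown_frac=0.8, as in this run): the multiplier stays at 1.0 for the first
+# 2000 steps, then decays linearly toward 0.1.
+assert abs(get_lr(1000) - 1.0) < 1e-6   # x=0.1 < 0.2: still in the stable phase
+assert abs(get_lr(6000) - 0.55) < 1e-6  # x=0.6 -> w=0.5 -> 0.5*1.0 + 0.5*0.1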
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 08:08:53] [Rank 0] import os +import sys +with open(sys.argv[0]) as f: + code = f.read() # read the code of this file ASAP, for logging +import uuid +import time +import copy +import glob +import math +from dataclasses import dataclass, asdict +from functools import lru_cache +from pathlib import Path +import argparse # Keep argparse for --unet and potentially --optimizer_mode +import json +import random +import numpy as np +import itertools +from itertools import cycle +from transformers import GPT2Tokenizer +from collections import defaultdict +import matplotlib.pyplot as plt +from matplotlib.colors import Normalize +from tqdm import tqdm +import re + +# + +os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" +import torch +torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems +from torch import Tensor, nn +import torch.nn.functional as F +import torch.distributed as dist +# use of FlexAttention contributed by @KoszarskyB +from torch.nn.attention.flex_attention import BlockMask, flex_attention +sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present +from optimizers.MUON import Muon +from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed + +#from kn_util.utils import setup_debugpy +#torch._inductor.config.coordinate_descent_tuning = True + +# ----------------------------------------------------------------------------- + +mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports + +# ----------------------------------------------------------------------------- +# Seeding Function +def set_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed_all(seed) + print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
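+                    # One plausible launch line for the run recorded in this log
+                    # (hedged: the script filename is not captured here, and
+                    # torchrun is only one way to populate RANK/WORLD_SIZE):
+                    #   torchrun --nproc_per_node=8 <this_script.py> --seed 45 \
+                    #       --optimizer_mode 5 --model_parameterization qkvo --adam_lr 0.002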
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
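+# Reference sketch (hypothetical helper, not used anywhere in this script):
+# assuming the grouping described in the --optimizer_mode help text above,
+# each mode partitions the hidden weight matrices between Muon and Adam as
+# follows. Embedding, lm_head, and scalar parameters always go to Adam.
+_MODE_PARTITION_SKETCH = {
+    0: ("QK+VO attn, W1+W2 MLP", "-"),
+    1: ("QK attn",               "VO attn, MLP"),
+    2: ("VO attn",               "QK attn, MLP"),
+    3: ("QK+VO attn",            "MLP"),
+    4: ("MLP",                   "QK+VO attn"),
+    5: ("-",                     "all attn + MLP matrices"),
+    6: ("W2 MLP",                "attn, W1 MLP"),
+    7: ("VO attn, MLP",          "QK attn"),
+    8: ("VO attn, W2 MLP",       "QK attn, W1 MLP"),
+}  # mode -> (Muon targets, Adam matrix targets)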
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message to the log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?' 
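+        # e.g. a hypothetical cleaned item "Where was Ada Lovelace born? London"
+        # yields prompt "Where was Ada Lovelace born?" and answer "London"; the
+        # leading-space encoding below then makes " London" tokenize the same
+        # way the answer appeared in the training context.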
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
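+# Quick sanity-check sketch for the stable-then-decay schedule above (expected
+# values assume num_iterations=10000 and cooldown_frac=0.8 as configured): the
+# multiplier holds at 1.0 for the first 20% of steps, then decays linearly to 0.1.
+assert abs(get_lr(0) - 1.0) < 1e-6       # stable phase
+assert abs(get_lr(2000) - 1.0) < 1e-6    # boundary: w = (1 - 0.2) / 0.8 = 1.0
+assert abs(get_lr(6000) - 0.55) < 1e-6   # mid-cooldown: 0.5 * 1.0 + 0.5 * 0.1
+assert abs(get_lr(10000) - 0.1) < 1e-6   # final step decays to 10% of base LR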
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 08:08:54] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 08:08:54] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 08:08:54] [Rank 0] PRINT: Constructing model... +[2025-07-07 08:08:54] [Rank 0] PRINT: Constructing model... +[2025-07-07 08:08:56] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 08:08:56] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 08:08:56] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 08:08:56] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 08:08:56] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 08:08:56] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 08:08:57] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 08:08:57] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 08:08:57] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 08:08:57] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 08:08:57] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 08:08:57] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 08:08:57] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 08:08:57] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 08:08:57] [Rank 0] PRINT: Model returns: +[2025-07-07 08:08:57] [Rank 0] PRINT: Model returns: +[2025-07-07 08:08:57] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 08:08:57] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 08:08:57] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 08:08:57] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 08:08:57] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-07 08:08:57] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-07 08:08:57] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 08:08:57] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 08:08:57] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 08:08:57] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 08:08:57] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 08:08:57] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 08:08:57] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 08:08:57] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 08:08:57] [Rank 0] PRINT: Starting warmup... +[2025-07-07 08:08:57] [Rank 0] PRINT: Starting warmup... +[2025-07-07 08:10:01] [Rank 0] PRINT: Warmup complete. +[2025-07-07 08:10:01] [Rank 0] PRINT: Warmup complete. +[2025-07-07 08:10:01] [Rank 0] PRINT: Starting training... +[2025-07-07 08:10:01] [Rank 0] PRINT: Starting training... +[2025-07-07 08:10:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:10:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 08:10:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 08:10:09] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 08:10:10] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.37ms +[2025-07-07 08:10:10] [Rank 0] step:21/10000 train_time:1016ms step_avg:48.37ms +[2025-07-07 08:10:12] [Rank 0] step:41/10000 train_time:2329ms step_avg:56.82ms +[2025-07-07 08:10:12] [Rank 0] step:41/10000 train_time:2329ms step_avg:56.82ms +[2025-07-07 08:10:13] [Rank 0] step:61/10000 train_time:3645ms step_avg:59.75ms +[2025-07-07 08:10:13] [Rank 0] step:61/10000 train_time:3645ms step_avg:59.75ms +[2025-07-07 08:10:14] [Rank 0] step:81/10000 train_time:4964ms step_avg:61.29ms +[2025-07-07 08:10:14] [Rank 0] step:81/10000 train_time:4964ms step_avg:61.29ms +[2025-07-07 08:10:16] [Rank 0] step:101/10000 train_time:6286ms step_avg:62.24ms +[2025-07-07 08:10:16] [Rank 0] step:101/10000 train_time:6286ms step_avg:62.24ms +[2025-07-07 08:10:17] [Rank 0] step:121/10000 train_time:7611ms step_avg:62.90ms +[2025-07-07 08:10:17] [Rank 0] step:121/10000 train_time:7611ms step_avg:62.90ms +[2025-07-07 08:10:18] [Rank 0] step:141/10000 train_time:8934ms step_avg:63.37ms +[2025-07-07 08:10:18] [Rank 0] step:141/10000 train_time:8934ms step_avg:63.37ms +[2025-07-07 08:10:20] [Rank 0] step:161/10000 train_time:10261ms step_avg:63.73ms +[2025-07-07 08:10:20] [Rank 0] step:161/10000 train_time:10261ms step_avg:63.73ms +[2025-07-07 08:10:21] [Rank 0] step:181/10000 train_time:12250ms step_avg:67.68ms +[2025-07-07 08:10:21] [Rank 0] step:181/10000 train_time:12250ms step_avg:67.68ms +[2025-07-07 08:10:22] [Rank 0] step:201/10000 train_time:12965ms step_avg:64.50ms +[2025-07-07 08:10:22] [Rank 0] step:201/10000 train_time:12965ms step_avg:64.50ms +[2025-07-07 08:10:24] [Rank 0] step:221/10000 train_time:14293ms step_avg:64.67ms +[2025-07-07 08:10:24] [Rank 0] step:221/10000 train_time:14293ms step_avg:64.67ms +[2025-07-07 08:10:25] [Rank 0] step:241/10000 train_time:15621ms step_avg:64.82ms +[2025-07-07 08:10:25] [Rank 0] step:241/10000 train_time:15621ms step_avg:64.82ms +[2025-07-07 08:10:26] [Rank 0] step:261/10000 train_time:16951ms step_avg:64.95ms +[2025-07-07 08:10:26] [Rank 0] step:261/10000 train_time:16951ms step_avg:64.95ms +[2025-07-07 08:10:28] [Rank 0] step:281/10000 train_time:18280ms step_avg:65.05ms +[2025-07-07 08:10:28] [Rank 0] step:281/10000 train_time:18280ms step_avg:65.05ms +[2025-07-07 08:10:29] [Rank 0] step:301/10000 train_time:19611ms step_avg:65.15ms +[2025-07-07 08:10:29] [Rank 0] step:301/10000 train_time:19611ms step_avg:65.15ms +[2025-07-07 08:10:30] [Rank 0] step:321/10000 train_time:20941ms step_avg:65.24ms +[2025-07-07 08:10:30] [Rank 0] step:321/10000 train_time:20941ms step_avg:65.24ms +[2025-07-07 08:10:32] [Rank 0] step:341/10000 train_time:22272ms step_avg:65.31ms +[2025-07-07 08:10:32] [Rank 0] step:341/10000 train_time:22272ms step_avg:65.31ms +[2025-07-07 08:10:33] [Rank 0] step:361/10000 train_time:24283ms step_avg:67.27ms +[2025-07-07 08:10:33] [Rank 0] step:361/10000 train_time:24283ms step_avg:67.27ms +[2025-07-07 08:10:34] [Rank 0] step:381/10000 train_time:25000ms step_avg:65.62ms +[2025-07-07 08:10:34] [Rank 0] step:381/10000 train_time:25000ms step_avg:65.62ms +[2025-07-07 08:10:36] [Rank 0] step:401/10000 train_time:26335ms step_avg:65.67ms +[2025-07-07 08:10:36] [Rank 0] step:401/10000 train_time:26335ms step_avg:65.67ms +[2025-07-07 08:10:37] [Rank 0] step:421/10000 train_time:27667ms step_avg:65.72ms 
+[2025-07-07 08:10:37] [Rank 0] step:421/10000 train_time:27667ms step_avg:65.72ms +[2025-07-07 08:10:38] [Rank 0] step:441/10000 train_time:28998ms step_avg:65.76ms +[2025-07-07 08:10:38] [Rank 0] step:441/10000 train_time:28998ms step_avg:65.76ms +[2025-07-07 08:10:40] [Rank 0] step:461/10000 train_time:30332ms step_avg:65.80ms +[2025-07-07 08:10:40] [Rank 0] step:461/10000 train_time:30332ms step_avg:65.80ms +[2025-07-07 08:10:41] [Rank 0] step:481/10000 train_time:31664ms step_avg:65.83ms +[2025-07-07 08:10:41] [Rank 0] step:481/10000 train_time:31664ms step_avg:65.83ms +[2025-07-07 08:10:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:10:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:10:43] [Rank 0] PRINT: step:500/10000 train_loss:3.9016 val_loss:1.8786 train_time:33600ms step_avg:67.20ms +[2025-07-07 08:10:43] [Rank 0] PRINT: step:500/10000 train_loss:3.9016 val_loss:1.8786 train_time:33600ms step_avg:67.20ms +[2025-07-07 08:10:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 08:10:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 08:10:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 08:10:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 08:10:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 08:10:43] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 08:16:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 08:16:00] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 08:16:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 08:16:00] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 08:16:00] [Rank 0] Total Loss: 4.5313 +[2025-07-07 08:16:00] [Rank 0] Total Loss: 4.5313 +[2025-07-07 08:16:00] [Rank 0] Total FTA: 0.0897 +[2025-07-07 08:16:00] [Rank 0] Total FTA: 0.0897 +[2025-07-07 08:16:00] [Rank 0] Group 0 Loss: 4.6862 +[2025-07-07 08:16:00] [Rank 0] Group 0 Loss: 4.6862 +[2025-07-07 08:16:00] [Rank 0] Group 1 Loss: 4.3692 +[2025-07-07 08:16:00] [Rank 0] Group 1 Loss: 4.3692 +[2025-07-07 08:16:00] [Rank 0] Group 2 Loss: 4.4552 +[2025-07-07 08:16:00] [Rank 0] Group 2 Loss: 4.4552 +[2025-07-07 08:16:00] [Rank 0] Group 3 Loss: 4.5255 +[2025-07-07 08:16:00] [Rank 0] Group 3 Loss: 4.5255 +[2025-07-07 08:16:00] [Rank 0] Group 4 Loss: 4.5333 +[2025-07-07 08:16:00] [Rank 0] Group 4 Loss: 4.5333 +[2025-07-07 08:16:00] [Rank 0] Group 5 Loss: 4.4749 +[2025-07-07 08:16:00] [Rank 0] Group 5 Loss: 4.4749 +[2025-07-07 08:16:00] [Rank 0] Group 6 Loss: 4.5022 +[2025-07-07 08:16:00] [Rank 0] Group 6 Loss: 4.5022 +[2025-07-07 08:16:00] [Rank 0] Group 7 Loss: 4.5335 +[2025-07-07 08:16:00] [Rank 0] Group 7 Loss: 4.5335 +[2025-07-07 08:16:00] [Rank 0] Group 8 Loss: 4.5034 +[2025-07-07 08:16:00] [Rank 0] Group 8 Loss: 4.5034 +[2025-07-07 08:16:01] [Rank 0] Group 9 Loss: 4.5562 +[2025-07-07 08:16:01] [Rank 0] Group 9 Loss: 4.5562 +[2025-07-07 08:16:01] [Rank 0] Group 10 Loss: 4.5432 +[2025-07-07 08:16:01] [Rank 0] Group 10 Loss: 4.5432 +[2025-07-07 08:16:01] [Rank 0] Group 11 Loss: 4.5355 +[2025-07-07 08:16:01] [Rank 0] Group 11 Loss: 4.5355 +[2025-07-07 08:16:01] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-07 08:16:01] [Rank 0] Group 0 FTA: 0.1795 +[2025-07-07 
+[2025-07-07 08:16:01] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:16:01] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-07 08:16:01] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 08:16:01] [Rank 0] Group 4 FTA: 0.0130
+[2025-07-07 08:16:01] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-07 08:16:01] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-07 08:16:01] [Rank 0] Group 7 FTA: 0.1276
+[2025-07-07 08:16:01] [Rank 0] Group 8 FTA: 0.0729
+[2025-07-07 08:16:01] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 08:16:01] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 08:16:01] [Rank 0] Group 11 FTA: 0.0898
+[2025-07-07 08:16:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 08:16:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 08:16:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 08:16:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 08:16:02] [Rank 0] step:501/10000 train_time:33612ms step_avg:67.09ms
+[2025-07-07 08:16:03] [Rank 0] step:521/10000 train_time:34356ms step_avg:65.94ms
+[2025-07-07 08:16:05] [Rank 0] step:541/10000 train_time:35933ms step_avg:66.42ms
+[2025-07-07 08:16:06] [Rank 0] step:561/10000 train_time:37080ms step_avg:66.10ms
+[2025-07-07 08:16:07] [Rank 0] step:581/10000 train_time:38406ms step_avg:66.10ms
+[2025-07-07 08:16:09] [Rank 0] step:601/10000 train_time:39733ms step_avg:66.11ms
+[2025-07-07 08:16:10] [Rank 0] step:621/10000 train_time:41059ms step_avg:66.12ms
+[2025-07-07 08:16:11] [Rank 0] step:641/10000 train_time:42388ms step_avg:66.13ms
+[2025-07-07 08:16:13] [Rank 0] step:661/10000 train_time:43716ms step_avg:66.14ms
+[2025-07-07 08:16:14] [Rank 0] step:681/10000 train_time:45044ms step_avg:66.14ms
+[2025-07-07 08:16:15] [Rank 0] step:701/10000 train_time:46372ms step_avg:66.15ms
+[2025-07-07 08:16:17] [Rank 0] step:721/10000 train_time:48366ms step_avg:67.08ms
+[2025-07-07 08:16:18] [Rank 0] step:741/10000 train_time:49083ms step_avg:66.24ms
+[2025-07-07 08:16:19] [Rank 0] step:761/10000 train_time:50417ms step_avg:66.25ms
+[2025-07-07 08:16:21] [Rank 0] step:781/10000 train_time:51755ms step_avg:66.27ms
+[2025-07-07 08:16:22] [Rank 0] step:801/10000 train_time:53094ms step_avg:66.28ms
+[2025-07-07 08:16:23] [Rank 0] step:821/10000 train_time:54432ms step_avg:66.30ms
+[2025-07-07 08:16:25] [Rank 0] step:841/10000 train_time:55773ms step_avg:66.32ms
+[2025-07-07 08:16:26] [Rank 0] step:861/10000 train_time:57112ms step_avg:66.33ms
+[2025-07-07 08:16:27] [Rank 0] step:881/10000 train_time:58453ms step_avg:66.35ms
+[2025-07-07 08:16:29] [Rank 0] step:901/10000 train_time:60462ms step_avg:67.11ms
+[2025-07-07 08:16:30] [Rank 0] step:921/10000 train_time:61186ms step_avg:66.43ms
+[2025-07-07 08:16:31] [Rank 0] step:941/10000 train_time:62528ms step_avg:66.45ms
+[2025-07-07 08:16:33] [Rank 0] step:961/10000 train_time:63870ms step_avg:66.46ms
+[2025-07-07 08:16:34] [Rank 0] step:981/10000 train_time:65214ms step_avg:66.48ms
+[2025-07-07 08:16:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
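The divisibility warning above reduces to simple integer arithmetic. A minimal sketch of the missed-token count, assuming (the validation loop itself is not shown in this log) that the pass runs only floor(val_tokens / val_batch_size) full batches:

    # Minimal sketch; assumes the val loop drops the final partial batch
    val_tokens = 1966080
    val_batch_size = 262144
    full_batches = val_tokens // val_batch_size   # 7
    covered = full_batches * val_batch_size       # 1835008 tokens evaluated
    missed = val_tokens - covered                 # 131072 tokens potentially missed
    assert val_tokens % val_batch_size != 0       # the condition that triggers the warning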
+[2025-07-07 08:16:36] [Rank 0] PRINT: step:1000/10000 train_loss:1.7289 val_loss:1.5987 train_time:67168ms step_avg:67.17ms
+[2025-07-07 08:16:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:16:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:16:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:21:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:21:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:21:54] [Rank 0] Total Loss: 4.7476
+[2025-07-07 08:21:54] [Rank 0] Total FTA: 0.0907
+[2025-07-07 08:21:55] [Rank 0] Group 0 Loss: 4.9997
+[2025-07-07 08:21:55] [Rank 0] Group 1 Loss: 4.6619
+[2025-07-07 08:21:55] [Rank 0] Group 2 Loss: 4.5333
+[2025-07-07 08:21:55] [Rank 0] Group 3 Loss: 4.6626
+[2025-07-07 08:21:55] [Rank 0] Group 4 Loss: 4.7146
+[2025-07-07 08:21:55] [Rank 0] Group 5 Loss: 4.6544
+[2025-07-07 08:21:55] [Rank 0] Group 6 Loss: 4.6655
+[2025-07-07 08:21:55] [Rank 0] Group 7 Loss: 4.7844
+[2025-07-07 08:21:55] [Rank 0] Group 8 Loss: 4.7432
+[2025-07-07 08:21:55] [Rank 0] Group 9 Loss: 4.7057
+[2025-07-07 08:21:55] [Rank 0] Group 10 Loss: 4.7503
+[2025-07-07 08:21:55] [Rank 0] Group 11 Loss: 4.7777
+[2025-07-07 08:21:55] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-07 08:21:55] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:21:55] [Rank 0] Group 2 FTA: 0.0938
+[2025-07-07 08:21:55] [Rank 0] Group 3 FTA: 0.0964
+[2025-07-07 08:21:55] [Rank 0] Group 4 FTA: 0.0312
+[2025-07-07 08:21:55] [Rank 0] Group 5 FTA: 0.0443
+[2025-07-07 08:21:55] [Rank 0] Group 6 FTA: 0.0755
+[2025-07-07 08:21:55] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-07 08:21:55] [Rank 0] Group 8 FTA: 0.0755
+[2025-07-07 08:21:55] [Rank 0] Group 9 FTA: 0.1172
+[2025-07-07 08:21:55] [Rank 0] Group 10 FTA: 0.0898
+[2025-07-07 08:21:55] [Rank 0] Group 11 FTA: 0.1133
+[2025-07-07 08:21:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 08:21:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 08:21:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 08:21:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 08:21:56] [Rank 0] step:1001/10000 train_time:67178ms step_avg:67.11ms
+[2025-07-07 08:21:57] [Rank 0] step:1021/10000 train_time:67917ms step_avg:66.52ms
+[2025-07-07 08:21:59] [Rank 0] step:1041/10000 train_time:69249ms step_avg:66.52ms
+[2025-07-07 08:22:00] [Rank 0] step:1061/10000 train_time:70582ms step_avg:66.52ms
+[2025-07-07 08:22:01] [Rank 0] step:1081/10000 train_time:72602ms step_avg:67.16ms
+[2025-07-07 08:22:03] [Rank 0] step:1101/10000 train_time:73323ms step_avg:66.60ms
+[2025-07-07 08:22:04] [Rank 0] step:1121/10000 train_time:74659ms step_avg:66.60ms
+[2025-07-07 08:22:05] [Rank 0] step:1141/10000 train_time:75996ms step_avg:66.60ms
+[2025-07-07 08:22:07] [Rank 0] step:1161/10000 train_time:77334ms step_avg:66.61ms
+[2025-07-07 08:22:08] [Rank 0] step:1181/10000 train_time:78671ms step_avg:66.61ms
+[2025-07-07 08:22:09] [Rank 0] step:1201/10000 train_time:80009ms step_avg:66.62ms
+[2025-07-07 08:22:11] [Rank 0] step:1221/10000 train_time:81348ms step_avg:66.62ms
+[2025-07-07 08:22:12] [Rank 0] step:1241/10000 train_time:82688ms step_avg:66.63ms
+[2025-07-07 08:22:14] [Rank 0] step:1261/10000 train_time:84030ms step_avg:66.64ms
+[2025-07-07 08:22:15] [Rank 0] step:1281/10000 train_time:85422ms step_avg:66.68ms
+[2025-07-07 08:22:16] [Rank 0] step:1301/10000 train_time:86763ms step_avg:66.69ms
+[2025-07-07 08:22:18] [Rank 0] step:1321/10000 train_time:88104ms step_avg:66.69ms
+[2025-07-07 08:22:19] [Rank 0] step:1341/10000 train_time:89446ms step_avg:66.70ms
+[2025-07-07 08:22:20] [Rank 0] step:1361/10000 train_time:90788ms step_avg:66.71ms
+[2025-07-07 08:22:22] [Rank 0] step:1381/10000 train_time:92129ms step_avg:66.71ms
+[2025-07-07 08:22:23] [Rank 0] step:1401/10000 train_time:93471ms step_avg:66.72ms
+[2025-07-07 08:22:24] [Rank 0] step:1421/10000 train_time:94814ms step_avg:66.72ms
+[2025-07-07 08:22:26] [Rank 0] step:1441/10000 train_time:96156ms step_avg:66.73ms
+[2025-07-07 08:22:27] [Rank 0] step:1461/10000 train_time:97549ms step_avg:66.77ms
+[2025-07-07 08:22:28] [Rank 0] step:1481/10000 train_time:98892ms step_avg:66.77ms
+[2025-07-07 08:22:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:22:31] [Rank 0] PRINT: step:1500/10000 train_loss:1.4570 val_loss:1.3367 train_time:100846ms step_avg:67.23ms
+[2025-07-07 08:22:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:22:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
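The stratified-sampling message that opens each detailed-evaluation pass means the evaluation set is drawn per group rather than uniformly, and the following log line reports that the sampled set lands at 5633, slightly above the ~5000 target. The exact quota rule is not recorded in the log, so the code below is only a hypothetical sketch (the function name and quota rule are assumptions) of how independent per-group draws can overshoot a global target:

    # Hypothetical sketch of the stratified sampling step; quota rule is an assumption
    import math, random

    def stratified_sample(examples_by_group, target=5000):
        quota = math.ceil(target / len(examples_by_group))  # per-group quota, rounded up
        sampled = []
        for items in examples_by_group.values():
            # Rounding each group's quota up (and keeping small groups whole) can push
            # the total past the target, e.g. ~5000 requested vs. 5633 obtained here.
            sampled.extend(random.sample(items, min(quota, len(items))))
        return sampled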
+[2025-07-07 08:22:31] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:27:49] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:27:49] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:27:49] [Rank 0] Total Loss: 4.7396
+[2025-07-07 08:27:49] [Rank 0] Total FTA: 0.0921
+[2025-07-07 08:27:49] [Rank 0] Group 0 Loss: 4.8480
+[2025-07-07 08:27:49] [Rank 0] Group 1 Loss: 4.6159
+[2025-07-07 08:27:49] [Rank 0] Group 2 Loss: 4.5194
+[2025-07-07 08:27:49] [Rank 0] Group 3 Loss: 4.7459
+[2025-07-07 08:27:49] [Rank 0] Group 4 Loss: 4.7652
+[2025-07-07 08:27:49] [Rank 0] Group 5 Loss: 4.7231
+[2025-07-07 08:27:49] [Rank 0] Group 6 Loss: 4.6785
+[2025-07-07 08:27:49] [Rank 0] Group 7 Loss: 4.7782
+[2025-07-07 08:27:49] [Rank 0] Group 8 Loss: 4.7219
+[2025-07-07 08:27:49] [Rank 0] Group 9 Loss: 4.7301
+[2025-07-07 08:27:49] [Rank 0] Group 10 Loss: 4.7729
+[2025-07-07 08:27:49] [Rank 0] Group 11 Loss: 4.7821
+[2025-07-07 08:27:49] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 08:27:49] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:27:49] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-07 08:27:49] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 08:27:49] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 08:27:49] [Rank 0] Group 5 FTA: 0.0755
+[2025-07-07 08:27:49] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-07 08:27:49] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-07 08:27:49] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-07 08:27:49] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 08:27:49] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-07 08:27:49] [Rank 0] Group 11 FTA: 0.0830
+[2025-07-07 08:27:50] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 08:27:50] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 08:27:50] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 08:27:50] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 08:27:50] [Rank 0] step:1501/10000 train_time:100855ms step_avg:67.19ms
+[2025-07-07 08:27:52] [Rank 0] step:1521/10000 train_time:101605ms step_avg:66.80ms
+[2025-07-07 08:27:53] [Rank 0] step:1541/10000 train_time:102942ms step_avg:66.80ms
+[2025-07-07 08:27:55] [Rank 0] step:1561/10000 train_time:104276ms step_avg:66.80ms
+[2025-07-07 08:27:56] [Rank 0] step:1581/10000 train_time:105622ms step_avg:66.81ms
+[2025-07-07 08:27:57] [Rank 0] step:1601/10000 train_time:106959ms step_avg:66.81ms
+[2025-07-07 08:27:59] [Rank 0] step:1621/10000 train_time:108344ms step_avg:66.84ms
+[2025-07-07 08:28:00] [Rank 0] step:1641/10000 train_time:109690ms step_avg:66.84ms
+[2025-07-07 08:28:01] [Rank 0] step:1661/10000 train_time:111028ms step_avg:66.84ms
+[2025-07-07 08:28:03] [Rank 0] step:1681/10000 train_time:112369ms step_avg:66.85ms
+[2025-07-07 08:28:04] [Rank 0] step:1701/10000 train_time:113709ms step_avg:66.85ms
+[2025-07-07 08:28:05] [Rank 0] step:1721/10000 train_time:115050ms step_avg:66.85ms
+[2025-07-07 08:28:07] [Rank 0] step:1741/10000 train_time:116391ms step_avg:66.85ms
+[2025-07-07 08:28:08] [Rank 0] step:1761/10000 train_time:117733ms step_avg:66.86ms
+[2025-07-07 08:28:09] [Rank 0] step:1781/10000 train_time:119075ms step_avg:66.86ms
+[2025-07-07 08:28:11] [Rank 0] step:1801/10000 train_time:120417ms step_avg:66.86ms
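The step_avg figure in each progress line is consistent with a cumulative mean, i.e. the total wall-clock training time divided by the step count; checking it against the step:1801 line above:

    # step_avg as a cumulative mean, verified against the log line above
    train_time_ms = 120417            # cumulative train_time at step 1801
    step = 1801
    step_avg = train_time_ms / step   # 66.86 ms, matching "step_avg:66.86ms"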
+[2025-07-07 08:28:12] [Rank 0] step:1821/10000 train_time:121817ms step_avg:66.90ms
+[2025-07-07 08:28:13] [Rank 0] step:1841/10000 train_time:123160ms step_avg:66.90ms
+[2025-07-07 08:28:15] [Rank 0] step:1861/10000 train_time:124503ms step_avg:66.90ms
+[2025-07-07 08:28:16] [Rank 0] step:1881/10000 train_time:125847ms step_avg:66.90ms
+[2025-07-07 08:28:17] [Rank 0] step:1901/10000 train_time:127191ms step_avg:66.91ms
+[2025-07-07 08:28:19] [Rank 0] step:1921/10000 train_time:128537ms step_avg:66.91ms
+[2025-07-07 08:28:20] [Rank 0] step:1941/10000 train_time:129884ms step_avg:66.92ms
+[2025-07-07 08:28:21] [Rank 0] step:1961/10000 train_time:131229ms step_avg:66.92ms
+[2025-07-07 08:28:23] [Rank 0] step:1981/10000 train_time:132829ms step_avg:67.05ms
+[2025-07-07 08:28:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:28:25] [Rank 0] PRINT: step:2000/10000 train_loss:1.3120 val_loss:1.2977 train_time:134592ms step_avg:67.30ms
+[2025-07-07 08:28:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:28:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:28:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:33:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:33:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:33:42] [Rank 0] Total Loss: 4.9267
+[2025-07-07 08:33:43] [Rank 0] Total FTA: 0.0996
+[2025-07-07 08:33:43] [Rank 0] Group 0 Loss: 5.1114
+[2025-07-07 08:33:43] [Rank 0] Group 1 Loss: 4.7835
+[2025-07-07 08:33:43] [Rank 0] Group 2 Loss: 4.5245
+[2025-07-07 08:33:43] [Rank 0] Group 3 Loss: 5.0063
+[2025-07-07 08:33:43] [Rank 0] Group 4 Loss: 4.9261
+[2025-07-07 08:33:43] [Rank 0] Group 5 Loss: 4.9281
+[2025-07-07 08:33:43] [Rank 0] Group 6 Loss: 4.8394
+[2025-07-07 08:33:43] [Rank 0] Group 7 Loss: 4.9423
+[2025-07-07 08:33:43] [Rank 0] Group 8 Loss: 4.9506
+[2025-07-07 08:33:43] [Rank 0] Group 9 Loss: 5.0098
+[2025-07-07 08:33:43] [Rank 0] Group 10 Loss: 4.9735
+[2025-07-07 08:33:43] [Rank 0] Group 11 Loss: 4.9362
+[2025-07-07 08:33:43] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 08:33:43] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:33:43] [Rank 0] Group 2 FTA: 0.0885
+[2025-07-07 08:33:43] [Rank 0] Group 3 FTA: 0.0807
+[2025-07-07 08:33:43] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 08:33:43] [Rank 0] Group 5 FTA: 0.1068
+[2025-07-07 08:33:43] [Rank 0] Group 6 FTA: 0.1302
+[2025-07-07 08:33:43] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-07 08:33:43] [Rank 0] Group 8 FTA: 0.0911
+[2025-07-07 08:33:43] [Rank 0] Group 9 FTA: 0.1602
+[2025-07-07 08:33:43] [Rank 0] Group 10 FTA: 0.0938
+[2025-07-07 08:33:43] [Rank 0] Group 11 FTA: 0.1084
+[2025-07-07 08:33:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 08:33:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 08:33:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 08:33:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 08:33:44] [Rank 0] step:2001/10000 train_time:134601ms step_avg:67.27ms
+[2025-07-07 08:33:45] [Rank 0] step:2021/10000 train_time:135341ms step_avg:66.97ms
+[2025-07-07 08:33:47] [Rank 0] step:2041/10000 train_time:136677ms step_avg:66.97ms
+[2025-07-07 08:33:48] [Rank 0] step:2061/10000 train_time:138013ms step_avg:66.96ms
+[2025-07-07 08:33:49] [Rank 0] step:2081/10000 train_time:139351ms step_avg:66.96ms
+[2025-07-07 08:33:51] [Rank 0] step:2101/10000 train_time:140692ms step_avg:66.96ms
+[2025-07-07 08:33:52] [Rank 0] step:2121/10000 train_time:142030ms step_avg:66.96ms
+[2025-07-07 08:33:53] [Rank 0] step:2141/10000 train_time:143369ms step_avg:66.96ms
+[2025-07-07 08:33:55] [Rank 0] step:2161/10000 train_time:144710ms step_avg:66.96ms
+[2025-07-07 08:33:56] [Rank 0] step:2181/10000 train_time:146096ms step_avg:66.99ms
+[2025-07-07 08:33:58] [Rank 0] step:2201/10000 train_time:147438ms step_avg:66.99ms
+[2025-07-07 08:33:59] [Rank 0] step:2221/10000 train_time:148779ms step_avg:66.99ms
+[2025-07-07 08:34:00] [Rank 0] step:2241/10000 train_time:150132ms step_avg:66.99ms
+[2025-07-07 08:34:02] [Rank 0] step:2261/10000 train_time:151498ms step_avg:67.00ms
+[2025-07-07 08:34:03] [Rank 0] step:2281/10000 train_time:152864ms step_avg:67.02ms
+[2025-07-07 08:34:04] [Rank 0] step:2301/10000 train_time:154232ms step_avg:67.03ms
+[2025-07-07 08:34:06] [Rank 0] step:2321/10000 train_time:155608ms step_avg:67.04ms
+[2025-07-07 08:34:07] [Rank 0] step:2341/10000 train_time:156977ms step_avg:67.06ms
+[2025-07-07 08:34:09] [Rank 0] step:2361/10000 train_time:158400ms step_avg:67.09ms
+[2025-07-07 08:34:10] [Rank 0] step:2381/10000 train_time:159767ms step_avg:67.10ms
+[2025-07-07 08:34:11] [Rank 0] step:2401/10000 train_time:161136ms step_avg:67.11ms
+[2025-07-07 08:34:13] [Rank 0] step:2421/10000 train_time:162504ms step_avg:67.12ms
+[2025-07-07 08:34:14] [Rank 0] step:2441/10000 train_time:163872ms step_avg:67.13ms
+[2025-07-07 08:34:15] [Rank 0] step:2461/10000 train_time:165241ms step_avg:67.14ms
+[2025-07-07 08:34:17] [Rank 0] step:2481/10000 train_time:166610ms step_avg:67.15ms
+[2025-07-07 08:34:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:34:19] [Rank 0] PRINT: step:2500/10000 train_loss:1.2815 val_loss:1.2713 train_time:168601ms step_avg:67.44ms
+[2025-07-07 08:34:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:34:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:34:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:39:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:39:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:39:37] [Rank 0] Total Loss: 5.0651
+[2025-07-07 08:39:37] [Rank 0] Total FTA: 0.1307
+[2025-07-07 08:39:37] [Rank 0] Group 0 Loss: 5.2538
+[2025-07-07 08:39:37] [Rank 0] Group 1 Loss: 4.8206
+[2025-07-07 08:39:37] [Rank 0] Group 2 Loss: 4.8604
+[2025-07-07 08:39:37] [Rank 0] Group 3 Loss: 5.0537
+[2025-07-07 08:39:37] [Rank 0] Group 4 Loss: 5.1594
+[2025-07-07 08:39:37] [Rank 0] Group 5 Loss: 5.0532
+[2025-07-07 08:39:37] [Rank 0] Group 6 Loss: 4.9858
+[2025-07-07 08:39:37] [Rank 0] Group 7 Loss: 5.0434
+[2025-07-07 08:39:37] [Rank 0] Group 8 Loss: 5.0257
+[2025-07-07 08:39:37] [Rank 0] Group 9 Loss: 5.0854
+[2025-07-07 08:39:37] [Rank 0] Group 10 Loss: 5.1017
+[2025-07-07 08:39:37] [Rank 0] Group 11 Loss: 5.0944
+[2025-07-07 08:39:37] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-07 08:39:37] [Rank 0] Group 1 FTA: 0.1823
+[2025-07-07 08:39:37] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 08:39:37] [Rank 0] Group 3 FTA: 0.1250
+[2025-07-07 08:39:37] [Rank 0] Group 4 FTA: 0.0599
+[2025-07-07 08:39:37] [Rank 0] Group 5 FTA: 0.0729
+[2025-07-07 08:39:37] [Rank 0] Group 6 FTA: 0.1146
+[2025-07-07 08:39:37] [Rank 0] Group 7 FTA: 0.1432
+[2025-07-07 08:39:37] [Rank 0] Group 8 FTA: 0.1120
+[2025-07-07 08:39:37] [Rank 0] Group 9 FTA: 0.1250
+[2025-07-07 08:39:37] [Rank 0] Group 10 FTA: 0.1426
+[2025-07-07 08:39:37] [Rank 0] Group 11 FTA: 0.1230
+[2025-07-07 08:39:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 08:39:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 08:39:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 08:39:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 08:39:38] [Rank 0] step:2501/10000 train_time:168610ms step_avg:67.42ms
+[2025-07-07 08:39:40] [Rank 0] step:2521/10000 train_time:170050ms step_avg:67.45ms
+[2025-07-07 08:39:41] [Rank 0] step:2541/10000 train_time:170783ms step_avg:67.21ms
+[2025-07-07 08:39:43] [Rank 0] step:2561/10000 train_time:172144ms step_avg:67.22ms
+[2025-07-07 08:39:44] [Rank 0] step:2581/10000 train_time:173504ms step_avg:67.22ms
+[2025-07-07 08:39:45] [Rank 0] step:2601/10000 train_time:174865ms step_avg:67.23ms
+[2025-07-07 08:39:47] [Rank 0] step:2621/10000 train_time:176228ms step_avg:67.24ms
+[2025-07-07 08:39:48] [Rank 0] step:2641/10000 train_time:177592ms step_avg:67.24ms
+[2025-07-07 08:39:49] [Rank 0] step:2661/10000 train_time:178955ms step_avg:67.25ms
+[2025-07-07 08:39:51] [Rank 0] step:2681/10000 train_time:180320ms step_avg:67.26ms
+[2025-07-07 08:39:52] [Rank 0] step:2701/10000 train_time:181940ms step_avg:67.36ms
+[2025-07-07 08:39:53] [Rank 0] step:2721/10000 train_time:183050ms step_avg:67.27ms
+[2025-07-07 08:39:55] [Rank 0] step:2741/10000 train_time:184415ms step_avg:67.28ms
+[2025-07-07 08:39:56] [Rank 0] step:2761/10000 train_time:185780ms step_avg:67.29ms
+[2025-07-07 08:39:58] [Rank 0] step:2781/10000 train_time:187147ms step_avg:67.29ms
+[2025-07-07 08:39:59] [Rank 0] step:2801/10000 train_time:188513ms step_avg:67.30ms
+[2025-07-07 08:40:00] [Rank 0] step:2821/10000 train_time:189880ms step_avg:67.31ms
+[2025-07-07 08:40:02] [Rank 0] step:2841/10000 train_time:191249ms step_avg:67.32ms
+[2025-07-07 08:40:03] [Rank 0] step:2861/10000 train_time:192618ms step_avg:67.33ms
+[2025-07-07 08:40:04] [Rank 0] step:2881/10000 train_time:194659ms step_avg:67.57ms
+[2025-07-07 08:40:06] [Rank 0] step:2901/10000 train_time:195399ms step_avg:67.36ms
+[2025-07-07 08:40:07] [Rank 0] step:2921/10000 train_time:196768ms step_avg:67.36ms
+[2025-07-07 08:40:09] [Rank 0] step:2941/10000 train_time:198139ms step_avg:67.37ms
+[2025-07-07 08:40:10] [Rank 0] step:2961/10000 train_time:199509ms step_avg:67.38ms
+[2025-07-07 08:40:11] [Rank 0] step:2981/10000 train_time:200878ms step_avg:67.39ms
+[2025-07-07 08:40:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:40:14] [Rank 0] PRINT: step:3000/10000 train_loss:1.2514 val_loss:1.2453 train_time:202871ms step_avg:67.62ms
+[2025-07-07 08:40:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:40:14] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:40:14] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:45:33] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:45:33] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:45:33] [Rank 0] Total Loss: 5.1828
+[2025-07-07 08:45:33] [Rank 0] Total FTA: 0.1694
+[2025-07-07 08:45:33] [Rank 0] Group 0 Loss: 5.3509
+[2025-07-07 08:45:33] [Rank 0] Group 1 Loss: 4.9635
+[2025-07-07 08:45:33] [Rank 0] Group 2 Loss: 4.9127
+[2025-07-07 08:45:33] [Rank 0] Group 3 Loss: 5.2594
+[2025-07-07 08:45:33] [Rank 0] Group 4 Loss: 5.2536
+[2025-07-07 08:45:33] [Rank 0] Group 5 Loss: 5.2565
+[2025-07-07 08:45:33] [Rank 0] Group 6 Loss: 5.0706
+[2025-07-07 08:45:33] [Rank 0] Group 7 Loss: 5.2358
+[2025-07-07 08:45:33] [Rank 0] Group 8 Loss: 5.1911
+[2025-07-07 08:45:33] [Rank 0] Group 9 Loss: 5.1497
+[2025-07-07 08:45:33] [Rank 0] Group 10 Loss: 5.2150
+[2025-07-07 08:45:33] [Rank 0] Group 11 Loss: 5.1686
+[2025-07-07 08:45:33] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 08:45:33] [Rank 0] Group 1 FTA: 0.1667
+[2025-07-07 08:45:33] [Rank 0] Group 2 FTA: 0.2214
+[2025-07-07 08:45:33] [Rank 0] Group 3 FTA: 0.1771
+[2025-07-07 08:45:33] [Rank 0] Group 4 FTA: 0.1120
+[2025-07-07 08:45:33] [Rank 0] Group 5 FTA: 0.1979
+[2025-07-07 08:45:33] [Rank 0] Group 6 FTA: 0.1536
+[2025-07-07 08:45:33] [Rank 0] Group 7 FTA: 0.1823
+[2025-07-07 08:45:33] [Rank 0] Group 8 FTA: 0.2057
+[2025-07-07 08:45:33] [Rank 0] Group 9 FTA: 0.1641
+[2025-07-07 08:45:33] [Rank 0] Group 10 FTA: 0.1543
+[2025-07-07 08:45:33] [Rank 0] Group 11 FTA: 0.1592
+[2025-07-07 08:45:34] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 08:45:34] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 08:45:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 08:45:35] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 08:45:35] [Rank 0] step:3001/10000 train_time:202881ms step_avg:67.60ms
+[2025-07-07 08:45:36] [Rank 0] step:3021/10000 train_time:203652ms step_avg:67.41ms
+[2025-07-07 08:45:37] [Rank 0] step:3041/10000 train_time:205009ms step_avg:67.41ms
+[2025-07-07 08:45:39] [Rank 0] step:3061/10000 train_time:206368ms step_avg:67.42ms
+[2025-07-07 08:45:40] [Rank 0] step:3081/10000 train_time:207769ms step_avg:67.44ms
+[2025-07-07 08:45:42] [Rank 0] step:3101/10000 train_time:209132ms step_avg:67.44ms
+[2025-07-07 08:45:43] [Rank 0] step:3121/10000 train_time:210494ms step_avg:67.44ms
+[2025-07-07 08:45:44] [Rank 0] step:3141/10000 train_time:211857ms step_avg:67.45ms
+[2025-07-07 08:45:46] [Rank 0] step:3161/10000 train_time:213220ms step_avg:67.45ms
+[2025-07-07 08:45:47] [Rank 0] step:3181/10000 train_time:214583ms step_avg:67.46ms
+[2025-07-07 08:45:48] [Rank 0] step:3201/10000 train_time:215947ms step_avg:67.46ms
+[2025-07-07 08:45:50] [Rank 0] step:3221/10000 train_time:217312ms step_avg:67.47ms
+[2025-07-07 08:45:51] [Rank 0] step:3241/10000 train_time:218679ms step_avg:67.47ms
+[2025-07-07 08:45:53] [Rank 0] step:3261/10000 train_time:220091ms step_avg:67.49ms
+[2025-07-07 08:45:54] [Rank 0] step:3281/10000 train_time:221558ms step_avg:67.53ms
+[2025-07-07 08:45:55] [Rank 0] step:3301/10000 train_time:222924ms step_avg:67.53ms
+[2025-07-07 08:45:57] [Rank 0] step:3321/10000 train_time:224290ms step_avg:67.54ms
+[2025-07-07 08:45:58] [Rank 0] step:3341/10000 train_time:225657ms step_avg:67.54ms
+[2025-07-07 08:46:00] [Rank 0] step:3361/10000 train_time:227024ms step_avg:67.55ms
+[2025-07-07 08:46:01] [Rank 0] step:3381/10000 train_time:228399ms step_avg:67.55ms
+[2025-07-07 08:46:02] [Rank 0] step:3401/10000 train_time:229766ms step_avg:67.56ms
+[2025-07-07 08:46:04] [Rank 0] step:3421/10000 train_time:231182ms step_avg:67.58ms
+[2025-07-07 08:46:05] [Rank 0] step:3441/10000 train_time:232543ms step_avg:67.58ms
+[2025-07-07 08:46:06] [Rank 0] step:3461/10000 train_time:233913ms step_avg:67.59ms
+[2025-07-07 08:46:08] [Rank 0] step:3481/10000 train_time:235284ms step_avg:67.59ms
+[2025-07-07 08:46:09] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:46:10] [Rank 0] PRINT: step:3500/10000 train_loss:1.2019 val_loss:1.2074 train_time:237278ms step_avg:67.79ms
+[2025-07-07 08:46:10] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:46:10] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:46:10] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:51:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:51:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:51:29] [Rank 0] Total Loss: 5.1859
+[2025-07-07 08:51:29] [Rank 0] Total FTA: 0.2645
+[2025-07-07 08:51:29] [Rank 0] Group 0 Loss: 5.3690
+[2025-07-07 08:51:29] [Rank 0] Group 1 Loss: 5.0238
+[2025-07-07 08:51:29] [Rank 0] Group 2 Loss: 4.9805
+[2025-07-07 08:51:29] [Rank 0] Group 3 Loss: 5.2212
+[2025-07-07 08:51:29] [Rank 0] Group 4 Loss: 5.2162
+[2025-07-07 08:51:29] [Rank 0] Group 5 Loss: 5.2499
+[2025-07-07 08:51:29] [Rank 0] Group 6 Loss: 5.0581
+[2025-07-07 08:51:29] [Rank 0] Group 7 Loss: 5.1681
+[2025-07-07 08:51:29] [Rank 0] Group 8 Loss: 5.1721
+[2025-07-07 08:51:29] [Rank 0] Group 9 Loss: 5.1931
+[2025-07-07 08:51:29] [Rank 0] Group 10 Loss: 5.2000
+[2025-07-07 08:51:29] [Rank 0] Group 11 Loss: 5.1887
+[2025-07-07 08:51:29] [Rank 0] Group 0 FTA: 0.3316
+[2025-07-07 08:51:29] [Rank 0] Group 1 FTA: 0.3177
+[2025-07-07 08:51:29] [Rank 0] Group 2 FTA: 0.4870
+[2025-07-07 08:51:29] [Rank 0] Group 3 FTA: 0.3229
+[2025-07-07 08:51:29] [Rank 0] Group 4 FTA: 0.2370
+[2025-07-07 08:51:29] [Rank 0] Group 5 FTA: 0.2682
+[2025-07-07 08:51:29] [Rank 0] Group 6 FTA: 0.1693
+[2025-07-07 08:51:29] [Rank 0] Group 7 FTA: 0.2266
+[2025-07-07 08:51:29] [Rank 0] Group 8 FTA: 0.2005
+[2025-07-07 08:51:29] [Rank 0] Group 9 FTA: 0.2344
+[2025-07-07 08:51:29] [Rank 0] Group 10 FTA: 0.2129
+[2025-07-07 08:51:29] [Rank 0] Group 11 FTA: 0.2051
+[2025-07-07 08:51:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 08:51:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 08:51:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 08:51:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 08:51:30] [Rank 0] step:3501/10000 train_time:237288ms step_avg:67.78ms
+[2025-07-07 08:51:32] [Rank 0] step:3521/10000 train_time:238049ms step_avg:67.61ms
+[2025-07-07 08:51:33] [Rank 0] step:3541/10000 train_time:239408ms step_avg:67.61ms
+[2025-07-07 08:51:34] [Rank 0] step:3561/10000 train_time:240768ms step_avg:67.61ms
+[2025-07-07 08:51:36] [Rank 0] step:3581/10000 train_time:242130ms step_avg:67.62ms
+[2025-07-07 08:51:37] [Rank 0] step:3601/10000 train_time:243491ms step_avg:67.62ms
+[2025-07-07 08:51:38] [Rank 0] step:3621/10000 train_time:244908ms step_avg:67.64ms
+[2025-07-07 08:51:40] [Rank 0] step:3641/10000 train_time:246272ms step_avg:67.64ms
+[2025-07-07 08:51:41] [Rank 0] step:3661/10000 train_time:247635ms step_avg:67.64ms
+[2025-07-07 08:51:43] [Rank 0] step:3681/10000 train_time:248998ms step_avg:67.64ms
+[2025-07-07 08:51:44] [Rank 0] step:3701/10000 train_time:250364ms step_avg:67.65ms
+[2025-07-07 08:51:45] [Rank 0] step:3721/10000 train_time:251729ms step_avg:67.65ms
+[2025-07-07 08:51:47] [Rank 0] step:3741/10000 train_time:253096ms step_avg:67.65ms
+[2025-07-07 08:51:48] [Rank 0] step:3761/10000 train_time:254463ms step_avg:67.66ms
+[2025-07-07 08:51:49] [Rank 0] step:3781/10000 train_time:256080ms step_avg:67.73ms
+[2025-07-07 08:51:51] [Rank 0] step:3801/10000 train_time:257228ms step_avg:67.67ms
+[2025-07-07 08:51:52] [Rank 0] step:3821/10000 train_time:258596ms step_avg:67.68ms
+[2025-07-07 08:51:53] [Rank 0] step:3841/10000 train_time:259963ms step_avg:67.68ms
+[2025-07-07 08:51:55] [Rank 0] step:3861/10000 train_time:261332ms step_avg:67.69ms
+[2025-07-07 08:51:56] [Rank 0] step:3881/10000 train_time:262699ms step_avg:67.69ms
+[2025-07-07 08:51:58] [Rank 0] step:3901/10000 train_time:264075ms step_avg:67.69ms
+[2025-07-07 08:51:59] [Rank 0] step:3921/10000 train_time:265436ms step_avg:67.70ms
+[2025-07-07 08:52:00] [Rank 0] step:3941/10000 train_time:266804ms step_avg:67.70ms
+[2025-07-07 08:52:02] [Rank 0] step:3961/10000 train_time:268220ms step_avg:67.72ms
+[2025-07-07 08:52:03] [Rank 0] step:3981/10000 train_time:269584ms step_avg:67.72ms
+[2025-07-07 08:52:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:52:05] [Rank 0] PRINT: step:4000/10000 train_loss:1.1355 val_loss:1.1550 train_time:271577ms step_avg:67.89ms
+[2025-07-07 08:52:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:52:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
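The detailed evaluation sampled here draws from the QA validation pool per class rather than uniformly, which is why the ~5000-sample target lands on 5633 every time: the twelve "Group" buckets in the results are filled to a per-class quota, and rounding the quota up per bucket overshoots the total. The sampler itself is not shown in this excerpt; a minimal sketch of the idea in Python, with the grouping key and quota rule as assumptions:

import math
import random
from collections import defaultdict

def stratified_sample(samples, target_total=5000, group_key=lambda s: s["group"], seed=0):
    # Bucket by class, then draw up to an equal quota from each bucket.
    rng = random.Random(seed)
    buckets = defaultdict(list)
    for s in samples:
        buckets[group_key(s)].append(s)
    quota = math.ceil(target_total / len(buckets))  # ceil per bucket -> total can exceed target
    picked = []
    for group in buckets.values():
        picked.extend(rng.sample(group, min(quota, len(group))))
    return picked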
+[2025-07-07 08:52:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:57:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:57:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:57:25] [Rank 0] Total Loss: 5.3533
+[2025-07-07 08:57:25] [Rank 0] Total FTA: 0.3648
+[2025-07-07 08:57:25] [Rank 0] Group 0 Loss: 5.5935
+[2025-07-07 08:57:25] [Rank 0] Group 1 Loss: 5.1721
+[2025-07-07 08:57:25] [Rank 0] Group 2 Loss: 5.0301
+[2025-07-07 08:57:25] [Rank 0] Group 3 Loss: 5.4079
+[2025-07-07 08:57:25] [Rank 0] Group 4 Loss: 5.2719
+[2025-07-07 08:57:25] [Rank 0] Group 5 Loss: 5.3111
+[2025-07-07 08:57:25] [Rank 0] Group 6 Loss: 5.1989
+[2025-07-07 08:57:25] [Rank 0] Group 7 Loss: 5.3658
+[2025-07-07 08:57:25] [Rank 0] Group 8 Loss: 5.3844
+[2025-07-07 08:57:25] [Rank 0] Group 9 Loss: 5.3570
+[2025-07-07 08:57:25] [Rank 0] Group 10 Loss: 5.4098
+[2025-07-07 08:57:25] [Rank 0] Group 11 Loss: 5.4003
+[2025-07-07 08:57:25] [Rank 0] Group 0 FTA: 0.3290
+[2025-07-07 08:57:25] [Rank 0] Group 1 FTA: 0.6745
+[2025-07-07 08:57:25] [Rank 0] Group 2 FTA: 0.4766
+[2025-07-07 08:57:25] [Rank 0] Group 3 FTA: 0.3177
+[2025-07-07 08:57:25] [Rank 0] Group 4 FTA: 0.3568
+[2025-07-07 08:57:25] [Rank 0] Group 5 FTA: 0.3125
+[2025-07-07 08:57:25] [Rank 0] Group 6 FTA: 0.2995
+[2025-07-07 08:57:25] [Rank 0] Group 7 FTA: 0.3203
+[2025-07-07 08:57:25] [Rank 0] Group 8 FTA: 0.3125
+[2025-07-07 08:57:25] [Rank 0] Group 9 FTA: 0.3047
+[2025-07-07 08:57:25] [Rank 0] Group 10 FTA: 0.3574
+[2025-07-07 08:57:25] [Rank 0] Group 11 FTA: 0.3535
+[2025-07-07 08:57:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 08:57:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 08:57:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 08:57:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 08:57:26] [Rank 0] step:4001/10000 train_time:271586ms step_avg:67.88ms
+[2025-07-07 08:57:28] [Rank 0] step:4021/10000 train_time:272357ms step_avg:67.73ms
+[2025-07-07 08:57:29] [Rank 0] step:4041/10000 train_time:273717ms step_avg:67.74ms
+[2025-07-07 08:57:30] [Rank 0] step:4061/10000 train_time:275080ms step_avg:67.74ms
+[2025-07-07 08:57:32] [Rank 0] step:4081/10000 train_time:276440ms step_avg:67.74ms
+[2025-07-07 08:57:33] [Rank 0] step:4101/10000 train_time:277803ms step_avg:67.74ms
+[2025-07-07 08:57:34] [Rank 0] step:4121/10000 train_time:279166ms step_avg:67.74ms
+[2025-07-07 08:57:36] [Rank 0] step:4141/10000 train_time:281206ms step_avg:67.91ms
+[2025-07-07 08:57:37] [Rank 0] step:4161/10000 train_time:281942ms step_avg:67.76ms
+[2025-07-07 08:57:39] [Rank 0] step:4181/10000 train_time:283306ms step_avg:67.76ms
+[2025-07-07 08:57:40] [Rank 0] step:4201/10000 train_time:284672ms step_avg:67.76ms
+[2025-07-07 08:57:41] [Rank 0] step:4221/10000 train_time:286038ms step_avg:67.77ms
+[2025-07-07 08:57:43] [Rank 0] step:4241/10000 train_time:287404ms step_avg:67.77ms
+[2025-07-07 08:57:44] [Rank 0] step:4261/10000 train_time:288771ms step_avg:67.77ms
+[2025-07-07 08:57:45] [Rank 0] step:4281/10000 train_time:290137ms step_avg:67.77ms
+[2025-07-07 08:57:47] [Rank 0] step:4301/10000 train_time:291505ms step_avg:67.78ms
+[2025-07-07 08:57:48] [Rank 0] step:4321/10000 train_time:293527ms step_avg:67.93ms
+[2025-07-07 08:57:50] [Rank 0] step:4341/10000 train_time:294265ms step_avg:67.79ms
+[2025-07-07 08:57:51] [Rank 0] step:4361/10000 train_time:295634ms step_avg:67.79ms
+[2025-07-07 08:57:52] [Rank 0] step:4381/10000 train_time:297003ms step_avg:67.79ms
+[2025-07-07 08:57:54] [Rank 0] step:4401/10000 train_time:298373ms step_avg:67.80ms
+[2025-07-07 08:57:55] [Rank 0] step:4421/10000 train_time:299743ms step_avg:67.80ms
+[2025-07-07 08:57:56] [Rank 0] step:4441/10000 train_time:301112ms step_avg:67.80ms
+[2025-07-07 08:57:58] [Rank 0] step:4461/10000 train_time:302482ms step_avg:67.81ms
+[2025-07-07 08:57:59] [Rank 0] step:4481/10000 train_time:303853ms step_avg:67.81ms
+[2025-07-07 08:58:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:58:01] [Rank 0] PRINT: step:4500/10000 train_loss:1.0760 val_loss:1.1351 train_time:305846ms step_avg:67.97ms
+[2025-07-07 08:58:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:58:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
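The warning printed before every validation pass is plain integer division: 1966080 val tokens split into 262144-token batches (presumably val_seq_len 65536 times a world size of 4) gives 7.5 batches, so only 7 full batches are scored and the trailing half batch is silently dropped. Illustrated:

val_tokens = 1966080                           # from config.json
val_batch_size = 262144                        # assumed val_seq_len * world_size
full_batches = val_tokens // val_batch_size    # 7
covered = full_batches * val_batch_size        # 1835008
missed = val_tokens - covered                  # 131072 tokens skipped per eval
assert val_tokens % val_batch_size != 0        # hence the warning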
+[2025-07-07 08:58:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:03:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:03:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:03:20] [Rank 0] Total Loss: 5.4543
+[2025-07-07 09:03:20] [Rank 0] Total FTA: 0.4278
+[2025-07-07 09:03:20] [Rank 0] Group 0 Loss: 5.5098
+[2025-07-07 09:03:20] [Rank 0] Group 1 Loss: 5.2930
+[2025-07-07 09:03:20] [Rank 0] Group 2 Loss: 5.1720
+[2025-07-07 09:03:20] [Rank 0] Group 3 Loss: 5.5381
+[2025-07-07 09:03:20] [Rank 0] Group 4 Loss: 5.5633
+[2025-07-07 09:03:20] [Rank 0] Group 5 Loss: 5.4204
+[2025-07-07 09:03:20] [Rank 0] Group 6 Loss: 5.3868
+[2025-07-07 09:03:20] [Rank 0] Group 7 Loss: 5.5147
+[2025-07-07 09:03:20] [Rank 0] Group 8 Loss: 5.4630
+[2025-07-07 09:03:20] [Rank 0] Group 9 Loss: 5.4469
+[2025-07-07 09:03:20] [Rank 0] Group 10 Loss: 5.4944
+[2025-07-07 09:03:20] [Rank 0] Group 11 Loss: 5.5006
+[2025-07-07 09:03:20] [Rank 0] Group 0 FTA: 0.6645
+[2025-07-07 09:03:20] [Rank 0] Group 1 FTA: 0.3333
+[2025-07-07 09:03:20] [Rank 0] Group 2 FTA: 0.4453
+[2025-07-07 09:03:20] [Rank 0] Group 3 FTA: 0.3281
+[2025-07-07 09:03:20] [Rank 0] Group 4 FTA: 0.3438
+[2025-07-07 09:03:20] [Rank 0] Group 5 FTA: 0.4714
+[2025-07-07 09:03:20] [Rank 0] Group 6 FTA: 0.3828
+[2025-07-07 09:03:20] [Rank 0] Group 7 FTA: 0.3958
+[2025-07-07 09:03:20] [Rank 0] Group 8 FTA: 0.4036
+[2025-07-07 09:03:20] [Rank 0] Group 9 FTA: 0.3906
+[2025-07-07 09:03:20] [Rank 0] Group 10 FTA: 0.3848
+[2025-07-07 09:03:20] [Rank 0] Group 11 FTA: 0.4004
+[2025-07-07 09:03:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 09:03:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 09:03:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 09:03:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 09:03:22] [Rank 0] step:4501/10000 train_time:305864ms step_avg:67.95ms
+[2025-07-07 09:03:24] [Rank 0] step:4521/10000 train_time:307328ms step_avg:67.98ms
+[2025-07-07 09:03:25] [Rank 0] step:4541/10000 train_time:308687ms step_avg:67.98ms
+[2025-07-07 09:03:26] [Rank 0] step:4561/10000 train_time:310049ms step_avg:67.98ms
+[2025-07-07 09:03:28] [Rank 0] step:4581/10000 train_time:311412ms step_avg:67.98ms
+[2025-07-07 09:03:29] [Rank 0] step:4601/10000 train_time:312775ms step_avg:67.98ms
+[2025-07-07 09:03:30] [Rank 0] step:4621/10000 train_time:314139ms step_avg:67.98ms
+[2025-07-07 09:03:32] [Rank 0] step:4641/10000 train_time:315503ms step_avg:67.98ms
+[2025-07-07 09:03:33] [Rank 0] step:4661/10000 train_time:316867ms step_avg:67.98ms
+[2025-07-07 09:03:35] [Rank 0] step:4681/10000 train_time:318913ms step_avg:68.13ms
+[2025-07-07 09:03:36] [Rank 0] step:4701/10000 train_time:319649ms step_avg:68.00ms
+[2025-07-07 09:03:37] [Rank 0] step:4721/10000 train_time:321014ms step_avg:68.00ms
+[2025-07-07 09:03:39] [Rank 0] step:4741/10000 train_time:322381ms step_avg:68.00ms
+[2025-07-07 09:03:40] [Rank 0] step:4761/10000 train_time:323748ms step_avg:68.00ms
+[2025-07-07 09:03:41] [Rank 0] step:4781/10000 train_time:325116ms step_avg:68.00ms
+[2025-07-07 09:03:43] [Rank 0] step:4801/10000 train_time:326484ms step_avg:68.00ms
+[2025-07-07 09:03:44] [Rank 0] step:4821/10000 train_time:327853ms step_avg:68.01ms
+[2025-07-07 09:03:45] [Rank 0] step:4841/10000 train_time:329222ms step_avg:68.01ms
+[2025-07-07 09:03:47] [Rank 0] step:4861/10000 train_time:331264ms step_avg:68.15ms
+[2025-07-07 09:03:48] [Rank 0] step:4881/10000 train_time:332002ms step_avg:68.02ms
+[2025-07-07 09:03:50] [Rank 0] step:4901/10000 train_time:333373ms step_avg:68.02ms
+[2025-07-07 09:03:51] [Rank 0] step:4921/10000 train_time:334745ms step_avg:68.02ms
+[2025-07-07 09:03:52] [Rank 0] step:4941/10000 train_time:336115ms step_avg:68.03ms
+[2025-07-07 09:03:54] [Rank 0] step:4961/10000 train_time:337487ms step_avg:68.03ms
+[2025-07-07 09:03:55] [Rank 0] step:4981/10000 train_time:338860ms step_avg:68.03ms
+[2025-07-07 09:03:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:03:57] [Rank 0] PRINT: step:5000/10000 train_loss:1.0156 val_loss:1.0868 train_time:340856ms step_avg:68.17ms
+[2025-07-07 09:03:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:03:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
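FTA in these reports is first-token accuracy: the greedy prediction at the last prompt position is compared against the first token of the reference answer, encoded with a leading space to match the training context. The full evaluator appears in the logged script below; a stripped-down sketch of just the metric, with the sample dict layout assumed:

import torch

@torch.no_grad()
def first_token_accuracy(model, tokenizer, samples, device):
    # samples: dicts holding 'prompt' and 'expected_token' (first token of " " + answer)
    correct = 0
    for s in samples:
        ids = tokenizer.encode(s["prompt"], add_special_tokens=False)
        x = torch.tensor([ids], dtype=torch.long, device=device)
        logits = model(x)  # assumes logits of shape [batch, seq, vocab]
        pred = logits[0, -1].argmax().item()
        correct += int(pred == s["expected_token"])
    return correct / max(len(samples), 1)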
+[2025-07-07 09:03:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:09:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:09:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:09:17] [Rank 0] Total Loss: 5.3766
+[2025-07-07 09:09:17] [Rank 0] Total FTA: 0.5333
+[2025-07-07 09:09:17] [Rank 0] Group 0 Loss: 5.4792
+[2025-07-07 09:09:17] [Rank 0] Group 1 Loss: 5.1152
+[2025-07-07 09:09:17] [Rank 0] Group 2 Loss: 5.1572
+[2025-07-07 09:09:17] [Rank 0] Group 3 Loss: 5.3128
+[2025-07-07 09:09:17] [Rank 0] Group 4 Loss: 5.4042
+[2025-07-07 09:09:17] [Rank 0] Group 5 Loss: 5.3892
+[2025-07-07 09:09:17] [Rank 0] Group 6 Loss: 5.3532
+[2025-07-07 09:09:17] [Rank 0] Group 7 Loss: 5.4402
+[2025-07-07 09:09:17] [Rank 0] Group 8 Loss: 5.4165
+[2025-07-07 09:09:17] [Rank 0] Group 9 Loss: 5.4163
+[2025-07-07 09:09:17] [Rank 0] Group 10 Loss: 5.4341
+[2025-07-07 09:09:17] [Rank 0] Group 11 Loss: 5.4201
+[2025-07-07 09:09:17] [Rank 0] Group 0 FTA: 0.6632
+[2025-07-07 09:09:17] [Rank 0] Group 1 FTA: 0.8542
+[2025-07-07 09:09:17] [Rank 0] Group 2 FTA: 0.4661
+[2025-07-07 09:09:17] [Rank 0] Group 3 FTA: 0.5234
+[2025-07-07 09:09:17] [Rank 0] Group 4 FTA: 0.4922
+[2025-07-07 09:09:17] [Rank 0] Group 5 FTA: 0.5312
+[2025-07-07 09:09:17] [Rank 0] Group 6 FTA: 0.4714
+[2025-07-07 09:09:17] [Rank 0] Group 7 FTA: 0.4766
+[2025-07-07 09:09:17] [Rank 0] Group 8 FTA: 0.4401
+[2025-07-07 09:09:17] [Rank 0] Group 9 FTA: 0.5000
+[2025-07-07 09:09:17] [Rank 0] Group 10 FTA: 0.4805
+[2025-07-07 09:09:17] [Rank 0] Group 11 FTA: 0.4746
+[2025-07-07 09:09:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 09:09:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 09:09:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 09:09:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 09:09:18] [Rank 0] step:5001/10000 train_time:340865ms step_avg:68.16ms
+[2025-07-07 09:09:20] [Rank 0] step:5021/10000 train_time:341615ms step_avg:68.04ms
+[2025-07-07 09:09:21] [Rank 0] step:5041/10000 train_time:342975ms step_avg:68.04ms
+[2025-07-07 09:09:22] [Rank 0] step:5061/10000 train_time:344368ms step_avg:68.04ms
+[2025-07-07 09:09:24] [Rank 0] step:5081/10000 train_time:345732ms step_avg:68.04ms
+[2025-07-07 09:09:25] [Rank 0] step:5101/10000 train_time:347095ms step_avg:68.04ms
+[2025-07-07 09:09:26] [Rank 0] step:5121/10000 train_time:348459ms step_avg:68.05ms
+[2025-07-07 09:09:28] [Rank 0] step:5141/10000 train_time:349822ms step_avg:68.05ms
+[2025-07-07 09:09:29] [Rank 0] step:5161/10000 train_time:351186ms step_avg:68.05ms
+[2025-07-07 09:09:30] [Rank 0] step:5181/10000 train_time:352552ms step_avg:68.05ms
+[2025-07-07 09:09:32] [Rank 0] step:5201/10000 train_time:353918ms step_avg:68.05ms
+[2025-07-07 09:09:33] [Rank 0] step:5221/10000 train_time:355283ms step_avg:68.05ms
+[2025-07-07 09:09:35] [Rank 0] step:5241/10000 train_time:356691ms step_avg:68.06ms
+[2025-07-07 09:09:36] [Rank 0] step:5261/10000 train_time:358058ms step_avg:68.06ms
+[2025-07-07 09:09:37] [Rank 0] step:5281/10000 train_time:359428ms step_avg:68.06ms
+[2025-07-07 09:09:39] [Rank 0] step:5301/10000 train_time:360796ms step_avg:68.06ms
+[2025-07-07 09:09:40] [Rank 0] step:5321/10000 train_time:362164ms step_avg:68.06ms
+[2025-07-07 09:09:41] [Rank 0] step:5341/10000 train_time:363532ms step_avg:68.06ms
+[2025-07-07 09:09:43] [Rank 0] step:5361/10000 train_time:364900ms step_avg:68.07ms
+[2025-07-07 09:09:44] [Rank 0] step:5381/10000 train_time:366268ms step_avg:68.07ms
+[2025-07-07 09:09:46] [Rank 0] step:5401/10000 train_time:368306ms step_avg:68.19ms
+[2025-07-07 09:09:47] [Rank 0] step:5421/10000 train_time:369044ms step_avg:68.08ms
+[2025-07-07 09:09:48] [Rank 0] step:5441/10000 train_time:370414ms step_avg:68.08ms
+[2025-07-07 09:09:50] [Rank 0] step:5461/10000 train_time:371782ms step_avg:68.08ms
+[2025-07-07 09:09:51] [Rank 0] step:5481/10000 train_time:373153ms step_avg:68.08ms
+[2025-07-07 09:09:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:09:53] [Rank 0] PRINT: step:5500/10000 train_loss:0.9631 val_loss:1.0776 train_time:375145ms step_avg:68.21ms
+[2025-07-07 09:09:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:09:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
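The config's num_iterations=10000 with cooldown_frac=0.8 implies the learning rate is flat only for the first 2000 steps. The schedule function is outside this excerpt; modded-nanogpt derivatives typically hold the multiplier at 1.0 and then decay linearly over the final cooldown_frac of the run, which under that assumption looks like:

def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    # Constant for the first (1 - cooldown_frac) of training, then
    # linear decay to 0 over the final cooldown_frac.
    x = step / num_iterations
    return 1.0 if x < 1 - cooldown_frac else (1 - x) / cooldown_frac

By this rule the multiplier at step 5500 would already be down to about 0.56.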
+[2025-07-07 09:09:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:15:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:15:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:15:11] [Rank 0] Total Loss: 5.5303
+[2025-07-07 09:15:11] [Rank 0] Total FTA: 0.5173
+[2025-07-07 09:15:11] [Rank 0] Group 0 Loss: 5.5053
+[2025-07-07 09:15:11] [Rank 0] Group 1 Loss: 5.4251
+[2025-07-07 09:15:11] [Rank 0] Group 2 Loss: 5.4988
+[2025-07-07 09:15:11] [Rank 0] Group 3 Loss: 5.6347
+[2025-07-07 09:15:11] [Rank 0] Group 4 Loss: 5.5161
+[2025-07-07 09:15:11] [Rank 0] Group 5 Loss: 5.4667
+[2025-07-07 09:15:11] [Rank 0] Group 6 Loss: 5.4065
+[2025-07-07 09:15:11] [Rank 0] Group 7 Loss: 5.5707
+[2025-07-07 09:15:11] [Rank 0] Group 8 Loss: 5.6226
+[2025-07-07 09:15:11] [Rank 0] Group 9 Loss: 5.5264
+[2025-07-07 09:15:11] [Rank 0] Group 10 Loss: 5.5456
+[2025-07-07 09:15:11] [Rank 0] Group 11 Loss: 5.5801
+[2025-07-07 09:15:11] [Rank 0] Group 0 FTA: 0.5098
+[2025-07-07 09:15:11] [Rank 0] Group 1 FTA: 0.6719
+[2025-07-07 09:15:11] [Rank 0] Group 2 FTA: 0.4297
+[2025-07-07 09:15:11] [Rank 0] Group 3 FTA: 0.6641
+[2025-07-07 09:15:11] [Rank 0] Group 4 FTA: 0.6016
+[2025-07-07 09:15:12] [Rank 0] Group 5 FTA: 0.4948
+[2025-07-07 09:15:12] [Rank 0] Group 6 FTA: 0.4505
+[2025-07-07 09:15:12] [Rank 0] Group 7 FTA: 0.4922
+[2025-07-07 09:15:12] [Rank 0] Group 8 FTA: 0.5104
+[2025-07-07 09:15:12] [Rank 0] Group 9 FTA: 0.5078
+[2025-07-07 09:15:12] [Rank 0] Group 10 FTA: 0.4707
+[2025-07-07 09:15:12] [Rank 0] Group 11 FTA: 0.4824
+[2025-07-07 09:15:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_loss_curves.png
+[2025-07-07 09:15:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/per_class_acc_curves.png
+[2025-07-07 09:15:13] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_loss_curve.png
+[2025-07-07 09:15:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_45/total_acc_curve.png
+[2025-07-07 09:15:13] [Rank 0] step:5501/10000 train_time:375155ms step_avg:68.20ms
+[2025-07-07 09:15:14] [Rank 0] step:5521/10000 train_time:375921ms step_avg:68.09ms
+[2025-07-07 09:15:16] [Rank 0] step:5541/10000 train_time:377282ms step_avg:68.09ms
+[2025-07-07 09:15:17] [Rank 0] step:5561/10000 train_time:378644ms step_avg:68.09ms
+[2025-07-07 09:15:18] [Rank 0] step:5581/10000 train_time:380059ms step_avg:68.10ms
+[2025-07-07 09:15:20] [Rank 0] step:5601/10000 train_time:381412ms step_avg:68.10ms
+[2025-07-07 09:15:21] [Rank 0] step:5621/10000 train_time:382774ms step_avg:68.10ms
+[2025-07-07 09:15:23] [Rank 0] step:5641/10000 train_time:384138ms step_avg:68.10ms
+[2025-07-07 09:15:24] [Rank 0] step:5661/10000 train_time:385503ms step_avg:68.10ms
+[2025-07-07 09:15:25] [Rank 0] step:5681/10000 train_time:386868ms step_avg:68.10ms
+[2025-07-07 09:15:27] [Rank 0] step:5701/10000 train_time:388234ms step_avg:68.10ms
+[2025-07-07 09:15:28] [Rank 0] step:5721/10000 train_time:389600ms step_avg:68.10ms
+[2025-07-07 09:15:29] [Rank 0] step:5741/10000 train_time:390967ms step_avg:68.10ms
+[2025-07-07 09:15:31] [Rank 0] step:5761/10000 train_time:392379ms step_avg:68.11ms
+[2025-07-07 09:15:32] [Rank 0] step:5781/10000 train_time:393733ms step_avg:68.11ms
+[2025-07-07 09:15:34] [Rank 0] step:5801/10000 train_time:395102ms step_avg:68.11ms
+[2025-07-07 09:15:35] [Rank 0] step:5821/10000 train_time:396470ms step_avg:68.11ms
+[2025-07-07 09:15:36] [Rank 0] step:5841/10000 train_time:397839ms step_avg:68.11ms
+[2025-07-07 09:15:38] [Rank 0] step:5861/10000 train_time:399208ms step_avg:68.11ms
+[2025-07-07 09:15:39] [Rank 0] step:5881/10000 train_time:400578ms step_avg:68.11ms
+[2025-07-07 09:15:40] [Rank 0] step:5901/10000 train_time:401948ms step_avg:68.12ms
+[2025-07-07 09:15:42] [Rank 0] step:5921/10000 train_time:403319ms step_avg:68.12ms
+[2025-07-07 09:15:43] [Rank 0] step:5941/10000 train_time:405347ms step_avg:68.23ms
+[2025-07-07 09:15:45] [Rank 0] step:5961/10000 train_time:406087ms step_avg:68.12ms
+[2025-07-07 09:15:46] [Rank 0] step:5981/10000 train_time:407458ms step_avg:68.13ms
+[2025-07-07 09:15:47] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:15:48] [Rank 0] PRINT: step:6000/10000 train_loss:0.9187 val_loss:1.0547 train_time:409452ms step_avg:68.24ms
+[2025-07-07 09:15:48] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:15:48] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
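The total and per-class curve PNGs are redrawn after each detailed evaluation; the same train/val points can also be recovered straight from a log like this one with a short regex over the PRINT lines. A minimal parsing sketch (the log path argument is hypothetical):

import re

PAT = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_loss_points(log_path):
    points = []
    with open(log_path) as f:
        for line in f:
            m = PAT.search(line)
            if m:
                points.append((int(m.group(1)), float(m.group(2)), float(m.group(3))))
    return points

# For this run: [(4000, 1.1355, 1.155), (4500, 1.076, 1.1351), (5000, 1.0156, 1.0868), ...]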
+[2025-07-07 09:15:48] [Rank 0] Evaluation set size after sampling: 5633
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/config.json b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..ff08034100a70c04cda55ab77b0cd69a6668705f
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 48,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.002
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "67e26732-e287-4b4f-8047-932a907039e1",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..23baa7de5344b093f5fd6f3ffb001db83f8def86
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0d711cc16b6321559bda2a98e52f9eeb0196a2fe8c01bad40c523264c3f6b86b
+size 312876
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..4219130dbb378d44ce7237b9eaefe4fa4b6a1a0e
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d90acf54d4b6d00888b475b7b2ab6a0288b0d76e2aa6acf1d500002c1b548c1
+size 382827
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..076d3248552680ca90ee44f35f22dbdda9d43bd5
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ea026b54e86a783be80689bbcc9946d75c39e2357a8f9b9cd81db3989300347
+size 83537
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..8448f587884a5ea2bf94aac7d0a7f786b41dec6a
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5927fdb7a1237cb8bc36eeefee1701f8cbd0a9ec19e89827362caebaf5f6d505
+size 105326
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/training_log_67e26732-e287-4b4f-8047-932a907039e1.txt b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/training_log_67e26732-e287-4b4f-8047-932a907039e1.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8502ad07c92e38ea71ae7a1a0349b2dbd7a47a7b
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/training_log_67e26732-e287-4b4f-8047-932a907039e1.txt
@@ -0,0 +1,3926 @@
+[2025-07-07 08:13:26] [Rank 0] PRINT: --- Script Start: Mon Jul 7 08:13:26 2025 ---
+[2025-07-07 08:13:26] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.002)
+[2025-07-07 08:13:26] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 08:13:26] [Rank 0] PRINT: Using fixed seed: 48
+[2025-07-07 08:13:26] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48
+[2025-07-07 08:13:26] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+
f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once, guarded so a missing
+        # logfile cannot raise
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    # (kept from the data-generation setup; unused in this function)
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # cap at num_samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            print(f"  Prompt : '{sample['prompt']}'")
+            print(f"  Answer : '{sample['answer']}'")
+            print(
+                f"  Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f"  Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f"  Prompt : '{result['prompt']}'")
+            print(f"  Expected answer: '{result['answer']}'")
+            print(
+                f"  Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f"  Expected : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f"  First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f"  Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f"  Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        # group 0 holds a single class; group g > 0 holds 2**(g-1) classes,
+        # each drawn 2**(m-g) times, giving the power-law class distribution
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    (m_val is accepted for signature compatibility; the caller prebuilds class_to_group_map.)
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)    # For FTA sample count
+
+    # 3. Evaluation loop
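+    # (illustrative note) a single forward pass per QA item feeds both metrics in
+    # the loop below: the full-sequence logits give the masked cross-entropy loss,
+    # and the logit row at the last prompt position gives the first-token
+    # prediction for FTA.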
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            # The model may return a (loss, logits) tuple; take the logits,
+            # consistent with compute_first_token_accuracy above.
+            if isinstance(logits, tuple): logits = logits[-1]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if 0 < prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
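+# (illustrative note) train_loss_sum and train_step_count accumulate on-device so
+# the hot loop never calls .item() and forces a host sync; their ratio is averaged
+# across ranks with dist.all_reduce and both are reset at each validation boundary
+# below.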
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach()/ args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
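+
+# -----------------------------------------------------------------------------
+# Usage sketch (assumption: "train_gpt_muon.py" is a stand-in for this script's
+# real filename). The script reads RANK / LOCAL_RANK / WORLD_SIZE from the
+# environment and initializes NCCL itself, so a typical launch is:
+#
+#   torchrun --standalone --nproc_per_node=8 train_gpt_muon.py \
+#       --optimizer_mode 0 --model_parameterization qkvo --adam_lr 1e-4 --seed 42
+#
+# A single-GPU debug run works the same way with --nproc_per_node=1, since the
+# data loader and optimizer setup only require batch_size % world_size == 0.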
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct per-class sample counts following the power-law layout used
+    in the paper: group 0 holds one class, group g >= 1 holds 2**(g-1)
+    classes, and every class in group g receives 2**(m-g) samples."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single pass, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)   # loss sample count per group
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)     # FTA sample count per group
+
+    # 3. 
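Evaluation loop. First, a minimal sketch of the prompt/answer split
+    # and first-token target computed inside the loop (GPT-2 tokenizer and
+    # this dataset's "...? Answer: ..." format assumed; the sample text is
+    # hypothetical):
+    #
+    #   import re
+    #   text = "What is the birth date of Alice? Answer: 1990"
+    #   m_ex = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', text, re.IGNORECASE)
+    #   prompt, answer = m_ex.group(1).strip(), m_ex.group(2).strip()
+    #   # Leading space so the target matches the token seen in running text:
+    #   expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+    #   # FTA: hit iff argmax(logits[len(prompt_tokens) - 1]) == expected_token.
+
+    # 3. 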
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
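Aggregation follows. A toy view of the input/target layout built above
+    # (BLOCK_SIZE=128; tokens = [5, 6, 7, 8] with 8 = EOS; pad = the pad id,
+    # falling back to EOS):
+    #
+    #   input_seq  = [5, 6, 7, 8, pad, pad, ..., pad]         # length 128
+    #   target_seq = [6, 7, 8, pad, -100, -100, ..., -100]    # length 128
+    #
+    # F.cross_entropy(..., ignore_index=-100) therefore scores only the four
+    # real positions, and logits[prompt_tokens_len - 1] is the distribution
+    # over the answer's first token used for FTA.
+
+    # 4. 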
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
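Returns a dict mapping group id (as str) to the mean loss over that
+    group's sampled QA items, e.g. {'0': 4.84, '1': 4.33, ...} (values
+    illustrative).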
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation ---", console=True)
+    model.eval()
+
+    # Load and (optionally) subsample the QA data, mirroring run_detailed_evaluation
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # Create the class -> group mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']:
+                continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None:
+                continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2:
+                continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
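+
+# Usage sketch for the plotter defined next; the history layout matches what
+# the training loop accumulates, {group_id_str: {step_str: loss}} (numbers
+# illustrative):
+#
+#   history = {'0': {'500': 4.84, '1000': 5.39},
+#              '1': {'500': 4.33, '1000': 5.05}}
+#   plot_loss_curves(history, 'per_class_loss.png', 'Per-Class Loss')
+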
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot per-class loss curves from aggregated history data."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+########################################
+#    Construct model and optimizer     #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)  # target_seq=None
+        model.train()
+
+        if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
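+# Running totals for the average train loss reported at each validation point;
+# both counters are reset right after each validation pass inside the loop.
+# Sanity check of get_lr above (e.g. with num_iterations=10000 and
+# cooldown_frac=0.8, as configured for these runs): get_lr(0) == get_lr(2000)
+# == 1.0 (stable phase), get_lr(6000) == 0.55, get_lr(10000) == 0.1 (floor).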
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 08:13:27] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 08:13:27] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-07 08:13:27] [Rank 0] PRINT: Constructing model... +[2025-07-07 08:13:27] [Rank 0] PRINT: Constructing model... +[2025-07-07 08:13:29] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 08:13:29] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-07 08:13:29] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 08:13:29] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-07 08:13:29] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 08:13:29] [Rank 0] PRINT: Testing model forward function: +[2025-07-07 08:13:29] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 08:13:29] [Rank 0] PRINT: Model test - Result type: +[2025-07-07 08:13:29] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 08:13:29] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-07 08:13:29] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 08:13:29] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-07 08:13:30] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 08:13:30] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-07 08:13:30] [Rank 0] PRINT: Model returns: +[2025-07-07 08:13:30] [Rank 0] PRINT: Model returns: +[2025-07-07 08:13:30] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 08:13:30] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-07 08:13:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 08:13:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-07 08:13:30] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-07 08:13:30] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.002). +[2025-07-07 08:13:30] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 08:13:30] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-07 08:13:30] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 08:13:30] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-07 08:13:30] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 08:13:30] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-07 08:13:30] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 08:13:30] [Rank 0] PRINT: Model compilation complete. +[2025-07-07 08:13:30] [Rank 0] PRINT: Starting warmup... +[2025-07-07 08:13:30] [Rank 0] PRINT: Starting warmup... +[2025-07-07 08:14:37] [Rank 0] PRINT: Warmup complete. +[2025-07-07 08:14:37] [Rank 0] PRINT: Warmup complete. +[2025-07-07 08:14:37] [Rank 0] PRINT: Starting training... +[2025-07-07 08:14:37] [Rank 0] PRINT: Starting training... +[2025-07-07 08:14:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:14:37] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 08:14:44] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 08:14:44] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 08:14:46] [Rank 0] step:21/10000 train_time:1017ms step_avg:48.41ms +[2025-07-07 08:14:46] [Rank 0] step:21/10000 train_time:1017ms step_avg:48.41ms +[2025-07-07 08:14:47] [Rank 0] step:41/10000 train_time:2329ms step_avg:56.80ms +[2025-07-07 08:14:47] [Rank 0] step:41/10000 train_time:2329ms step_avg:56.80ms +[2025-07-07 08:14:48] [Rank 0] step:61/10000 train_time:3644ms step_avg:59.74ms +[2025-07-07 08:14:48] [Rank 0] step:61/10000 train_time:3644ms step_avg:59.74ms +[2025-07-07 08:14:50] [Rank 0] step:81/10000 train_time:4962ms step_avg:61.26ms +[2025-07-07 08:14:50] [Rank 0] step:81/10000 train_time:4962ms step_avg:61.26ms +[2025-07-07 08:14:51] [Rank 0] step:101/10000 train_time:6282ms step_avg:62.20ms +[2025-07-07 08:14:51] [Rank 0] step:101/10000 train_time:6282ms step_avg:62.20ms +[2025-07-07 08:14:52] [Rank 0] step:121/10000 train_time:7605ms step_avg:62.85ms +[2025-07-07 08:14:52] [Rank 0] step:121/10000 train_time:7605ms step_avg:62.85ms +[2025-07-07 08:14:54] [Rank 0] step:141/10000 train_time:8929ms step_avg:63.33ms +[2025-07-07 08:14:54] [Rank 0] step:141/10000 train_time:8929ms step_avg:63.33ms +[2025-07-07 08:14:55] [Rank 0] step:161/10000 train_time:10254ms step_avg:63.69ms +[2025-07-07 08:14:55] [Rank 0] step:161/10000 train_time:10254ms step_avg:63.69ms +[2025-07-07 08:14:56] [Rank 0] step:181/10000 train_time:11834ms step_avg:65.38ms +[2025-07-07 08:14:56] [Rank 0] step:181/10000 train_time:11834ms step_avg:65.38ms +[2025-07-07 08:14:58] [Rank 0] step:201/10000 train_time:12980ms step_avg:64.58ms +[2025-07-07 08:14:58] [Rank 0] step:201/10000 train_time:12980ms step_avg:64.58ms +[2025-07-07 08:14:59] [Rank 0] step:221/10000 train_time:14307ms step_avg:64.74ms +[2025-07-07 08:14:59] [Rank 0] step:221/10000 train_time:14307ms step_avg:64.74ms +[2025-07-07 08:15:00] [Rank 0] step:241/10000 train_time:15635ms step_avg:64.88ms +[2025-07-07 08:15:00] [Rank 0] step:241/10000 train_time:15635ms step_avg:64.88ms +[2025-07-07 08:15:02] [Rank 0] step:261/10000 train_time:16994ms step_avg:65.11ms +[2025-07-07 08:15:02] [Rank 0] step:261/10000 train_time:16994ms step_avg:65.11ms +[2025-07-07 08:15:03] [Rank 0] step:281/10000 train_time:18297ms step_avg:65.11ms +[2025-07-07 08:15:03] [Rank 0] step:281/10000 train_time:18297ms step_avg:65.11ms +[2025-07-07 08:15:04] [Rank 0] step:301/10000 train_time:19626ms step_avg:65.20ms +[2025-07-07 08:15:04] [Rank 0] step:301/10000 train_time:19626ms step_avg:65.20ms +[2025-07-07 08:15:06] [Rank 0] step:321/10000 train_time:20957ms step_avg:65.29ms +[2025-07-07 08:15:06] [Rank 0] step:321/10000 train_time:20957ms step_avg:65.29ms +[2025-07-07 08:15:07] [Rank 0] step:341/10000 train_time:22288ms step_avg:65.36ms +[2025-07-07 08:15:07] [Rank 0] step:341/10000 train_time:22288ms step_avg:65.36ms +[2025-07-07 08:15:08] [Rank 0] step:361/10000 train_time:23666ms step_avg:65.56ms +[2025-07-07 08:15:08] [Rank 0] step:361/10000 train_time:23666ms step_avg:65.56ms +[2025-07-07 08:15:10] [Rank 0] step:381/10000 train_time:25015ms step_avg:65.66ms +[2025-07-07 08:15:10] [Rank 0] step:381/10000 train_time:25015ms step_avg:65.66ms +[2025-07-07 08:15:11] [Rank 0] step:401/10000 train_time:26344ms step_avg:65.70ms +[2025-07-07 08:15:11] [Rank 0] step:401/10000 train_time:26344ms step_avg:65.70ms +[2025-07-07 08:15:12] [Rank 0] step:421/10000 train_time:27676ms step_avg:65.74ms 
+[2025-07-07 08:15:12] [Rank 0] step:421/10000 train_time:27676ms step_avg:65.74ms +[2025-07-07 08:15:14] [Rank 0] step:441/10000 train_time:29008ms step_avg:65.78ms +[2025-07-07 08:15:14] [Rank 0] step:441/10000 train_time:29008ms step_avg:65.78ms +[2025-07-07 08:15:15] [Rank 0] step:461/10000 train_time:30339ms step_avg:65.81ms +[2025-07-07 08:15:15] [Rank 0] step:461/10000 train_time:30339ms step_avg:65.81ms +[2025-07-07 08:15:16] [Rank 0] step:481/10000 train_time:31670ms step_avg:65.84ms +[2025-07-07 08:15:16] [Rank 0] step:481/10000 train_time:31670ms step_avg:65.84ms +[2025-07-07 08:15:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:15:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:15:19] [Rank 0] PRINT: step:500/10000 train_loss:3.9587 val_loss:1.9257 train_time:33607ms step_avg:67.21ms +[2025-07-07 08:15:19] [Rank 0] PRINT: step:500/10000 train_loss:3.9587 val_loss:1.9257 train_time:33607ms step_avg:67.21ms +[2025-07-07 08:15:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 08:15:19] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 08:15:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 08:15:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 08:15:19] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 08:15:19] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 08:20:42] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 08:20:42] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 08:20:42] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 08:20:42] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 08:20:42] [Rank 0] Total Loss: 4.5627 +[2025-07-07 08:20:42] [Rank 0] Total Loss: 4.5627 +[2025-07-07 08:20:42] [Rank 0] Total FTA: 0.0815 +[2025-07-07 08:20:42] [Rank 0] Total FTA: 0.0815 +[2025-07-07 08:20:42] [Rank 0] Group 0 Loss: 4.8416 +[2025-07-07 08:20:42] [Rank 0] Group 0 Loss: 4.8416 +[2025-07-07 08:20:42] [Rank 0] Group 1 Loss: 4.3261 +[2025-07-07 08:20:42] [Rank 0] Group 1 Loss: 4.3261 +[2025-07-07 08:20:42] [Rank 0] Group 2 Loss: 4.4530 +[2025-07-07 08:20:42] [Rank 0] Group 2 Loss: 4.4530 +[2025-07-07 08:20:42] [Rank 0] Group 3 Loss: 4.6105 +[2025-07-07 08:20:42] [Rank 0] Group 3 Loss: 4.6105 +[2025-07-07 08:20:42] [Rank 0] Group 4 Loss: 4.5288 +[2025-07-07 08:20:42] [Rank 0] Group 4 Loss: 4.5288 +[2025-07-07 08:20:42] [Rank 0] Group 5 Loss: 4.5341 +[2025-07-07 08:20:42] [Rank 0] Group 5 Loss: 4.5341 +[2025-07-07 08:20:42] [Rank 0] Group 6 Loss: 4.4724 +[2025-07-07 08:20:42] [Rank 0] Group 6 Loss: 4.4724 +[2025-07-07 08:20:42] [Rank 0] Group 7 Loss: 4.5453 +[2025-07-07 08:20:42] [Rank 0] Group 7 Loss: 4.5453 +[2025-07-07 08:20:42] [Rank 0] Group 8 Loss: 4.5207 +[2025-07-07 08:20:42] [Rank 0] Group 8 Loss: 4.5207 +[2025-07-07 08:20:42] [Rank 0] Group 9 Loss: 4.5447 +[2025-07-07 08:20:42] [Rank 0] Group 9 Loss: 4.5447 +[2025-07-07 08:20:42] [Rank 0] Group 10 Loss: 4.5398 +[2025-07-07 08:20:42] [Rank 0] Group 10 Loss: 4.5398 +[2025-07-07 08:20:42] [Rank 0] Group 11 Loss: 4.5606 +[2025-07-07 08:20:42] [Rank 0] Group 11 Loss: 4.5606 +[2025-07-07 08:20:42] [Rank 0] Group 0 FTA: 0.1404 +[2025-07-07 08:20:42] [Rank 0] Group 0 FTA: 0.1404 +[2025-07-07 
08:20:42] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 08:20:42] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 08:20:42] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-07 08:20:42] [Rank 0] Group 2 FTA: 0.0755 +[2025-07-07 08:20:42] [Rank 0] Group 3 FTA: 0.0469 +[2025-07-07 08:20:42] [Rank 0] Group 3 FTA: 0.0469 +[2025-07-07 08:20:42] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 08:20:42] [Rank 0] Group 4 FTA: 0.0156 +[2025-07-07 08:20:42] [Rank 0] Group 5 FTA: 0.0833 +[2025-07-07 08:20:42] [Rank 0] Group 5 FTA: 0.0833 +[2025-07-07 08:20:42] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-07 08:20:42] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-07 08:20:42] [Rank 0] Group 7 FTA: 0.0990 +[2025-07-07 08:20:42] [Rank 0] Group 7 FTA: 0.0990 +[2025-07-07 08:20:42] [Rank 0] Group 8 FTA: 0.1120 +[2025-07-07 08:20:42] [Rank 0] Group 8 FTA: 0.1120 +[2025-07-07 08:20:42] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-07 08:20:42] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-07 08:20:42] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 08:20:42] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-07 08:20:42] [Rank 0] Group 11 FTA: 0.0820 +[2025-07-07 08:20:42] [Rank 0] Group 11 FTA: 0.0820 +[2025-07-07 08:20:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-07 08:20:42] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png +[2025-07-07 08:20:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-07 08:20:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png +[2025-07-07 08:20:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-07 08:20:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png +[2025-07-07 08:20:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-07 08:20:43] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png +[2025-07-07 08:20:43] [Rank 0] step:501/10000 train_time:33617ms step_avg:67.10ms +[2025-07-07 08:20:43] [Rank 0] step:501/10000 train_time:33617ms step_avg:67.10ms +[2025-07-07 08:20:45] [Rank 0] step:521/10000 train_time:34348ms step_avg:65.93ms +[2025-07-07 08:20:45] [Rank 0] step:521/10000 train_time:34348ms step_avg:65.93ms +[2025-07-07 08:20:46] [Rank 0] step:541/10000 train_time:35721ms step_avg:66.03ms +[2025-07-07 08:20:46] [Rank 0] step:541/10000 train_time:35721ms step_avg:66.03ms +[2025-07-07 08:20:47] [Rank 0] step:561/10000 train_time:37063ms step_avg:66.07ms +[2025-07-07 08:20:47] [Rank 0] step:561/10000 train_time:37063ms step_avg:66.07ms +[2025-07-07 08:20:49] [Rank 0] step:581/10000 train_time:38391ms step_avg:66.08ms +[2025-07-07 08:20:49] [Rank 0] step:581/10000 train_time:38391ms step_avg:66.08ms +[2025-07-07 08:20:50] [Rank 0] step:601/10000 train_time:39719ms step_avg:66.09ms +[2025-07-07 08:20:50] [Rank 0] step:601/10000 train_time:39719ms step_avg:66.09ms +[2025-07-07 08:20:51] [Rank 0] step:621/10000 train_time:41046ms step_avg:66.10ms +[2025-07-07 08:20:51] [Rank 0] step:621/10000 train_time:41046ms step_avg:66.10ms 
+[2025-07-07 08:20:53] [Rank 0] step:641/10000 train_time:42375ms step_avg:66.11ms +[2025-07-07 08:20:53] [Rank 0] step:641/10000 train_time:42375ms step_avg:66.11ms +[2025-07-07 08:20:54] [Rank 0] step:661/10000 train_time:43703ms step_avg:66.12ms +[2025-07-07 08:20:54] [Rank 0] step:661/10000 train_time:43703ms step_avg:66.12ms +[2025-07-07 08:20:55] [Rank 0] step:681/10000 train_time:45032ms step_avg:66.13ms +[2025-07-07 08:20:55] [Rank 0] step:681/10000 train_time:45032ms step_avg:66.13ms +[2025-07-07 08:20:57] [Rank 0] step:701/10000 train_time:46361ms step_avg:66.14ms +[2025-07-07 08:20:57] [Rank 0] step:701/10000 train_time:46361ms step_avg:66.14ms +[2025-07-07 08:20:58] [Rank 0] step:721/10000 train_time:47747ms step_avg:66.22ms +[2025-07-07 08:20:58] [Rank 0] step:721/10000 train_time:47747ms step_avg:66.22ms +[2025-07-07 08:20:59] [Rank 0] step:741/10000 train_time:49133ms step_avg:66.31ms +[2025-07-07 08:20:59] [Rank 0] step:741/10000 train_time:49133ms step_avg:66.31ms +[2025-07-07 08:21:01] [Rank 0] step:761/10000 train_time:50468ms step_avg:66.32ms +[2025-07-07 08:21:01] [Rank 0] step:761/10000 train_time:50468ms step_avg:66.32ms +[2025-07-07 08:21:02] [Rank 0] step:781/10000 train_time:51806ms step_avg:66.33ms +[2025-07-07 08:21:02] [Rank 0] step:781/10000 train_time:51806ms step_avg:66.33ms +[2025-07-07 08:21:03] [Rank 0] step:801/10000 train_time:53145ms step_avg:66.35ms +[2025-07-07 08:21:03] [Rank 0] step:801/10000 train_time:53145ms step_avg:66.35ms +[2025-07-07 08:21:05] [Rank 0] step:821/10000 train_time:54484ms step_avg:66.36ms +[2025-07-07 08:21:05] [Rank 0] step:821/10000 train_time:54484ms step_avg:66.36ms +[2025-07-07 08:21:06] [Rank 0] step:841/10000 train_time:55825ms step_avg:66.38ms +[2025-07-07 08:21:06] [Rank 0] step:841/10000 train_time:55825ms step_avg:66.38ms +[2025-07-07 08:21:07] [Rank 0] step:861/10000 train_time:57166ms step_avg:66.39ms +[2025-07-07 08:21:07] [Rank 0] step:861/10000 train_time:57166ms step_avg:66.39ms +[2025-07-07 08:21:09] [Rank 0] step:881/10000 train_time:58506ms step_avg:66.41ms +[2025-07-07 08:21:09] [Rank 0] step:881/10000 train_time:58506ms step_avg:66.41ms +[2025-07-07 08:21:10] [Rank 0] step:901/10000 train_time:60101ms step_avg:66.71ms +[2025-07-07 08:21:10] [Rank 0] step:901/10000 train_time:60101ms step_avg:66.71ms +[2025-07-07 08:21:11] [Rank 0] step:921/10000 train_time:61232ms step_avg:66.48ms +[2025-07-07 08:21:11] [Rank 0] step:921/10000 train_time:61232ms step_avg:66.48ms +[2025-07-07 08:21:13] [Rank 0] step:941/10000 train_time:62574ms step_avg:66.50ms +[2025-07-07 08:21:13] [Rank 0] step:941/10000 train_time:62574ms step_avg:66.50ms +[2025-07-07 08:21:14] [Rank 0] step:961/10000 train_time:63919ms step_avg:66.51ms +[2025-07-07 08:21:14] [Rank 0] step:961/10000 train_time:63919ms step_avg:66.51ms +[2025-07-07 08:21:16] [Rank 0] step:981/10000 train_time:65265ms step_avg:66.53ms +[2025-07-07 08:21:16] [Rank 0] step:981/10000 train_time:65265ms step_avg:66.53ms +[2025-07-07 08:21:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 08:21:17] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 08:21:18] [Rank 0] PRINT: step:1000/10000 train_loss:1.7887 val_loss:1.6852 train_time:67221ms step_avg:67.22ms
+[2025-07-07 08:21:18] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:21:18] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:21:18] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:26:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:26:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:26:43] [Rank 0] Total Loss: 5.1267
+[2025-07-07 08:26:43] [Rank 0] Total FTA: 0.0934
+[2025-07-07 08:26:43] [Rank 0] Group 0 Loss: 5.3921
+[2025-07-07 08:26:43] [Rank 0] Group 1 Loss: 5.0485
+[2025-07-07 08:26:43] [Rank 0] Group 2 Loss: 4.9098
+[2025-07-07 08:26:43] [Rank 0] Group 3 Loss: 5.1086
+[2025-07-07 08:26:43] [Rank 0] Group 4 Loss: 5.2186
+[2025-07-07 08:26:43] [Rank 0] Group 5 Loss: 5.0118
+[2025-07-07 08:26:43] [Rank 0] Group 6 Loss: 5.0127
+[2025-07-07 08:26:43] [Rank 0] Group 7 Loss: 5.1146
+[2025-07-07 08:26:43] [Rank 0] Group 8 Loss: 5.0757
+[2025-07-07 08:26:43] [Rank 0] Group 9 Loss: 5.0939
+[2025-07-07 08:26:43] [Rank 0] Group 10 Loss: 5.1388
+[2025-07-07 08:26:43] [Rank 0] Group 11 Loss: 5.1221
+[2025-07-07 08:26:43] [Rank 0] Group 0 FTA: 0.1534
+[2025-07-07 08:26:43] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:26:43] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 08:26:43] [Rank 0] Group 3 FTA: 0.1042
+[2025-07-07 08:26:43] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-07 08:26:43] [Rank 0] Group 5 FTA: 0.0339
+[2025-07-07 08:26:43] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-07 08:26:43] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-07 08:26:43] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-07 08:26:43] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 08:26:43] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-07 08:26:43] [Rank 0] Group 11 FTA: 0.1006
+[2025-07-07 08:26:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 08:26:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 08:26:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 08:26:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 08:26:45] [Rank 0] step:1001/10000 train_time:67231ms step_avg:67.16ms
+[2025-07-07 08:26:46] [Rank 0] step:1021/10000 train_time:67971ms step_avg:66.57ms
+[2025-07-07 08:26:47] [Rank 0] step:1041/10000 train_time:69305ms step_avg:66.58ms
+[2025-07-07 08:26:49] [Rank 0] step:1061/10000 train_time:70640ms step_avg:66.58ms
+[2025-07-07 08:26:50] [Rank 0] step:1081/10000 train_time:72026ms step_avg:66.63ms
+[2025-07-07 08:26:51] [Rank 0] step:1101/10000 train_time:73354ms step_avg:66.62ms
+[2025-07-07 08:26:53] [Rank 0] step:1121/10000 train_time:74690ms step_avg:66.63ms
+[2025-07-07 08:26:54] [Rank 0] step:1141/10000 train_time:76027ms step_avg:66.63ms
+[2025-07-07 08:26:55] [Rank 0] step:1161/10000 train_time:77365ms step_avg:66.64ms
+[2025-07-07 08:26:57] [Rank 0] step:1181/10000 train_time:78703ms step_avg:66.64ms
+[2025-07-07 08:26:58] [Rank 0] step:1201/10000 train_time:80042ms step_avg:66.65ms
+[2025-07-07 08:26:59] [Rank 0] step:1221/10000 train_time:81381ms step_avg:66.65ms
+[2025-07-07 08:27:01] [Rank 0] step:1241/10000 train_time:82721ms step_avg:66.66ms
+[2025-07-07 08:27:02] [Rank 0] step:1261/10000 train_time:84314ms step_avg:66.86ms
+[2025-07-07 08:27:03] [Rank 0] step:1281/10000 train_time:85456ms step_avg:66.71ms
+[2025-07-07 08:27:05] [Rank 0] step:1301/10000 train_time:86798ms step_avg:66.72ms
+[2025-07-07 08:27:06] [Rank 0] step:1321/10000 train_time:88139ms step_avg:66.72ms
+[2025-07-07 08:27:07] [Rank 0] step:1341/10000 train_time:89481ms step_avg:66.73ms
+[2025-07-07 08:27:09] [Rank 0] step:1361/10000 train_time:90822ms step_avg:66.73ms
+[2025-07-07 08:27:10] [Rank 0] step:1381/10000 train_time:92164ms step_avg:66.74ms
+[2025-07-07 08:27:11] [Rank 0] step:1401/10000 train_time:93505ms step_avg:66.74ms
+[2025-07-07 08:27:13] [Rank 0] step:1421/10000 train_time:94847ms step_avg:66.75ms
+[2025-07-07 08:27:14] [Rank 0] step:1441/10000 train_time:96858ms step_avg:67.22ms
+[2025-07-07 08:27:16] [Rank 0] step:1461/10000 train_time:97580ms step_avg:66.79ms
+[2025-07-07 08:27:17] [Rank 0] step:1481/10000 train_time:98945ms step_avg:66.81ms
+[2025-07-07 08:27:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:27:19] [Rank 0] PRINT: step:1500/10000 train_loss:1.5510 val_loss:1.4084 train_time:100874ms step_avg:67.25ms
+[2025-07-07 08:27:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:27:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:27:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:32:46] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:32:46] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:32:46] [Rank 0] Total Loss: 5.0581
+[2025-07-07 08:32:46] [Rank 0] Total FTA: 0.0909
+[2025-07-07 08:32:46] [Rank 0] Group 0 Loss: 5.1613
+[2025-07-07 08:32:46] [Rank 0] Group 1 Loss: 4.9810
+[2025-07-07 08:32:46] [Rank 0] Group 2 Loss: 4.8064
+[2025-07-07 08:32:46] [Rank 0] Group 3 Loss: 5.0467
+[2025-07-07 08:32:46] [Rank 0] Group 4 Loss: 5.1378
+[2025-07-07 08:32:46] [Rank 0] Group 5 Loss: 4.9919
+[2025-07-07 08:32:46] [Rank 0] Group 6 Loss: 4.9833
+[2025-07-07 08:32:46] [Rank 0] Group 7 Loss: 5.1138
+[2025-07-07 08:32:46] [Rank 0] Group 8 Loss: 5.0896
+[2025-07-07 08:32:46] [Rank 0] Group 9 Loss: 5.0634
+[2025-07-07 08:32:46] [Rank 0] Group 10 Loss: 5.0701
+[2025-07-07 08:32:46] [Rank 0] Group 11 Loss: 5.0914
+[2025-07-07 08:32:46] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 08:32:46] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:32:46] [Rank 0] Group 2 FTA: 0.1458
+[2025-07-07 08:32:46] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 08:32:46] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 08:32:46] [Rank 0] Group 5 FTA: 0.0781
+[2025-07-07 08:32:46] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-07 08:32:46] [Rank 0] Group 7 FTA: 0.1224
+[2025-07-07 08:32:46] [Rank 0] Group 8 FTA: 0.0755
+[2025-07-07 08:32:46] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 08:32:46] [Rank 0] Group 10 FTA: 0.0801
+[2025-07-07 08:32:46] [Rank 0] Group 11 FTA: 0.0908
+[2025-07-07 08:32:47] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 08:32:47] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 08:32:47] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 08:32:48] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 08:32:48] [Rank 0] step:1501/10000 train_time:100885ms step_avg:67.21ms
+[2025-07-07 08:32:49] [Rank 0] step:1521/10000 train_time:101640ms step_avg:66.82ms
+[2025-07-07 08:32:50] [Rank 0] step:1541/10000 train_time:102973ms step_avg:66.82ms
+[2025-07-07 08:32:52] [Rank 0] step:1561/10000 train_time:104309ms step_avg:66.82ms
+[2025-07-07 08:32:53] [Rank 0] step:1581/10000 train_time:105646ms step_avg:66.82ms
+[2025-07-07 08:32:54] [Rank 0] step:1601/10000 train_time:106982ms step_avg:66.82ms
+[2025-07-07 08:32:56] [Rank 0] step:1621/10000 train_time:108319ms step_avg:66.82ms
+[2025-07-07 08:32:57] [Rank 0] step:1641/10000 train_time:109721ms step_avg:66.86ms
+[2025-07-07 08:32:58] [Rank 0] step:1661/10000 train_time:111060ms step_avg:66.86ms
+[2025-07-07 08:33:00] [Rank 0] step:1681/10000 train_time:112398ms step_avg:66.86ms
+[2025-07-07 08:33:01] [Rank 0] step:1701/10000 train_time:113737ms step_avg:66.86ms
+[2025-07-07 08:33:02] [Rank 0] step:1721/10000 train_time:115077ms step_avg:66.87ms
+[2025-07-07 08:33:04] [Rank 0] step:1741/10000 train_time:116415ms step_avg:66.87ms
+[2025-07-07 08:33:05] [Rank 0] step:1761/10000 train_time:117755ms step_avg:66.87ms
+[2025-07-07 08:33:07] [Rank 0] step:1781/10000 train_time:119094ms step_avg:66.87ms
+[2025-07-07 08:33:08] [Rank 0] step:1801/10000 train_time:120434ms step_avg:66.87ms
+[2025-07-07 08:33:09] [Rank 0] step:1821/10000 train_time:121818ms step_avg:66.90ms
+[2025-07-07 08:33:11] [Rank 0] step:1841/10000 train_time:123158ms step_avg:66.90ms
+[2025-07-07 08:33:12] [Rank 0] step:1861/10000 train_time:124499ms step_avg:66.90ms
+[2025-07-07 08:33:13] [Rank 0] step:1881/10000 train_time:125840ms step_avg:66.90ms
+[2025-07-07 08:33:15] [Rank 0] step:1901/10000 train_time:127182ms step_avg:66.90ms
+[2025-07-07 08:33:16] [Rank 0] step:1921/10000 train_time:128523ms step_avg:66.90ms
+[2025-07-07 08:33:17] [Rank 0] step:1941/10000 train_time:129864ms step_avg:66.91ms
+[2025-07-07 08:33:19] [Rank 0] step:1961/10000 train_time:131206ms step_avg:66.91ms
+[2025-07-07 08:33:20] [Rank 0] step:1981/10000 train_time:132549ms step_avg:66.91ms
+[2025-07-07 08:33:21] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:33:22] [Rank 0] PRINT: step:2000/10000 train_loss:1.3443 val_loss:1.3054 train_time:134559ms step_avg:67.28ms
+[2025-07-07 08:33:22] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:33:22] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:33:22] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:38:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:38:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:38:44] [Rank 0] Total Loss: 5.0178
+[2025-07-07 08:38:44] [Rank 0] Total FTA: 0.0920
+[2025-07-07 08:38:44] [Rank 0] Group 0 Loss: 5.1258
+[2025-07-07 08:38:44] [Rank 0] Group 1 Loss: 5.0637
+[2025-07-07 08:38:44] [Rank 0] Group 2 Loss: 4.7598
+[2025-07-07 08:38:44] [Rank 0] Group 3 Loss: 5.1245
+[2025-07-07 08:38:44] [Rank 0] Group 4 Loss: 5.0555
+[2025-07-07 08:38:44] [Rank 0] Group 5 Loss: 4.9438
+[2025-07-07 08:38:44] [Rank 0] Group 6 Loss: 4.9466
+[2025-07-07 08:38:44] [Rank 0] Group 7 Loss: 5.0710
+[2025-07-07 08:38:44] [Rank 0] Group 8 Loss: 5.0273
+[2025-07-07 08:38:44] [Rank 0] Group 9 Loss: 4.9424
+[2025-07-07 08:38:44] [Rank 0] Group 10 Loss: 4.9926
+[2025-07-07 08:38:44] [Rank 0] Group 11 Loss: 5.0246
+[2025-07-07 08:38:44] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 08:38:44] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 08:38:44] [Rank 0] Group 2 FTA: 0.0807
+[2025-07-07 08:38:44] [Rank 0] Group 3 FTA: 0.0625
+[2025-07-07 08:38:44] [Rank 0] Group 4 FTA: 0.0339
+[2025-07-07 08:38:44] [Rank 0] Group 5 FTA: 0.0599
+[2025-07-07 08:38:44] [Rank 0] Group 6 FTA: 0.1120
+[2025-07-07 08:38:44] [Rank 0] Group 7 FTA: 0.1224
+[2025-07-07 08:38:44] [Rank 0] Group 8 FTA: 0.1120
+[2025-07-07 08:38:44] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 08:38:44] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 08:38:44] [Rank 0] Group 11 FTA: 0.0908
+[2025-07-07 08:38:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 08:38:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 08:38:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 08:38:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 08:38:46] [Rank 0] step:2001/10000 train_time:134569ms step_avg:67.25ms
+[2025-07-07 08:38:47] [Rank 0] step:2021/10000 train_time:135323ms step_avg:66.96ms
+[2025-07-07 08:38:48] [Rank 0] step:2041/10000 train_time:136658ms step_avg:66.96ms
+[2025-07-07 08:38:50] [Rank 0] step:2061/10000 train_time:137996ms step_avg:66.96ms
+[2025-07-07 08:38:51] [Rank 0] step:2081/10000 train_time:139548ms step_avg:67.06ms
+[2025-07-07 08:38:53] [Rank 0] step:2101/10000 train_time:140898ms step_avg:67.06ms
+[2025-07-07 08:38:54] [Rank 0] step:2121/10000 train_time:142237ms step_avg:67.06ms
+[2025-07-07 08:38:55] [Rank 0] step:2141/10000 train_time:143576ms step_avg:67.06ms
+[2025-07-07 08:38:57] [Rank 0] step:2161/10000 train_time:144917ms step_avg:67.06ms
+[2025-07-07 08:38:58] [Rank 0] step:2181/10000 train_time:146301ms step_avg:67.08ms
+[2025-07-07 08:38:59] [Rank 0] step:2201/10000 train_time:147642ms step_avg:67.08ms
+[2025-07-07 08:39:01] [Rank 0] step:2221/10000 train_time:148983ms step_avg:67.08ms
+[2025-07-07 08:39:02] [Rank 0] step:2241/10000 train_time:150334ms step_avg:67.08ms
+[2025-07-07 08:39:03] [Rank 0] step:2261/10000 train_time:151701ms step_avg:67.09ms
+[2025-07-07 08:39:05] [Rank 0] step:2281/10000 train_time:153067ms step_avg:67.11ms
+[2025-07-07 08:39:06] [Rank 0] step:2301/10000 train_time:154432ms step_avg:67.12ms
+[2025-07-07 08:39:07] [Rank 0] step:2321/10000 train_time:155797ms step_avg:67.13ms
+[2025-07-07 08:39:09] [Rank 0] step:2341/10000 train_time:157835ms step_avg:67.42ms
+[2025-07-07 08:39:10] [Rank 0] step:2361/10000 train_time:158570ms step_avg:67.16ms
+[2025-07-07 08:39:12] [Rank 0] step:2381/10000 train_time:159937ms step_avg:67.17ms
+[2025-07-07 08:39:13] [Rank 0] step:2401/10000 train_time:161304ms step_avg:67.18ms
+[2025-07-07 08:39:14] [Rank 0] step:2421/10000 train_time:162671ms step_avg:67.19ms
+[2025-07-07 08:39:16] [Rank 0] step:2441/10000 train_time:164038ms step_avg:67.20ms
+[2025-07-07 08:39:17] [Rank 0] step:2461/10000 train_time:165405ms step_avg:67.21ms
+[2025-07-07 08:39:18] [Rank 0] step:2481/10000 train_time:166772ms step_avg:67.22ms
+[2025-07-07 08:39:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:39:21] [Rank 0] PRINT: step:2500/10000 train_loss:1.2949 val_loss:1.2842 train_time:168762ms step_avg:67.50ms
+[2025-07-07 08:39:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:39:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:39:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:44:43] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:44:43] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:44:43] [Rank 0] Total Loss: 5.1534
+[2025-07-07 08:44:43] [Rank 0] Total FTA: 0.1005
+[2025-07-07 08:44:43] [Rank 0] Group 0 Loss: 5.3381
+[2025-07-07 08:44:43] [Rank 0] Group 1 Loss: 5.0516
+[2025-07-07 08:44:43] [Rank 0] Group 2 Loss: 4.9064
+[2025-07-07 08:44:43] [Rank 0] Group 3 Loss: 5.1595
+[2025-07-07 08:44:43] [Rank 0] Group 4 Loss: 5.1824
+[2025-07-07 08:44:43] [Rank 0] Group 5 Loss: 5.1243
+[2025-07-07 08:44:43] [Rank 0] Group 6 Loss: 5.0895
+[2025-07-07 08:44:43] [Rank 0] Group 7 Loss: 5.1551
+[2025-07-07 08:44:43] [Rank 0] Group 8 Loss: 5.1390
+[2025-07-07 08:44:43] [Rank 0] Group 9 Loss: 5.1374
+[2025-07-07 08:44:43] [Rank 0] Group 10 Loss: 5.1846
+[2025-07-07 08:44:43] [Rank 0] Group 11 Loss: 5.1603
+[2025-07-07 08:44:43] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 08:44:43] [Rank 0] Group 1 FTA: 0.1719
+[2025-07-07 08:44:43] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 08:44:43] [Rank 0] Group 3 FTA: 0.0964
+[2025-07-07 08:44:43] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 08:44:43] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-07 08:44:43] [Rank 0] Group 6 FTA: 0.0938
+[2025-07-07 08:44:43] [Rank 0] Group 7 FTA: 0.1276
+[2025-07-07 08:44:43] [Rank 0] Group 8 FTA: 0.0859
+[2025-07-07 08:44:43] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 08:44:43] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 08:44:43] [Rank 0] Group 11 FTA: 0.0957
+[2025-07-07 08:44:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 08:44:44] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 08:44:44] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 08:44:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 08:44:44] [Rank 0] step:2501/10000 train_time:168772ms step_avg:67.48ms
+[2025-07-07 08:44:46] [Rank 0] step:2521/10000 train_time:169595ms step_avg:67.27ms
+[2025-07-07 08:44:47] [Rank 0] step:2541/10000 train_time:170954ms step_avg:67.28ms
+[2025-07-07 08:44:48] [Rank 0] step:2561/10000 train_time:172317ms step_avg:67.29ms
+[2025-07-07 08:44:50] [Rank 0] step:2581/10000 train_time:173679ms step_avg:67.29ms
+[2025-07-07 08:44:51] [Rank 0] step:2601/10000 train_time:175041ms step_avg:67.30ms
+[2025-07-07 08:44:53] [Rank 0] step:2621/10000 train_time:176407ms step_avg:67.31ms
+[2025-07-07 08:44:54] [Rank 0] step:2641/10000 train_time:177771ms step_avg:67.31ms
+[2025-07-07 08:44:55] [Rank 0] step:2661/10000 train_time:179135ms step_avg:67.32ms
+[2025-07-07 08:44:57] [Rank 0] step:2681/10000 train_time:180500ms step_avg:67.33ms
+[2025-07-07 08:44:58] [Rank 0] step:2701/10000 train_time:181866ms step_avg:67.33ms
+[2025-07-07 08:44:59] [Rank 0] step:2721/10000 train_time:183279ms step_avg:67.36ms
+[2025-07-07 08:45:01] [Rank 0] step:2741/10000 train_time:184644ms step_avg:67.36ms
+[2025-07-07 08:45:02] [Rank 0] step:2761/10000 train_time:186012ms step_avg:67.37ms
+[2025-07-07 08:45:03] [Rank 0] step:2781/10000 train_time:187379ms step_avg:67.38ms
+[2025-07-07 08:45:05] [Rank 0] step:2801/10000 train_time:188746ms step_avg:67.39ms
+[2025-07-07 08:45:06] [Rank 0] step:2821/10000 train_time:190114ms step_avg:67.39ms
+[2025-07-07 08:45:08] [Rank 0] step:2841/10000 train_time:191481ms step_avg:67.40ms
+[2025-07-07 08:45:09] [Rank 0] step:2861/10000 train_time:192851ms step_avg:67.41ms
+[2025-07-07 08:45:10] [Rank 0] step:2881/10000 train_time:194219ms step_avg:67.41ms
+[2025-07-07 08:45:12] [Rank 0] step:2901/10000 train_time:195847ms step_avg:67.51ms
+[2025-07-07 08:45:13] [Rank 0] step:2921/10000 train_time:197002ms step_avg:67.44ms
+[2025-07-07 08:45:14] [Rank 0] step:2941/10000 train_time:198372ms step_avg:67.45ms
+[2025-07-07 08:45:16] [Rank 0] step:2961/10000 train_time:199742ms step_avg:67.46ms
+[2025-07-07 08:45:17] [Rank 0] step:2981/10000 train_time:201111ms step_avg:67.46ms
+[2025-07-07 08:45:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:45:20] [Rank 0] PRINT: step:3000/10000 train_loss:1.2765 val_loss:1.2688 train_time:203105ms step_avg:67.70ms
+[2025-07-07 08:45:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:45:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:45:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:50:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:50:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:50:44] [Rank 0] Total Loss: 5.1779
+[2025-07-07 08:50:44] [Rank 0] Total FTA: 0.1015
+[2025-07-07 08:50:44] [Rank 0] Group 0 Loss: 5.3433
+[2025-07-07 08:50:44] [Rank 0] Group 1 Loss: 5.0621
+[2025-07-07 08:50:44] [Rank 0] Group 2 Loss: 4.8404
+[2025-07-07 08:50:44] [Rank 0] Group 3 Loss: 5.4171
+[2025-07-07 08:50:44] [Rank 0] Group 4 Loss: 5.2309
+[2025-07-07 08:50:44] [Rank 0] Group 5 Loss: 5.0854
+[2025-07-07 08:50:44] [Rank 0] Group 6 Loss: 5.0685
+[2025-07-07 08:50:44] [Rank 0] Group 7 Loss: 5.2221
+[2025-07-07 08:50:44] [Rank 0] Group 8 Loss: 5.1594
+[2025-07-07 08:50:44] [Rank 0] Group 9 Loss: 5.1774
+[2025-07-07 08:50:44] [Rank 0] Group 10 Loss: 5.1442
+[2025-07-07 08:50:44] [Rank 0] Group 11 Loss: 5.1973
+[2025-07-07 08:50:44] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 08:50:44] [Rank 0] Group 1 FTA: 0.1406
+[2025-07-07 08:50:44] [Rank 0] Group 2 FTA: 0.2422
+[2025-07-07 08:50:44] [Rank 0] Group 3 FTA: 0.1667
+[2025-07-07 08:50:44] [Rank 0] Group 4 FTA: 0.0469
+[2025-07-07 08:50:44] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 08:50:44] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-07 08:50:44] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 08:50:44] [Rank 0] Group 8 FTA: 0.1068
+[2025-07-07 08:50:44] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 08:50:44] [Rank 0] Group 10 FTA: 0.1250
+[2025-07-07 08:50:44] [Rank 0] Group 11 FTA: 0.1084
+[2025-07-07 08:50:45] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 08:50:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 08:50:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 08:50:46] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 08:50:46] [Rank 0] step:3001/10000 train_time:203115ms step_avg:67.68ms
+[2025-07-07 08:50:47] [Rank 0] step:3021/10000 train_time:203890ms step_avg:67.49ms
+[2025-07-07 08:50:49] [Rank 0] step:3041/10000 train_time:205251ms step_avg:67.49ms
+[2025-07-07 08:50:50] [Rank 0] step:3061/10000 train_time:206661ms step_avg:67.51ms
+[2025-07-07 08:50:51] [Rank 0] step:3081/10000 train_time:208019ms step_avg:67.52ms
+[2025-07-07 08:50:53] [Rank 0] step:3101/10000 train_time:209382ms step_avg:67.52ms
+[2025-07-07 08:50:54] [Rank 0] step:3121/10000 train_time:210745ms step_avg:67.52ms
+[2025-07-07 08:50:55] [Rank 0] step:3141/10000 train_time:212110ms step_avg:67.53ms
+[2025-07-07 08:50:57] [Rank 0] step:3161/10000 train_time:213476ms step_avg:67.53ms
+[2025-07-07 08:50:58] [Rank 0] step:3181/10000 train_time:214841ms step_avg:67.54ms
+[2025-07-07 08:51:00] [Rank 0] step:3201/10000 train_time:216207ms step_avg:67.54ms
+[2025-07-07 08:51:01] [Rank 0] step:3221/10000 train_time:217573ms step_avg:67.55ms
+[2025-07-07 08:51:02] [Rank 0] step:3241/10000 train_time:218940ms step_avg:67.55ms
+[2025-07-07 08:51:04] [Rank 0] step:3261/10000 train_time:220341ms step_avg:67.57ms
+[2025-07-07 08:51:05] [Rank 0] step:3281/10000 train_time:221707ms step_avg:67.57ms
+[2025-07-07 08:51:06] [Rank 0] step:3301/10000 train_time:223073ms step_avg:67.58ms
+[2025-07-07 08:51:08] [Rank 0] step:3321/10000 train_time:224440ms step_avg:67.58ms
+[2025-07-07 08:51:09] [Rank 0] step:3341/10000 train_time:225805ms step_avg:67.59ms
+[2025-07-07 08:51:10] [Rank 0] step:3361/10000 train_time:227172ms step_avg:67.59ms
+[2025-07-07 08:51:12] [Rank 0] step:3381/10000 train_time:228539ms step_avg:67.60ms
+[2025-07-07 08:51:13] [Rank 0] step:3401/10000 train_time:229906ms step_avg:67.60ms
+[2025-07-07 08:51:15] [Rank 0] step:3421/10000 train_time:231957ms step_avg:67.80ms
+[2025-07-07 08:51:16] [Rank 0] step:3441/10000 train_time:232693ms step_avg:67.62ms
+[2025-07-07 08:51:17] [Rank 0] step:3461/10000 train_time:234060ms step_avg:67.63ms
+[2025-07-07 08:51:19] [Rank 0] step:3481/10000 train_time:235427ms step_avg:67.63ms
+[2025-07-07 08:51:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:51:21] [Rank 0] PRINT: step:3500/10000 train_loss:1.2501 val_loss:1.2466 train_time:237416ms step_avg:67.83ms
+[2025-07-07 08:51:21] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:51:21] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:51:21] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:56:42] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:56:42] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:56:42] [Rank 0] Total Loss: 5.0848
+[2025-07-07 08:56:42] [Rank 0] Total FTA: 0.1424
+[2025-07-07 08:56:42] [Rank 0] Group 0 Loss: 5.0806
+[2025-07-07 08:56:42] [Rank 0] Group 1 Loss: 5.1689
+[2025-07-07 08:56:42] [Rank 0] Group 2 Loss: 4.8360
+[2025-07-07 08:56:42] [Rank 0] Group 3 Loss: 5.1313
+[2025-07-07 08:56:42] [Rank 0] Group 4 Loss: 5.1446
+[2025-07-07 08:56:42] [Rank 0] Group 5 Loss: 5.0589
+[2025-07-07 08:56:42] [Rank 0] Group 6 Loss: 5.0089
+[2025-07-07 08:56:42] [Rank 0] Group 7 Loss: 5.1432
+[2025-07-07 08:56:42] [Rank 0] Group 8 Loss: 5.1551
+[2025-07-07 08:56:42] [Rank 0] Group 9 Loss: 5.0545
+[2025-07-07 08:56:42] [Rank 0] Group 10 Loss: 5.1288
+[2025-07-07 08:56:42] [Rank 0] Group 11 Loss: 5.0853
+[2025-07-07 08:56:42] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 08:56:42] [Rank 0] Group 1 FTA: 0.1849
+[2025-07-07 08:56:42] [Rank 0] Group 2 FTA: 0.2500
+[2025-07-07 08:56:42] [Rank 0] Group 3 FTA: 0.2214
+[2025-07-07 08:56:42] [Rank 0] Group 4 FTA: 0.0911
+[2025-07-07 08:56:42] [Rank 0] Group 5 FTA: 0.1380
+[2025-07-07 08:56:42] [Rank 0] Group 6 FTA: 0.1380
+[2025-07-07 08:56:42] [Rank 0] Group 7 FTA: 0.1641
+[2025-07-07 08:56:42] [Rank 0] Group 8 FTA: 0.1953
+[2025-07-07 08:56:42] [Rank 0] Group 9 FTA: 0.1602
+[2025-07-07 08:56:42] [Rank 0] Group 10 FTA: 0.1797
+[2025-07-07 08:56:42] [Rank 0] Group 11 FTA: 0.1348
+[2025-07-07 08:56:43] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 08:56:43] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 08:56:43] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 08:56:44] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 08:56:44] [Rank 0] step:3501/10000 train_time:237427ms step_avg:67.82ms
+[2025-07-07 08:56:45] [Rank 0] step:3521/10000 train_time:238188ms step_avg:67.65ms
+[2025-07-07 08:56:46] [Rank 0] step:3541/10000 train_time:239547ms step_avg:67.65ms
+[2025-07-07 08:56:48] [Rank 0] step:3561/10000 train_time:240948ms step_avg:67.66ms
+[2025-07-07 08:56:49] [Rank 0] step:3581/10000 train_time:242311ms step_avg:67.67ms
+[2025-07-07 08:56:51] [Rank 0] step:3601/10000 train_time:243747ms step_avg:67.69ms
+[2025-07-07 08:56:52] [Rank 0] step:3621/10000 train_time:245163ms step_avg:67.71ms
+[2025-07-07 08:56:53] [Rank 0] step:3641/10000 train_time:246527ms step_avg:67.71ms
+[2025-07-07 08:56:55] [Rank 0] step:3661/10000 train_time:247891ms step_avg:67.71ms
+[2025-07-07 08:56:56] [Rank 0] step:3681/10000 train_time:249255ms step_avg:67.71ms
+[2025-07-07 08:56:57] [Rank 0] step:3701/10000 train_time:250620ms step_avg:67.72ms
+[2025-07-07 08:56:59] [Rank 0] step:3721/10000 train_time:251986ms step_avg:67.72ms
+[2025-07-07 08:57:00] [Rank 0] step:3741/10000 train_time:253351ms step_avg:67.72ms
+[2025-07-07 08:57:02] [Rank 0] step:3761/10000 train_time:254717ms step_avg:67.73ms
+[2025-07-07 08:57:03] [Rank 0] step:3781/10000 train_time:256085ms step_avg:67.73ms
+[2025-07-07 08:57:04] [Rank 0] step:3801/10000 train_time:257483ms step_avg:67.74ms
+[2025-07-07 08:57:06] [Rank 0] step:3821/10000 train_time:258849ms step_avg:67.74ms
+[2025-07-07 08:57:07] [Rank 0] step:3841/10000 train_time:260216ms step_avg:67.75ms
+[2025-07-07 08:57:08] [Rank 0] step:3861/10000 train_time:261585ms step_avg:67.75ms
+[2025-07-07 08:57:10] [Rank 0] step:3881/10000 train_time:262951ms step_avg:67.75ms
+[2025-07-07 08:57:11] [Rank 0] step:3901/10000 train_time:264320ms step_avg:67.76ms
+[2025-07-07 08:57:13] [Rank 0] step:3921/10000 train_time:265688ms step_avg:67.76ms
+[2025-07-07 08:57:14] [Rank 0] step:3941/10000 train_time:267056ms step_avg:67.76ms
+[2025-07-07 08:57:15] [Rank 0] step:3961/10000 train_time:269097ms step_avg:67.94ms
+[2025-07-07 08:57:17] [Rank 0] step:3981/10000 train_time:269834ms step_avg:67.78ms
+[2025-07-07 08:57:18] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:57:19] [Rank 0] PRINT: step:4000/10000 train_loss:1.2116 val_loss:1.2200 train_time:271824ms step_avg:67.96ms
+[2025-07-07 08:57:19] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:57:19] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:57:19] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:02:40] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:02:40] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:02:40] [Rank 0] Total Loss: 5.3463
+[2025-07-07 09:02:40] [Rank 0] Total FTA: 0.2004
+[2025-07-07 09:02:40] [Rank 0] Group 0 Loss: 5.4734
+[2025-07-07 09:02:40] [Rank 0] Group 1 Loss: 5.2113
+[2025-07-07 09:02:40] [Rank 0] Group 2 Loss: 4.9867
+[2025-07-07 09:02:40] [Rank 0] Group 3 Loss: 5.4494
+[2025-07-07 09:02:40] [Rank 0] Group 4 Loss: 5.5091
+[2025-07-07 09:02:40] [Rank 0] Group 5 Loss: 5.3255
+[2025-07-07 09:02:40] [Rank 0] Group 6 Loss: 5.2642
+[2025-07-07 09:02:40] [Rank 0] Group 7 Loss: 5.4110
+[2025-07-07 09:02:40] [Rank 0] Group 8 Loss: 5.3346
+[2025-07-07 09:02:40] [Rank 0] Group 9 Loss: 5.3318
+[2025-07-07 09:02:40] [Rank 0] Group 10 Loss: 5.3666
+[2025-07-07 09:02:40] [Rank 0] Group 11 Loss: 5.3488
+[2025-07-07 09:02:40] [Rank 0] Group 0 FTA: 0.2926
+[2025-07-07 09:02:40] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 09:02:40] [Rank 0] Group 2 FTA: 0.3594
+[2025-07-07 09:02:40] [Rank 0] Group 3 FTA: 0.2161
+[2025-07-07 09:02:40] [Rank 0] Group 4 FTA: 0.0938
+[2025-07-07 09:02:40] [Rank 0] Group 5 FTA: 0.1380
+[2025-07-07 09:02:40] [Rank 0] Group 6 FTA: 0.1979
+[2025-07-07 09:02:40] [Rank 0] Group 7 FTA: 0.2135
+[2025-07-07 09:02:40] [Rank 0] Group 8 FTA: 0.2109
+[2025-07-07 09:02:40] [Rank 0] Group 9 FTA: 0.2148
+[2025-07-07 09:02:40] [Rank 0] Group 10 FTA: 0.2012
+[2025-07-07 09:02:40] [Rank 0] Group 11 FTA: 0.1924
+[2025-07-07 09:02:41] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 09:02:41] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 09:02:41] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 09:02:41] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 09:02:42] [Rank 0] step:4001/10000 train_time:271834ms step_avg:67.94ms
+[2025-07-07 09:02:43] [Rank 0] step:4021/10000 train_time:272615ms step_avg:67.80ms
+[2025-07-07 09:02:44] [Rank 0] step:4041/10000 train_time:273974ms step_avg:67.80ms
+[2025-07-07 09:02:46] [Rank 0] step:4061/10000 train_time:275336ms step_avg:67.80ms
+[2025-07-07 09:02:47] [Rank 0] step:4081/10000 train_time:276698ms step_avg:67.80ms
+[2025-07-07 09:02:48] [Rank 0] step:4101/10000 train_time:278061ms step_avg:67.80ms
+[2025-07-07 09:02:50] [Rank 0] step:4121/10000 train_time:279426ms step_avg:67.81ms
+[2025-07-07 09:02:51] [Rank 0] step:4141/10000 train_time:280791ms step_avg:67.81ms
+[2025-07-07 09:02:52] [Rank 0] step:4161/10000 train_time:282188ms step_avg:67.82ms
+[2025-07-07 09:02:54] [Rank 0] step:4181/10000 train_time:283554ms step_avg:67.82ms
+[2025-07-07 09:02:55] [Rank 0] step:4201/10000 train_time:284919ms step_avg:67.82ms
+[2025-07-07 09:02:57] [Rank 0] step:4221/10000 train_time:286286ms step_avg:67.82ms
+[2025-07-07 09:02:58] [Rank 0] step:4241/10000 train_time:287652ms step_avg:67.83ms
+[2025-07-07 09:02:59] [Rank 0] step:4261/10000 train_time:289020ms step_avg:67.83ms
+[2025-07-07 09:03:01] [Rank 0] step:4281/10000 train_time:290387ms step_avg:67.83ms
+[2025-07-07 09:03:02] [Rank 0] step:4301/10000 train_time:291756ms step_avg:67.83ms
+[2025-07-07 09:03:03] [Rank 0] step:4321/10000 train_time:293809ms step_avg:68.00ms
+[2025-07-07 09:03:05] [Rank 0] step:4341/10000 train_time:294545ms step_avg:67.85ms
+[2025-07-07 09:03:06] [Rank 0] step:4361/10000 train_time:295912ms step_avg:67.85ms
+[2025-07-07 09:03:08] [Rank 0] step:4381/10000 train_time:297282ms step_avg:67.86ms
+[2025-07-07 09:03:09] [Rank 0] step:4401/10000 train_time:298650ms step_avg:67.86ms
+[2025-07-07 09:03:10] [Rank 0] step:4421/10000 train_time:300018ms step_avg:67.86ms
+[2025-07-07 09:03:12] [Rank 0] step:4441/10000 train_time:301385ms step_avg:67.86ms
+[2025-07-07 09:03:13] [Rank 0] step:4461/10000 train_time:302752ms step_avg:67.87ms
+[2025-07-07 09:03:14] [Rank 0] step:4481/10000 train_time:304121ms step_avg:67.87ms
+[2025-07-07 09:03:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:03:17] [Rank 0] PRINT: step:4500/10000 train_loss:1.1699 val_loss:1.1918 train_time:306111ms step_avg:68.02ms
+[2025-07-07 09:03:17] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:03:17] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
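The divisibility warning above is plain integer arithmetic on the configured sizes: val_tokens (1966080) is exactly 7.5 times val_batch_size (262144), so a validation loop that runs `val_tokens // val_batch_size` full batches leaves half a batch unread. A minimal sketch of that check (editor's example; `val_batch_size` is taken from the warning text itself, not from the config shown in this diff):

```python
# Hypothetical re-derivation of the warning printed above.
val_tokens = 1966080      # from hyperparameters
val_batch_size = 262144   # from the warning message

full_batches, remainder = divmod(val_tokens, val_batch_size)
print(full_batches)   # 7 full validation batches
print(remainder)      # 131072 tokens (half a batch) would be skipped
assert val_tokens / val_batch_size == 7.5
```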
+[2025-07-07 09:03:17] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:08:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:08:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:08:37] [Rank 0] Total Loss: 5.4455
+[2025-07-07 09:08:37] [Rank 0] Total FTA: 0.2146
+[2025-07-07 09:08:37] [Rank 0] Group 0 Loss: 5.6629
+[2025-07-07 09:08:37] [Rank 0] Group 1 Loss: 5.6225
+[2025-07-07 09:08:37] [Rank 0] Group 2 Loss: 4.9651
+[2025-07-07 09:08:37] [Rank 0] Group 3 Loss: 5.5847
+[2025-07-07 09:08:37] [Rank 0] Group 4 Loss: 5.4834
+[2025-07-07 09:08:37] [Rank 0] Group 5 Loss: 5.3616
+[2025-07-07 09:08:37] [Rank 0] Group 6 Loss: 5.3681
+[2025-07-07 09:08:37] [Rank 0] Group 7 Loss: 5.4291
+[2025-07-07 09:08:37] [Rank 0] Group 8 Loss: 5.4115
+[2025-07-07 09:08:37] [Rank 0] Group 9 Loss: 5.4108
+[2025-07-07 09:08:37] [Rank 0] Group 10 Loss: 5.4286
+[2025-07-07 09:08:37] [Rank 0] Group 11 Loss: 5.4263
+[2025-07-07 09:08:37] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 09:08:37] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-07 09:08:37] [Rank 0] Group 2 FTA: 0.3125
+[2025-07-07 09:08:37] [Rank 0] Group 3 FTA: 0.1484
+[2025-07-07 09:08:37] [Rank 0] Group 4 FTA: 0.2083
+[2025-07-07 09:08:37] [Rank 0] Group 5 FTA: 0.2474
+[2025-07-07 09:08:37] [Rank 0] Group 6 FTA: 0.2786
+[2025-07-07 09:08:37] [Rank 0] Group 7 FTA: 0.2578
+[2025-07-07 09:08:37] [Rank 0] Group 8 FTA: 0.2682
+[2025-07-07 09:08:37] [Rank 0] Group 9 FTA: 0.2461
+[2025-07-07 09:08:37] [Rank 0] Group 10 FTA: 0.2676
+[2025-07-07 09:08:37] [Rank 0] Group 11 FTA: 0.2812
+[2025-07-07 09:08:38] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 09:08:38] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 09:08:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 09:08:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 09:08:39] [Rank 0] step:4501/10000 train_time:306231ms step_avg:68.04ms
+[2025-07-07 09:08:41] [Rank 0] step:4521/10000 train_time:307569ms step_avg:68.03ms
+[2025-07-07 09:08:42] [Rank 0] step:4541/10000 train_time:308931ms step_avg:68.03ms
+[2025-07-07 09:08:43] [Rank 0] step:4561/10000 train_time:310293ms step_avg:68.03ms
+[2025-07-07 09:08:45] [Rank 0] step:4581/10000 train_time:311657ms step_avg:68.03ms
+[2025-07-07 09:08:46] [Rank 0] step:4601/10000 train_time:313021ms step_avg:68.03ms
+[2025-07-07 09:08:47] [Rank 0] step:4621/10000 train_time:314385ms step_avg:68.03ms
+[2025-07-07 09:08:49] [Rank 0] step:4641/10000 train_time:315749ms step_avg:68.03ms
+[2025-07-07 09:08:50] [Rank 0] step:4661/10000 train_time:317114ms step_avg:68.04ms
+[2025-07-07 09:08:52] [Rank 0] step:4681/10000 train_time:319159ms step_avg:68.18ms
+[2025-07-07 09:08:53] [Rank 0] step:4701/10000 train_time:319894ms step_avg:68.05ms
+[2025-07-07 09:08:54] [Rank 0] step:4721/10000 train_time:321260ms step_avg:68.05ms
+[2025-07-07 09:08:56] [Rank 0] step:4741/10000 train_time:322627ms step_avg:68.05ms
+[2025-07-07 09:08:57] [Rank 0] step:4761/10000 train_time:323993ms step_avg:68.05ms
+[2025-07-07 09:08:58] [Rank 0] step:4781/10000 train_time:325362ms step_avg:68.05ms
+[2025-07-07 09:09:00] [Rank 0] step:4801/10000 train_time:326731ms step_avg:68.05ms
+[2025-07-07 09:09:01] [Rank 0] step:4821/10000 train_time:328100ms step_avg:68.06ms
+[2025-07-07 09:09:03] [Rank 0] step:4841/10000 train_time:329470ms step_avg:68.06ms
+[2025-07-07 09:09:04] [Rank 0] step:4861/10000 train_time:330889ms step_avg:68.07ms
+[2025-07-07 09:09:05] [Rank 0] step:4881/10000 train_time:332247ms step_avg:68.07ms
+[2025-07-07 09:09:07] [Rank 0] step:4901/10000 train_time:333619ms step_avg:68.07ms
+[2025-07-07 09:09:08] [Rank 0] step:4921/10000 train_time:334992ms step_avg:68.07ms
+[2025-07-07 09:09:09] [Rank 0] step:4941/10000 train_time:336364ms step_avg:68.08ms
+[2025-07-07 09:09:11] [Rank 0] step:4961/10000 train_time:337736ms step_avg:68.08ms
+[2025-07-07 09:09:12] [Rank 0] step:4981/10000 train_time:339107ms step_avg:68.08ms
+[2025-07-07 09:09:13] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:09:14] [Rank 0] PRINT: step:5000/10000 train_loss:1.1234 val_loss:1.1645 train_time:341105ms step_avg:68.22ms
+[2025-07-07 09:09:14] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:09:15] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
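The sampler requests ~5000 items but every evaluation in these logs reports an actual size of 5633. A minimal sketch of the stratified sampler, mirroring the `run_detailed_evaluation` code embedded later in this log (names are the script's own): the `max(1, ...)` floor keeps at least one item per class, so with many small classes the total overshoots the requested count.

```python
import random
from collections import defaultdict

def stratified_sample(qa_data, num_samples):
    """Proportional per-class sampling with a floor of one item per class."""
    data_by_class = defaultdict(list)
    for item in qa_data:
        data_by_class[item['class_id']].append(item)
    ratio = num_samples / len(qa_data)
    sampled = []
    for items in data_by_class.values():
        # The max(1, ...) floor keeps rare classes represented, which is why
        # a ~5000-sample request yields 5633 items in these runs.
        k = max(1, int(len(items) * ratio))
        sampled.extend(random.sample(items, min(len(items), k)))
    return sampled
```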
+[2025-07-07 09:09:15] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 09:14:37] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 09:14:37] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 09:14:37] [Rank 0] Total Loss: 5.4567
+[2025-07-07 09:14:37] [Rank 0] Total FTA: 0.3178
+[2025-07-07 09:14:37] [Rank 0] Group 0 Loss: 5.6196
+[2025-07-07 09:14:37] [Rank 0] Group 1 Loss: 5.2390
+[2025-07-07 09:14:37] [Rank 0] Group 2 Loss: 5.0280
+[2025-07-07 09:14:37] [Rank 0] Group 3 Loss: 5.5072
+[2025-07-07 09:14:37] [Rank 0] Group 4 Loss: 5.6094
+[2025-07-07 09:14:37] [Rank 0] Group 5 Loss: 5.4791
+[2025-07-07 09:14:37] [Rank 0] Group 6 Loss: 5.3864
+[2025-07-07 09:14:37] [Rank 0] Group 7 Loss: 5.4995
+[2025-07-07 09:14:37] [Rank 0] Group 8 Loss: 5.3999
+[2025-07-07 09:14:37] [Rank 0] Group 9 Loss: 5.4693
+[2025-07-07 09:14:37] [Rank 0] Group 10 Loss: 5.5239
+[2025-07-07 09:14:37] [Rank 0] Group 11 Loss: 5.4872
+[2025-07-07 09:14:37] [Rank 0] Group 0 FTA: 0.3381
+[2025-07-07 09:14:37] [Rank 0] Group 1 FTA: 0.3307
+[2025-07-07 09:14:37] [Rank 0] Group 2 FTA: 0.2161
+[2025-07-07 09:14:37] [Rank 0] Group 3 FTA: 0.3906
+[2025-07-07 09:14:37] [Rank 0] Group 4 FTA: 0.2865
+[2025-07-07 09:14:37] [Rank 0] Group 5 FTA: 0.2682
+[2025-07-07 09:14:37] [Rank 0] Group 6 FTA: 0.3333
+[2025-07-07 09:14:37] [Rank 0] Group 7 FTA: 0.3385
+[2025-07-07 09:14:37] [Rank 0] Group 8 FTA: 0.2969
+[2025-07-07 09:14:37] [Rank 0] Group 9 FTA: 0.3086
+[2025-07-07 09:14:37] [Rank 0] Group 10 FTA: 0.3633
+[2025-07-07 09:14:37] [Rank 0] Group 11 FTA: 0.3125
+[2025-07-07 09:14:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_loss_curves.png
+[2025-07-07 09:14:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/per_class_acc_curves.png
+[2025-07-07 09:14:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_loss_curve.png
+[2025-07-07 09:14:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.002_seed_48/total_acc_curve.png
+[2025-07-07 09:14:38] [Rank 0] step:5001/10000 train_time:341115ms step_avg:68.21ms
+[2025-07-07 09:14:39] [Rank 0] step:5021/10000 train_time:341892ms step_avg:68.09ms
+[2025-07-07 09:14:41] [Rank 0] step:5041/10000 train_time:343918ms step_avg:68.22ms
+[2025-07-07 09:14:42] [Rank 0] step:5061/10000 train_time:344919ms step_avg:68.15ms
+[2025-07-07 09:14:44] [Rank 0] step:5081/10000 train_time:346016ms step_avg:68.10ms
+[2025-07-07 09:14:45] [Rank 0] step:5101/10000 train_time:347380ms step_avg:68.10ms
+[2025-07-07 09:14:46] [Rank 0] step:5121/10000 train_time:348744ms step_avg:68.10ms
+[2025-07-07 09:14:48] [Rank 0] step:5141/10000 train_time:350108ms step_avg:68.10ms
+[2025-07-07 09:14:49] [Rank 0] step:5161/10000 train_time:351474ms step_avg:68.10ms
+[2025-07-07 09:14:50] [Rank 0] step:5181/10000 train_time:352838ms step_avg:68.10ms
+[2025-07-07 09:14:52] [Rank 0] step:5201/10000 train_time:354205ms step_avg:68.10ms
+[2025-07-07 09:14:53] [Rank 0] step:5221/10000 train_time:355822ms step_avg:68.15ms
+[2025-07-07 09:14:55] [Rank 0] step:5241/10000 train_time:356974ms step_avg:68.11ms
+[2025-07-07 09:14:56] [Rank 0] step:5261/10000 train_time:358342ms step_avg:68.11ms
+[2025-07-07 09:14:57] [Rank 0] step:5281/10000 train_time:359711ms step_avg:68.11ms
+[2025-07-07 09:14:59] [Rank 0] step:5301/10000 train_time:361079ms step_avg:68.12ms
+[2025-07-07 09:15:00] [Rank 0] step:5321/10000 train_time:362447ms step_avg:68.12ms
+[2025-07-07 09:15:01] [Rank 0] step:5341/10000 train_time:363815ms step_avg:68.12ms
+[2025-07-07 09:15:03] [Rank 0] step:5361/10000 train_time:365184ms step_avg:68.12ms
+[2025-07-07 09:15:04] [Rank 0] step:5381/10000 train_time:366553ms step_avg:68.12ms
+[2025-07-07 09:15:06] [Rank 0] step:5401/10000 train_time:367922ms step_avg:68.12ms
+[2025-07-07 09:15:07] [Rank 0] step:5421/10000 train_time:369329ms step_avg:68.13ms
+[2025-07-07 09:15:08] [Rank 0] step:5441/10000 train_time:370698ms step_avg:68.13ms
+[2025-07-07 09:15:10] [Rank 0] step:5461/10000 train_time:372068ms step_avg:68.13ms
+[2025-07-07 09:15:11] [Rank 0] step:5481/10000 train_time:373442ms step_avg:68.13ms
+[2025-07-07 09:15:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 09:15:13] [Rank 0] PRINT: step:5500/10000 train_loss:1.0729 val_loss:1.1351 train_time:375433ms step_avg:68.26ms
+[2025-07-07 09:15:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 09:15:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
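The Group 0-11 labels in the evaluation results come from the power-law class construction used by the script (`generate_powerlaw_selection_counts`, reproduced in the code logged below): group 0 holds a single class with 2^m samples, and each group g >= 1 holds 2^(g-1) classes with 2^(m-g) samples apiece. A worked example for small m (editor's sketch; groups 0-11 in these logs are consistent with m = 11, though m itself is not shown in this excerpt):

```python
def generate_powerlaw_selection_counts(m: int):
    # Same construction as in the training script below.
    selection_counts, class_groups = {}, []
    class_id = 0
    for group_id in range(m + 1):
        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
        samples_per_class = 2 ** (m - group_id)
        if samples_per_class < 1:
            continue
        for _ in range(num_classes):
            selection_counts[class_id] = samples_per_class
            class_groups.append(group_id)
            class_id += 1
    return selection_counts, class_groups

counts, groups = generate_powerlaw_selection_counts(3)
print(counts)  # {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
print(groups)  # [0, 1, 2, 2, 3, 3, 3, 3]
```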
+[2025-07-07 09:15:13] [Rank 0] Evaluation set size after sampling: 5633
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/config.json b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4cc9ba23be8e8273fa3b4751da897a33dcfad8e
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 42,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "d41039bf-edd7-4094-90e2-41790dbb7e54",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..3c9175d9392be7226cd545611b605b64dd4d0582
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3edcae781333ce0dd6c08706e49f603bfae21ab13adda18a90f1065ed17f3b02
+size 418209
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..d2101330793ff4227926a79879bf12436ebd7420
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:946fca8a38c15ca6d5d03ff5310b81ef8926f724205c2b928640c42b9be7349f
+size 479307
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..637cada0196e5449f7651cc21d71d1a00f37c408
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:62bb773a894516c43888592f481cdf6acef1905bd757e8511831246b286bcef0
+size 104100
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..bd499ee673d56b41ca64d330825e4a49ea988e5c
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0895f1b7f90a861df41c9ae04166713329e3a1d3e726b8191c3b37329e524bbe
+size 122576
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_72216744-89a4-453e-bea1-f0f245f4fa63.txt b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_72216744-89a4-453e-bea1-f0f245f4fa63.txt
new file mode 100644
index 0000000000000000000000000000000000000000..52bd0016707bfc465ee4c08f6f706dde358f73f0
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_72216744-89a4-453e-bea1-f0f245f4fa63.txt
@@ -0,0 +1,5132 @@
+[2025-07-06 16:59:54] [Rank 0] PRINT: --- Script Start: Sun Jul 6 16:59:54 2025 ---
+[2025-07-06 16:59:54] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-06 16:59:54] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-06 16:59:54] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-06 16:59:54] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42
+[2025-07-06 16:59:54] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards so multi-epoch training simply wraps around
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam). "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP). "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn). "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append each message to the run's log file exactly once
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # truncate the cached set to num_samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(
+                f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f" Expected : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0: num_classes = 1
+        else: num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1: continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int) # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int) # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    # =================================================================
+    # <<<<< Restored Missing Code >>>>>
+    # =================================================================
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+
+
+
+
+########################################
+#    Construct model and optimizer     #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks) # target_seq=None
+        model.train()
+
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
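+# [Editor's note] Hedged sketch of how the two running-loss accumulators here
+# (train_loss_sum above, train_step_count below) are turned into the reported
+# training loss in the validation block further down: the per-rank mean is
+# computed first, then averaged across ranks with an AVG all-reduce. The function
+# name is illustrative, not part of the original script, and it is never called.
+def _demo_global_mean_loss(loss_sum, step_count):
+    local_mean = loss_sum / step_count          # per-rank mean
+    dist.all_reduce(local_mean, op=dist.ReduceOp.AVG)  # average the means across ranks
+    return local_mean.item()
+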
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_loss_per_token:{train_loss_per_token:.6f} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-06 16:59:54] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) 
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
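+# [Editor's note] Illustrative summary (not part of the original script) of the
+# Muon/Adam split selected by --optimizer_mode for the "qkvo" parameterization,
+# restating the help text above. Embeddings, lm_head and scalar parameters always
+# stay on Adam regardless of mode.
+_DEMO_MODE_SPLIT = {
+    0: ("Muon: QK+VO attn + all MLP", "Adam: no extra matrices"),
+    1: ("Muon: QK attn",              "Adam: VO attn + MLP"),
+    2: ("Muon: VO attn",              "Adam: QK attn + MLP"),
+    3: ("Muon: all attn",             "Adam: MLP"),
+    4: ("Muon: MLP",                  "Adam: all attn"),
+    5: ("Muon: none",                 "Adam: all matrices"),
+    6: ("Muon: MLP W_2",              "Adam: attn + MLP W_1"),
+    7: ("Muon: VO attn + MLP",        "Adam: QK attn"),
+    8: ("Muon: VO attn + MLP W_2",    "Adam: QK attn + MLP W_1"),
+}
+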
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function for per-class and total metric histories."""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
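+# [Editor's note] Minimal usage sketch for plot_curves with illustrative values.
+# Per-class histories are nested dicts keyed by group id, then by step, both as
+# strings; total (scalar) histories are flat {step: value} dicts. The calls are
+# commented out so nothing runs at import time; the _demo_* names and file names
+# are hypothetical.
+_demo_per_class_history = {"0": {"500": 2.31, "1000": 1.87},
+                           "1": {"500": 2.64, "1000": 2.10}}
+_demo_total_history = {"500": 2.48, "1000": 1.99}
+# plot_curves(_demo_per_class_history, "demo_per_class.png", "Per-Class Loss", "Loss")
+# plot_curves(_demo_total_history, "demo_total.png", "Total Loss", "Loss")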
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        #dict(params=head_params, lr=0.22),
+        #dict(params=embed_params, lr=0.6),
+        #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr) # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices: # Only add group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True) # add weight_decay=0.01 to Adam
+    optimizers = [optimizer1] # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params: # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
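+# Illustrative schedule values for this run's config (num_iterations=10000,
+# cooldown_frac=0.8): the multiplier stays at 1.0 for the first 2000 steps
+# (x < 0.2), then decays linearly toward 0.1. E.g. at step 5000: x = 0.5,
+# w = (1 - 0.5) / 0.8 = 0.625, multiplier = 0.625*1.0 + 0.375*0.1 = 0.6625.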
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
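+# Illustrative values: at x=0 the window is max(128, 128) = 128 tokens (1 block);
+# at x=0.5, 1728*0.5 = 864 rounds up to 896 (7 blocks of 128); at x=1.0, 1728
+# rounds up to 1792 (14 blocks), since 1728 is not itself a multiple of 128.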
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        #if world_size > 1:
+        #    ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #    ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #    ft_total_tensor = torch.tensor(ft_total, device=device)
+        #    dist.broadcast(ft_acc_tensor, 0)
+        #    dist.broadcast(ft_correct_tensor, 0)
+        #    dist.broadcast(ft_total_tensor, 0)
+        #    first_token_acc = ft_acc_tensor.item()
+        #    ft_correct = int(ft_correct_tensor.item())
+        #    ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
+
+        if world_size > 1:
+            dist.barrier()
+
+        if master_process and args.save_checkpoint and step > 0:
+            if run_dir_path_str:
+                checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+                checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+
+                checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt"
+
+                log_checkpoint = dict(
+                    step=step,
+                    code=code,
+                    model=model_compiled.state_dict(),
+                    optimizers=[opt.state_dict() for opt in optimizers]
+                )
+
+                torch.save(log_checkpoint, str(checkpoint_path))
+                print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+            else:
+                print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
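+    # Muon momentum warmup: frac = min(step/300, 1) ramps momentum linearly
+    # from 0.85 at step 0 to 0.95 by step 300 (e.g. step 150 -> 0.90).
+    # Inactive in this particular run: mode 5 creates no Muon optimizer,
+    # so optimizer2 is None.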
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-06 16:59:54] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-06 16:59:54] [Rank 0] PRINT: First-token accuracy tokenizer loaded. +[2025-07-06 16:59:54] [Rank 0] PRINT: Constructing model... +[2025-07-06 16:59:54] [Rank 0] PRINT: Constructing model... +[2025-07-06 16:59:57] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 16:59:57] [Rank 0] PRINT: Broadcasting model parameters... +[2025-07-06 16:59:57] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 16:59:57] [Rank 0] PRINT: Model constructed and broadcasted. +[2025-07-06 16:59:57] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 16:59:57] [Rank 0] PRINT: Testing model forward function: +[2025-07-06 16:59:58] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 16:59:58] [Rank 0] PRINT: Model test - Result type: +[2025-07-06 16:59:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 16:59:58] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304]) +[2025-07-06 16:59:58] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 16:59:58] [Rank 0] PRINT: Saved original model reference for inference. +[2025-07-06 16:59:58] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 16:59:58] [Rank 0] PRINT: Testing model with target_seq=None... +[2025-07-06 16:59:58] [Rank 0] PRINT: Model returns: +[2025-07-06 16:59:58] [Rank 0] PRINT: Model returns: +[2025-07-06 16:59:58] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 16:59:58] [Rank 0] PRINT: Collecting parameters for optimizers... +[2025-07-06 16:59:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-06 16:59:58] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5 +[2025-07-06 16:59:58] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-06 16:59:58] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005). +[2025-07-06 16:59:58] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-06 16:59:58] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices). +[2025-07-06 16:59:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-06 16:59:58] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1 +[2025-07-06 16:59:58] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 16:59:58] [Rank 0] PRINT: Compiling model with TorchInductor... +[2025-07-06 16:59:58] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 16:59:58] [Rank 0] PRINT: Model compilation complete. +[2025-07-06 16:59:58] [Rank 0] PRINT: Starting warmup... +[2025-07-06 16:59:58] [Rank 0] PRINT: Starting warmup... +[2025-07-06 17:01:03] [Rank 0] PRINT: Warmup complete. +[2025-07-06 17:01:03] [Rank 0] PRINT: Warmup complete. +[2025-07-06 17:01:03] [Rank 0] PRINT: Starting training... +[2025-07-06 17:01:03] [Rank 0] PRINT: Starting training... +[2025-07-06 17:01:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 17:01:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 17:01:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 17:01:10] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-06 17:01:12] [Rank 0] step:21/10000 train_time:1017ms step_avg:48.41ms +[2025-07-06 17:01:12] [Rank 0] step:21/10000 train_time:1017ms step_avg:48.41ms +[2025-07-06 17:01:13] [Rank 0] step:41/10000 train_time:2332ms step_avg:56.89ms +[2025-07-06 17:01:13] [Rank 0] step:41/10000 train_time:2332ms step_avg:56.89ms +[2025-07-06 17:01:14] [Rank 0] step:61/10000 train_time:3648ms step_avg:59.81ms +[2025-07-06 17:01:14] [Rank 0] step:61/10000 train_time:3648ms step_avg:59.81ms +[2025-07-06 17:01:16] [Rank 0] step:81/10000 train_time:4980ms step_avg:61.48ms +[2025-07-06 17:01:16] [Rank 0] step:81/10000 train_time:4980ms step_avg:61.48ms +[2025-07-06 17:01:17] [Rank 0] step:101/10000 train_time:6304ms step_avg:62.42ms +[2025-07-06 17:01:17] [Rank 0] step:101/10000 train_time:6304ms step_avg:62.42ms +[2025-07-06 17:01:18] [Rank 0] step:121/10000 train_time:7638ms step_avg:63.13ms +[2025-07-06 17:01:18] [Rank 0] step:121/10000 train_time:7638ms step_avg:63.13ms +[2025-07-06 17:01:20] [Rank 0] step:141/10000 train_time:8968ms step_avg:63.60ms +[2025-07-06 17:01:20] [Rank 0] step:141/10000 train_time:8968ms step_avg:63.60ms +[2025-07-06 17:01:21] [Rank 0] step:161/10000 train_time:10297ms step_avg:63.96ms +[2025-07-06 17:01:21] [Rank 0] step:161/10000 train_time:10297ms step_avg:63.96ms +[2025-07-06 17:01:23] [Rank 0] step:181/10000 train_time:11679ms step_avg:64.53ms +[2025-07-06 17:01:23] [Rank 0] step:181/10000 train_time:11679ms step_avg:64.53ms +[2025-07-06 17:01:24] [Rank 0] step:201/10000 train_time:13110ms step_avg:65.22ms +[2025-07-06 17:01:24] [Rank 0] step:201/10000 train_time:13110ms step_avg:65.22ms +[2025-07-06 17:01:25] [Rank 0] step:221/10000 train_time:14444ms step_avg:65.36ms +[2025-07-06 17:01:25] [Rank 0] step:221/10000 train_time:14444ms step_avg:65.36ms +[2025-07-06 17:01:27] [Rank 0] step:241/10000 train_time:15778ms step_avg:65.47ms +[2025-07-06 17:01:27] [Rank 0] step:241/10000 train_time:15778ms step_avg:65.47ms +[2025-07-06 17:01:28] [Rank 0] step:261/10000 train_time:17113ms step_avg:65.57ms +[2025-07-06 17:01:28] [Rank 0] step:261/10000 train_time:17113ms step_avg:65.57ms +[2025-07-06 17:01:29] [Rank 0] step:281/10000 train_time:18448ms step_avg:65.65ms +[2025-07-06 17:01:29] [Rank 0] step:281/10000 train_time:18448ms step_avg:65.65ms +[2025-07-06 17:01:31] [Rank 0] step:301/10000 train_time:19783ms step_avg:65.72ms +[2025-07-06 17:01:31] [Rank 0] step:301/10000 train_time:19783ms step_avg:65.72ms +[2025-07-06 17:01:32] [Rank 0] step:321/10000 train_time:21119ms step_avg:65.79ms +[2025-07-06 17:01:32] [Rank 0] step:321/10000 train_time:21119ms step_avg:65.79ms +[2025-07-06 17:01:33] [Rank 0] step:341/10000 train_time:22456ms step_avg:65.85ms +[2025-07-06 17:01:33] [Rank 0] step:341/10000 train_time:22456ms step_avg:65.85ms +[2025-07-06 17:01:35] [Rank 0] step:361/10000 train_time:24048ms step_avg:66.61ms +[2025-07-06 17:01:35] [Rank 0] step:361/10000 train_time:24048ms step_avg:66.61ms +[2025-07-06 17:01:36] [Rank 0] step:381/10000 train_time:25172ms step_avg:66.07ms +[2025-07-06 17:01:36] [Rank 0] step:381/10000 train_time:25172ms step_avg:66.07ms +[2025-07-06 17:01:37] [Rank 0] step:401/10000 train_time:26509ms step_avg:66.11ms +[2025-07-06 17:01:37] [Rank 0] step:401/10000 train_time:26509ms step_avg:66.11ms +[2025-07-06 17:01:39] [Rank 0] step:421/10000 train_time:27845ms step_avg:66.14ms 
+[2025-07-06 17:01:39] [Rank 0] step:421/10000 train_time:27845ms step_avg:66.14ms +[2025-07-06 17:01:40] [Rank 0] step:441/10000 train_time:29181ms step_avg:66.17ms +[2025-07-06 17:01:40] [Rank 0] step:441/10000 train_time:29181ms step_avg:66.17ms +[2025-07-06 17:01:41] [Rank 0] step:461/10000 train_time:30518ms step_avg:66.20ms +[2025-07-06 17:01:41] [Rank 0] step:461/10000 train_time:30518ms step_avg:66.20ms +[2025-07-06 17:01:43] [Rank 0] step:481/10000 train_time:31855ms step_avg:66.23ms +[2025-07-06 17:01:43] [Rank 0] step:481/10000 train_time:31855ms step_avg:66.23ms +[2025-07-06 17:01:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 17:01:44] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 17:01:45] [Rank 0] PRINT: step:500/10000 train_loss:3.5501 val_loss:1.9030 train_time:33800ms step_avg:67.60ms +[2025-07-06 17:01:45] [Rank 0] PRINT: step:500/10000 train_loss:3.5501 val_loss:1.9030 train_time:33800ms step_avg:67.60ms +[2025-07-06 17:01:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 17:01:45] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 17:01:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 17:01:45] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 17:01:45] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 17:01:45] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 17:07:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 17:07:06] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 17:07:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 17:07:06] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 17:07:06] [Rank 0] Total Loss: 4.9540 +[2025-07-06 17:07:06] [Rank 0] Total Loss: 4.9540 +[2025-07-06 17:07:06] [Rank 0] Total FTA: 0.0900 +[2025-07-06 17:07:06] [Rank 0] Total FTA: 0.0900 +[2025-07-06 17:07:06] [Rank 0] Group 0 Loss: 5.1197 +[2025-07-06 17:07:06] [Rank 0] Group 0 Loss: 5.1197 +[2025-07-06 17:07:06] [Rank 0] Group 1 Loss: 4.8153 +[2025-07-06 17:07:06] [Rank 0] Group 1 Loss: 4.8153 +[2025-07-06 17:07:06] [Rank 0] Group 2 Loss: 4.8444 +[2025-07-06 17:07:06] [Rank 0] Group 2 Loss: 4.8444 +[2025-07-06 17:07:06] [Rank 0] Group 3 Loss: 4.9467 +[2025-07-06 17:07:06] [Rank 0] Group 3 Loss: 4.9467 +[2025-07-06 17:07:06] [Rank 0] Group 4 Loss: 4.9317 +[2025-07-06 17:07:06] [Rank 0] Group 4 Loss: 4.9317 +[2025-07-06 17:07:06] [Rank 0] Group 5 Loss: 4.8898 +[2025-07-06 17:07:06] [Rank 0] Group 5 Loss: 4.8898 +[2025-07-06 17:07:06] [Rank 0] Group 6 Loss: 4.8736 +[2025-07-06 17:07:06] [Rank 0] Group 6 Loss: 4.8736 +[2025-07-06 17:07:06] [Rank 0] Group 7 Loss: 5.0091 +[2025-07-06 17:07:06] [Rank 0] Group 7 Loss: 5.0091 +[2025-07-06 17:07:06] [Rank 0] Group 8 Loss: 4.9285 +[2025-07-06 17:07:06] [Rank 0] Group 8 Loss: 4.9285 +[2025-07-06 17:07:06] [Rank 0] Group 9 Loss: 4.8865 +[2025-07-06 17:07:06] [Rank 0] Group 9 Loss: 4.8865 +[2025-07-06 17:07:06] [Rank 0] Group 10 Loss: 4.9218 +[2025-07-06 17:07:06] [Rank 0] Group 10 Loss: 4.9218 +[2025-07-06 17:07:06] [Rank 0] Group 11 Loss: 5.0098 +[2025-07-06 17:07:06] [Rank 0] Group 11 Loss: 5.0098 +[2025-07-06 17:07:06] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-06 17:07:06] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-06 
17:07:06] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 17:07:06] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 17:07:06] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-06 17:07:06] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-06 17:07:06] [Rank 0] Group 3 FTA: 0.0990 +[2025-07-06 17:07:06] [Rank 0] Group 3 FTA: 0.0990 +[2025-07-06 17:07:06] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-06 17:07:06] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-06 17:07:06] [Rank 0] Group 5 FTA: 0.1146 +[2025-07-06 17:07:06] [Rank 0] Group 5 FTA: 0.1146 +[2025-07-06 17:07:06] [Rank 0] Group 6 FTA: 0.0859 +[2025-07-06 17:07:06] [Rank 0] Group 6 FTA: 0.0859 +[2025-07-06 17:07:06] [Rank 0] Group 7 FTA: 0.0964 +[2025-07-06 17:07:06] [Rank 0] Group 7 FTA: 0.0964 +[2025-07-06 17:07:06] [Rank 0] Group 8 FTA: 0.1016 +[2025-07-06 17:07:06] [Rank 0] Group 8 FTA: 0.1016 +[2025-07-06 17:07:06] [Rank 0] Group 9 FTA: 0.0938 +[2025-07-06 17:07:06] [Rank 0] Group 9 FTA: 0.0938 +[2025-07-06 17:07:06] [Rank 0] Group 10 FTA: 0.0840 +[2025-07-06 17:07:06] [Rank 0] Group 10 FTA: 0.0840 +[2025-07-06 17:07:06] [Rank 0] Group 11 FTA: 0.0928 +[2025-07-06 17:07:06] [Rank 0] Group 11 FTA: 0.0928 +[2025-07-06 17:07:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 17:07:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 17:07:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 17:07:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 17:07:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 17:07:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 17:07:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 17:07:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 17:07:07] [Rank 0] step:501/10000 train_time:33813ms step_avg:67.49ms +[2025-07-06 17:07:07] [Rank 0] step:501/10000 train_time:33813ms step_avg:67.49ms +[2025-07-06 17:07:09] [Rank 0] step:521/10000 train_time:34573ms step_avg:66.36ms +[2025-07-06 17:07:09] [Rank 0] step:521/10000 train_time:34573ms step_avg:66.36ms +[2025-07-06 17:07:10] [Rank 0] step:541/10000 train_time:36155ms step_avg:66.83ms +[2025-07-06 17:07:10] [Rank 0] step:541/10000 train_time:36155ms step_avg:66.83ms +[2025-07-06 17:07:11] [Rank 0] step:561/10000 train_time:37303ms step_avg:66.49ms +[2025-07-06 17:07:11] [Rank 0] step:561/10000 train_time:37303ms step_avg:66.49ms +[2025-07-06 17:07:13] [Rank 0] step:581/10000 train_time:38635ms step_avg:66.50ms +[2025-07-06 17:07:13] [Rank 0] step:581/10000 train_time:38635ms step_avg:66.50ms +[2025-07-06 17:07:14] [Rank 0] step:601/10000 train_time:39966ms step_avg:66.50ms +[2025-07-06 17:07:14] [Rank 0] step:601/10000 train_time:39966ms step_avg:66.50ms +[2025-07-06 17:07:15] [Rank 0] step:621/10000 train_time:41298ms step_avg:66.50ms +[2025-07-06 17:07:15] [Rank 0] step:621/10000 train_time:41298ms step_avg:66.50ms 
+[2025-07-06 17:07:17] [Rank 0] step:641/10000 train_time:42633ms step_avg:66.51ms +[2025-07-06 17:07:17] [Rank 0] step:641/10000 train_time:42633ms step_avg:66.51ms +[2025-07-06 17:07:18] [Rank 0] step:661/10000 train_time:43967ms step_avg:66.52ms +[2025-07-06 17:07:18] [Rank 0] step:661/10000 train_time:43967ms step_avg:66.52ms +[2025-07-06 17:07:19] [Rank 0] step:681/10000 train_time:45302ms step_avg:66.52ms +[2025-07-06 17:07:19] [Rank 0] step:681/10000 train_time:45302ms step_avg:66.52ms +[2025-07-06 17:07:21] [Rank 0] step:701/10000 train_time:46636ms step_avg:66.53ms +[2025-07-06 17:07:21] [Rank 0] step:701/10000 train_time:46636ms step_avg:66.53ms +[2025-07-06 17:07:22] [Rank 0] step:721/10000 train_time:48020ms step_avg:66.60ms +[2025-07-06 17:07:22] [Rank 0] step:721/10000 train_time:48020ms step_avg:66.60ms +[2025-07-06 17:07:23] [Rank 0] step:741/10000 train_time:49370ms step_avg:66.63ms +[2025-07-06 17:07:23] [Rank 0] step:741/10000 train_time:49370ms step_avg:66.63ms +[2025-07-06 17:07:25] [Rank 0] step:761/10000 train_time:50710ms step_avg:66.64ms +[2025-07-06 17:07:25] [Rank 0] step:761/10000 train_time:50710ms step_avg:66.64ms +[2025-07-06 17:07:26] [Rank 0] step:781/10000 train_time:52054ms step_avg:66.65ms +[2025-07-06 17:07:26] [Rank 0] step:781/10000 train_time:52054ms step_avg:66.65ms +[2025-07-06 17:07:27] [Rank 0] step:801/10000 train_time:53399ms step_avg:66.67ms +[2025-07-06 17:07:27] [Rank 0] step:801/10000 train_time:53399ms step_avg:66.67ms +[2025-07-06 17:07:29] [Rank 0] step:821/10000 train_time:54746ms step_avg:66.68ms +[2025-07-06 17:07:29] [Rank 0] step:821/10000 train_time:54746ms step_avg:66.68ms +[2025-07-06 17:07:30] [Rank 0] step:841/10000 train_time:56093ms step_avg:66.70ms +[2025-07-06 17:07:30] [Rank 0] step:841/10000 train_time:56093ms step_avg:66.70ms +[2025-07-06 17:07:31] [Rank 0] step:861/10000 train_time:57440ms step_avg:66.71ms +[2025-07-06 17:07:31] [Rank 0] step:861/10000 train_time:57440ms step_avg:66.71ms +[2025-07-06 17:07:33] [Rank 0] step:881/10000 train_time:58789ms step_avg:66.73ms +[2025-07-06 17:07:33] [Rank 0] step:881/10000 train_time:58789ms step_avg:66.73ms +[2025-07-06 17:07:34] [Rank 0] step:901/10000 train_time:60188ms step_avg:66.80ms +[2025-07-06 17:07:34] [Rank 0] step:901/10000 train_time:60188ms step_avg:66.80ms +[2025-07-06 17:07:36] [Rank 0] step:921/10000 train_time:61543ms step_avg:66.82ms +[2025-07-06 17:07:36] [Rank 0] step:921/10000 train_time:61543ms step_avg:66.82ms +[2025-07-06 17:07:37] [Rank 0] step:941/10000 train_time:62891ms step_avg:66.83ms +[2025-07-06 17:07:37] [Rank 0] step:941/10000 train_time:62891ms step_avg:66.83ms +[2025-07-06 17:07:38] [Rank 0] step:961/10000 train_time:64239ms step_avg:66.85ms +[2025-07-06 17:07:38] [Rank 0] step:961/10000 train_time:64239ms step_avg:66.85ms +[2025-07-06 17:07:40] [Rank 0] step:981/10000 train_time:65589ms step_avg:66.86ms +[2025-07-06 17:07:40] [Rank 0] step:981/10000 train_time:65589ms step_avg:66.86ms +[2025-07-06 17:07:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 17:07:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-06 17:07:42] [Rank 0] PRINT: step:1000/10000 train_loss:1.7779 val_loss:1.6623 train_time:67551ms step_avg:67.55ms +[2025-07-06 17:07:42] [Rank 0] PRINT: step:1000/10000 train_loss:1.7779 val_loss:1.6623 train_time:67551ms step_avg:67.55ms +[2025-07-06 17:07:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 17:07:42] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 17:07:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 17:07:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 17:07:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 17:07:42] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 17:13:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 17:13:03] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 17:13:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 17:13:03] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 17:13:03] [Rank 0] Total Loss: 5.4370 +[2025-07-06 17:13:03] [Rank 0] Total Loss: 5.4370 +[2025-07-06 17:13:03] [Rank 0] Total FTA: 0.0911 +[2025-07-06 17:13:03] [Rank 0] Total FTA: 0.0911 +[2025-07-06 17:13:03] [Rank 0] Group 0 Loss: 5.7301 +[2025-07-06 17:13:03] [Rank 0] Group 0 Loss: 5.7301 +[2025-07-06 17:13:03] [Rank 0] Group 1 Loss: 5.3506 +[2025-07-06 17:13:03] [Rank 0] Group 1 Loss: 5.3506 +[2025-07-06 17:13:03] [Rank 0] Group 2 Loss: 5.2077 +[2025-07-06 17:13:03] [Rank 0] Group 2 Loss: 5.2077 +[2025-07-06 17:13:03] [Rank 0] Group 3 Loss: 5.3738 +[2025-07-06 17:13:03] [Rank 0] Group 3 Loss: 5.3738 +[2025-07-06 17:13:03] [Rank 0] Group 4 Loss: 5.3894 +[2025-07-06 17:13:03] [Rank 0] Group 4 Loss: 5.3894 +[2025-07-06 17:13:03] [Rank 0] Group 5 Loss: 5.3187 +[2025-07-06 17:13:03] [Rank 0] Group 5 Loss: 5.3187 +[2025-07-06 17:13:03] [Rank 0] Group 6 Loss: 5.4177 +[2025-07-06 17:13:03] [Rank 0] Group 6 Loss: 5.4177 +[2025-07-06 17:13:03] [Rank 0] Group 7 Loss: 5.4272 +[2025-07-06 17:13:03] [Rank 0] Group 7 Loss: 5.4272 +[2025-07-06 17:13:03] [Rank 0] Group 8 Loss: 5.4537 +[2025-07-06 17:13:03] [Rank 0] Group 8 Loss: 5.4537 +[2025-07-06 17:13:03] [Rank 0] Group 9 Loss: 5.3388 +[2025-07-06 17:13:03] [Rank 0] Group 9 Loss: 5.3388 +[2025-07-06 17:13:03] [Rank 0] Group 10 Loss: 5.4016 +[2025-07-06 17:13:03] [Rank 0] Group 10 Loss: 5.4016 +[2025-07-06 17:13:03] [Rank 0] Group 11 Loss: 5.4679 +[2025-07-06 17:13:03] [Rank 0] Group 11 Loss: 5.4679 +[2025-07-06 17:13:03] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-06 17:13:03] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-06 17:13:03] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 17:13:03] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 17:13:03] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-06 17:13:03] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-06 17:13:03] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-06 17:13:03] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-06 17:13:03] [Rank 0] Group 4 FTA: 0.0573 +[2025-07-06 17:13:03] [Rank 0] Group 4 FTA: 0.0573 +[2025-07-06 17:13:03] [Rank 0] Group 5 FTA: 0.0547 +[2025-07-06 17:13:03] [Rank 0] Group 5 FTA: 0.0547 +[2025-07-06 17:13:03] [Rank 0] Group 6 FTA: 0.0964 +[2025-07-06 17:13:03] [Rank 0] Group 6 FTA: 0.0964 +[2025-07-06 17:13:03] [Rank 0] Group 7 FTA: 0.0885 +[2025-07-06 17:13:03] [Rank 0] Group 7 FTA: 0.0885 +[2025-07-06 17:13:03] [Rank 0] Group 8 FTA: 0.1068 +[2025-07-06 17:13:03] [Rank 0] Group 8 FTA: 0.1068 +[2025-07-06 17:13:03] [Rank 0] Group 9 FTA: 0.1016 
+[2025-07-06 17:13:03] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-06 17:13:03] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-06 17:13:03] [Rank 0] Group 10 FTA: 0.0957 +[2025-07-06 17:13:03] [Rank 0] Group 11 FTA: 0.1006 +[2025-07-06 17:13:03] [Rank 0] Group 11 FTA: 0.1006 +[2025-07-06 17:13:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 17:13:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 17:13:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 17:13:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 17:13:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 17:13:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 17:13:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 17:13:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 17:13:04] [Rank 0] step:1001/10000 train_time:67568ms step_avg:67.50ms +[2025-07-06 17:13:04] [Rank 0] step:1001/10000 train_time:67568ms step_avg:67.50ms +[2025-07-06 17:13:06] [Rank 0] step:1021/10000 train_time:68307ms step_avg:66.90ms +[2025-07-06 17:13:06] [Rank 0] step:1021/10000 train_time:68307ms step_avg:66.90ms +[2025-07-06 17:13:07] [Rank 0] step:1041/10000 train_time:69645ms step_avg:66.90ms +[2025-07-06 17:13:07] [Rank 0] step:1041/10000 train_time:69645ms step_avg:66.90ms +[2025-07-06 17:13:08] [Rank 0] step:1061/10000 train_time:70985ms step_avg:66.90ms +[2025-07-06 17:13:08] [Rank 0] step:1061/10000 train_time:70985ms step_avg:66.90ms +[2025-07-06 17:13:10] [Rank 0] step:1081/10000 train_time:72379ms step_avg:66.96ms +[2025-07-06 17:13:10] [Rank 0] step:1081/10000 train_time:72379ms step_avg:66.96ms +[2025-07-06 17:13:11] [Rank 0] step:1101/10000 train_time:73716ms step_avg:66.95ms +[2025-07-06 17:13:11] [Rank 0] step:1101/10000 train_time:73716ms step_avg:66.95ms +[2025-07-06 17:13:12] [Rank 0] step:1121/10000 train_time:75058ms step_avg:66.96ms +[2025-07-06 17:13:12] [Rank 0] step:1121/10000 train_time:75058ms step_avg:66.96ms +[2025-07-06 17:13:14] [Rank 0] step:1141/10000 train_time:76402ms step_avg:66.96ms +[2025-07-06 17:13:14] [Rank 0] step:1141/10000 train_time:76402ms step_avg:66.96ms +[2025-07-06 17:13:15] [Rank 0] step:1161/10000 train_time:77747ms step_avg:66.97ms +[2025-07-06 17:13:15] [Rank 0] step:1161/10000 train_time:77747ms step_avg:66.97ms +[2025-07-06 17:13:16] [Rank 0] step:1181/10000 train_time:79092ms step_avg:66.97ms +[2025-07-06 17:13:16] [Rank 0] step:1181/10000 train_time:79092ms step_avg:66.97ms +[2025-07-06 17:13:18] [Rank 0] step:1201/10000 train_time:80436ms step_avg:66.97ms +[2025-07-06 17:13:18] [Rank 0] step:1201/10000 train_time:80436ms step_avg:66.97ms +[2025-07-06 17:13:19] [Rank 0] step:1221/10000 train_time:81780ms step_avg:66.98ms +[2025-07-06 17:13:19] [Rank 0] step:1221/10000 train_time:81780ms step_avg:66.98ms +[2025-07-06 
17:13:20] [Rank 0] step:1241/10000 train_time:83125ms step_avg:66.98ms +[2025-07-06 17:13:20] [Rank 0] step:1241/10000 train_time:83125ms step_avg:66.98ms +[2025-07-06 17:13:22] [Rank 0] step:1261/10000 train_time:84519ms step_avg:67.03ms +[2025-07-06 17:13:22] [Rank 0] step:1261/10000 train_time:84519ms step_avg:67.03ms +[2025-07-06 17:13:23] [Rank 0] step:1281/10000 train_time:85877ms step_avg:67.04ms +[2025-07-06 17:13:23] [Rank 0] step:1281/10000 train_time:85877ms step_avg:67.04ms +[2025-07-06 17:13:25] [Rank 0] step:1301/10000 train_time:87223ms step_avg:67.04ms +[2025-07-06 17:13:25] [Rank 0] step:1301/10000 train_time:87223ms step_avg:67.04ms +[2025-07-06 17:13:26] [Rank 0] step:1321/10000 train_time:88570ms step_avg:67.05ms +[2025-07-06 17:13:26] [Rank 0] step:1321/10000 train_time:88570ms step_avg:67.05ms +[2025-07-06 17:13:27] [Rank 0] step:1341/10000 train_time:89918ms step_avg:67.05ms +[2025-07-06 17:13:27] [Rank 0] step:1341/10000 train_time:89918ms step_avg:67.05ms +[2025-07-06 17:13:29] [Rank 0] step:1361/10000 train_time:91266ms step_avg:67.06ms +[2025-07-06 17:13:29] [Rank 0] step:1361/10000 train_time:91266ms step_avg:67.06ms +[2025-07-06 17:13:30] [Rank 0] step:1381/10000 train_time:92614ms step_avg:67.06ms +[2025-07-06 17:13:30] [Rank 0] step:1381/10000 train_time:92614ms step_avg:67.06ms +[2025-07-06 17:13:31] [Rank 0] step:1401/10000 train_time:93962ms step_avg:67.07ms +[2025-07-06 17:13:31] [Rank 0] step:1401/10000 train_time:93962ms step_avg:67.07ms +[2025-07-06 17:13:33] [Rank 0] step:1421/10000 train_time:95310ms step_avg:67.07ms +[2025-07-06 17:13:33] [Rank 0] step:1421/10000 train_time:95310ms step_avg:67.07ms +[2025-07-06 17:13:34] [Rank 0] step:1441/10000 train_time:96911ms step_avg:67.25ms +[2025-07-06 17:13:34] [Rank 0] step:1441/10000 train_time:96911ms step_avg:67.25ms +[2025-07-06 17:13:35] [Rank 0] step:1461/10000 train_time:98068ms step_avg:67.12ms +[2025-07-06 17:13:35] [Rank 0] step:1461/10000 train_time:98068ms step_avg:67.12ms +[2025-07-06 17:13:37] [Rank 0] step:1481/10000 train_time:99415ms step_avg:67.13ms +[2025-07-06 17:13:37] [Rank 0] step:1481/10000 train_time:99415ms step_avg:67.13ms +[2025-07-06 17:13:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 17:13:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 17:13:39] [Rank 0] PRINT: step:1500/10000 train_loss:1.5000 val_loss:1.3509 train_time:101377ms step_avg:67.58ms +[2025-07-06 17:13:39] [Rank 0] PRINT: step:1500/10000 train_loss:1.5000 val_loss:1.3509 train_time:101377ms step_avg:67.58ms +[2025-07-06 17:13:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 17:13:39] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 17:13:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 17:13:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 17:13:39] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 17:13:39] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 17:19:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 17:19:01] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 17:19:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 17:19:01] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 17:19:01] [Rank 0] Total Loss: 5.4024 +[2025-07-06 17:19:01] [Rank 0] Total Loss: 5.4024 +[2025-07-06 17:19:01] [Rank 0] Total FTA: 0.0982 +[2025-07-06 17:19:01] [Rank 0] Total FTA: 0.0982 +[2025-07-06 17:19:01] [Rank 0] Group 0 Loss: 5.7753 +[2025-07-06 17:19:01] [Rank 0] Group 0 Loss: 5.7753 +[2025-07-06 17:19:01] [Rank 0] Group 1 Loss: 5.1760 +[2025-07-06 17:19:01] [Rank 0] Group 1 Loss: 5.1760 +[2025-07-06 17:19:01] [Rank 0] Group 2 Loss: 4.9795 +[2025-07-06 17:19:01] [Rank 0] Group 2 Loss: 4.9795 +[2025-07-06 17:19:01] [Rank 0] Group 3 Loss: 5.4168 +[2025-07-06 17:19:01] [Rank 0] Group 3 Loss: 5.4168 +[2025-07-06 17:19:01] [Rank 0] Group 4 Loss: 5.3586 +[2025-07-06 17:19:01] [Rank 0] Group 4 Loss: 5.3586 +[2025-07-06 17:19:01] [Rank 0] Group 5 Loss: 5.3374 +[2025-07-06 17:19:01] [Rank 0] Group 5 Loss: 5.3374 +[2025-07-06 17:19:01] [Rank 0] Group 6 Loss: 5.2922 +[2025-07-06 17:19:01] [Rank 0] Group 6 Loss: 5.2922 +[2025-07-06 17:19:01] [Rank 0] Group 7 Loss: 5.4415 +[2025-07-06 17:19:01] [Rank 0] Group 7 Loss: 5.4415 +[2025-07-06 17:19:01] [Rank 0] Group 8 Loss: 5.3972 +[2025-07-06 17:19:01] [Rank 0] Group 8 Loss: 5.3972 +[2025-07-06 17:19:01] [Rank 0] Group 9 Loss: 5.3474 +[2025-07-06 17:19:01] [Rank 0] Group 9 Loss: 5.3474 +[2025-07-06 17:19:01] [Rank 0] Group 10 Loss: 5.4206 +[2025-07-06 17:19:01] [Rank 0] Group 10 Loss: 5.4206 +[2025-07-06 17:19:01] [Rank 0] Group 11 Loss: 5.4344 +[2025-07-06 17:19:01] [Rank 0] Group 11 Loss: 5.4344 +[2025-07-06 17:19:01] [Rank 0] Group 0 FTA: 0.1664 +[2025-07-06 17:19:01] [Rank 0] Group 0 FTA: 0.1664 +[2025-07-06 17:19:01] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 17:19:01] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-06 17:19:01] [Rank 0] Group 2 FTA: 0.1797 +[2025-07-06 17:19:01] [Rank 0] Group 2 FTA: 0.1797 +[2025-07-06 17:19:01] [Rank 0] Group 3 FTA: 0.0990 +[2025-07-06 17:19:01] [Rank 0] Group 3 FTA: 0.0990 +[2025-07-06 17:19:01] [Rank 0] Group 4 FTA: 0.0365 +[2025-07-06 17:19:01] [Rank 0] Group 4 FTA: 0.0365 +[2025-07-06 17:19:01] [Rank 0] Group 5 FTA: 0.0495 +[2025-07-06 17:19:01] [Rank 0] Group 5 FTA: 0.0495 +[2025-07-06 17:19:01] [Rank 0] Group 6 FTA: 0.1146 +[2025-07-06 17:19:01] [Rank 0] Group 6 FTA: 0.1146 +[2025-07-06 17:19:01] [Rank 0] Group 7 FTA: 0.0938 +[2025-07-06 17:19:01] [Rank 0] Group 7 FTA: 0.0938 +[2025-07-06 17:19:01] [Rank 0] Group 8 FTA: 0.1068 +[2025-07-06 17:19:01] [Rank 0] Group 8 FTA: 0.1068 +[2025-07-06 17:19:01] [Rank 0] Group 9 FTA: 0.0703 +[2025-07-06 17:19:01] [Rank 0] Group 9 FTA: 0.0703 +[2025-07-06 17:19:01] [Rank 0] Group 10 FTA: 0.0898 +[2025-07-06 17:19:01] [Rank 0] Group 10 FTA: 0.0898 +[2025-07-06 17:19:01] [Rank 0] Group 11 FTA: 0.0977 +[2025-07-06 17:19:01] [Rank 0] Group 11 FTA: 0.0977 +[2025-07-06 17:19:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 17:19:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 17:19:02] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 17:19:02] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 17:19:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 17:19:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 17:19:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 17:19:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 17:19:02] [Rank 0] step:1501/10000 train_time:101388ms step_avg:67.55ms +[2025-07-06 17:19:02] [Rank 0] step:1501/10000 train_time:101388ms step_avg:67.55ms +[2025-07-06 17:19:03] [Rank 0] step:1521/10000 train_time:102111ms step_avg:67.13ms +[2025-07-06 17:19:03] [Rank 0] step:1521/10000 train_time:102111ms step_avg:67.13ms +[2025-07-06 17:19:05] [Rank 0] step:1541/10000 train_time:103451ms step_avg:67.13ms +[2025-07-06 17:19:05] [Rank 0] step:1541/10000 train_time:103451ms step_avg:67.13ms +[2025-07-06 17:19:06] [Rank 0] step:1561/10000 train_time:104793ms step_avg:67.13ms +[2025-07-06 17:19:06] [Rank 0] step:1561/10000 train_time:104793ms step_avg:67.13ms +[2025-07-06 17:19:08] [Rank 0] step:1581/10000 train_time:106135ms step_avg:67.13ms +[2025-07-06 17:19:08] [Rank 0] step:1581/10000 train_time:106135ms step_avg:67.13ms +[2025-07-06 17:19:09] [Rank 0] step:1601/10000 train_time:107478ms step_avg:67.13ms +[2025-07-06 17:19:09] [Rank 0] step:1601/10000 train_time:107478ms step_avg:67.13ms +[2025-07-06 17:19:10] [Rank 0] step:1621/10000 train_time:109074ms step_avg:67.29ms +[2025-07-06 17:19:10] [Rank 0] step:1621/10000 train_time:109074ms step_avg:67.29ms +[2025-07-06 17:19:12] [Rank 0] step:1641/10000 train_time:110220ms step_avg:67.17ms +[2025-07-06 17:19:12] [Rank 0] step:1641/10000 train_time:110220ms step_avg:67.17ms +[2025-07-06 17:19:13] [Rank 0] step:1661/10000 train_time:111565ms step_avg:67.17ms +[2025-07-06 17:19:13] [Rank 0] step:1661/10000 train_time:111565ms step_avg:67.17ms +[2025-07-06 17:19:14] [Rank 0] step:1681/10000 train_time:112909ms step_avg:67.17ms +[2025-07-06 17:19:14] [Rank 0] step:1681/10000 train_time:112909ms step_avg:67.17ms +[2025-07-06 17:19:16] [Rank 0] step:1701/10000 train_time:114254ms step_avg:67.17ms +[2025-07-06 17:19:16] [Rank 0] step:1701/10000 train_time:114254ms step_avg:67.17ms +[2025-07-06 17:19:17] [Rank 0] step:1721/10000 train_time:115599ms step_avg:67.17ms +[2025-07-06 17:19:17] [Rank 0] step:1721/10000 train_time:115599ms step_avg:67.17ms +[2025-07-06 17:19:18] [Rank 0] step:1741/10000 train_time:116945ms step_avg:67.17ms +[2025-07-06 17:19:18] [Rank 0] step:1741/10000 train_time:116945ms step_avg:67.17ms +[2025-07-06 17:19:20] [Rank 0] step:1761/10000 train_time:118292ms step_avg:67.17ms +[2025-07-06 17:19:20] [Rank 0] step:1761/10000 train_time:118292ms step_avg:67.17ms +[2025-07-06 17:19:21] [Rank 0] step:1781/10000 train_time:119638ms step_avg:67.17ms +[2025-07-06 17:19:21] [Rank 0] step:1781/10000 train_time:119638ms step_avg:67.17ms +[2025-07-06 17:19:22] [Rank 0] step:1801/10000 train_time:121238ms step_avg:67.32ms +[2025-07-06 17:19:22] [Rank 0] 
step:1801/10000 train_time:121238ms step_avg:67.32ms +[2025-07-06 17:19:24] [Rank 0] step:1821/10000 train_time:122373ms step_avg:67.20ms +[2025-07-06 17:19:24] [Rank 0] step:1821/10000 train_time:122373ms step_avg:67.20ms +[2025-07-06 17:19:25] [Rank 0] step:1841/10000 train_time:123719ms step_avg:67.20ms +[2025-07-06 17:19:25] [Rank 0] step:1841/10000 train_time:123719ms step_avg:67.20ms +[2025-07-06 17:19:26] [Rank 0] step:1861/10000 train_time:125066ms step_avg:67.20ms +[2025-07-06 17:19:26] [Rank 0] step:1861/10000 train_time:125066ms step_avg:67.20ms +[2025-07-06 17:19:28] [Rank 0] step:1881/10000 train_time:126414ms step_avg:67.21ms +[2025-07-06 17:19:28] [Rank 0] step:1881/10000 train_time:126414ms step_avg:67.21ms +[2025-07-06 17:19:29] [Rank 0] step:1901/10000 train_time:127761ms step_avg:67.21ms +[2025-07-06 17:19:29] [Rank 0] step:1901/10000 train_time:127761ms step_avg:67.21ms +[2025-07-06 17:19:30] [Rank 0] step:1921/10000 train_time:129109ms step_avg:67.21ms +[2025-07-06 17:19:30] [Rank 0] step:1921/10000 train_time:129109ms step_avg:67.21ms +[2025-07-06 17:19:32] [Rank 0] step:1941/10000 train_time:130458ms step_avg:67.21ms +[2025-07-06 17:19:32] [Rank 0] step:1941/10000 train_time:130458ms step_avg:67.21ms +[2025-07-06 17:19:33] [Rank 0] step:1961/10000 train_time:131811ms step_avg:67.22ms +[2025-07-06 17:19:33] [Rank 0] step:1961/10000 train_time:131811ms step_avg:67.22ms +[2025-07-06 17:19:35] [Rank 0] step:1981/10000 train_time:133206ms step_avg:67.24ms +[2025-07-06 17:19:35] [Rank 0] step:1981/10000 train_time:133206ms step_avg:67.24ms +[2025-07-06 17:19:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 17:19:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-06 17:19:37] [Rank 0] PRINT: step:2000/10000 train_loss:1.3138 val_loss:1.2921 train_time:135179ms step_avg:67.59ms +[2025-07-06 17:19:37] [Rank 0] PRINT: step:2000/10000 train_loss:1.3138 val_loss:1.2921 train_time:135179ms step_avg:67.59ms +[2025-07-06 17:19:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 17:19:37] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-06 17:19:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-06 17:19:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-06 17:19:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:24:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:24:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:24:58] [Rank 0] Total Loss: 5.5695
+[2025-07-06 17:24:58] [Rank 0] Total FTA: 0.1255
+[2025-07-06 17:24:58] [Rank 0] Group 0 Loss: 6.0180
+[2025-07-06 17:24:58] [Rank 0] Group 1 Loss: 5.3796
+[2025-07-06 17:24:58] [Rank 0] Group 2 Loss: 5.3301
+[2025-07-06 17:24:58] [Rank 0] Group 3 Loss: 5.4588
+[2025-07-06 17:24:58] [Rank 0] Group 4 Loss: 5.5258
+[2025-07-06 17:24:58] [Rank 0] Group 5 Loss: 5.4133
+[2025-07-06 17:24:58] [Rank 0] Group 6 Loss: 5.4800
+[2025-07-06 17:24:58] [Rank 0] Group 7 Loss: 5.6183
+[2025-07-06 17:24:58] [Rank 0] Group 8 Loss: 5.5469
+[2025-07-06 17:24:58] [Rank 0] Group 9 Loss: 5.5165
+[2025-07-06 17:24:58] [Rank 0] Group 10 Loss: 5.5591
+[2025-07-06 17:24:58] [Rank 0] Group 11 Loss: 5.5526
+[2025-07-06 17:24:58] [Rank 0] Group 0 FTA: 0.3225
+[2025-07-06 17:24:58] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-06 17:24:58] [Rank 0] Group 2 FTA: 0.1849
+[2025-07-06 17:24:58] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-06 17:24:58] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-06 17:24:58] [Rank 0] Group 5 FTA: 0.0781
+[2025-07-06 17:24:58] [Rank 0] Group 6 FTA: 0.0964
+[2025-07-06 17:24:58] [Rank 0] Group 7 FTA: 0.0859
+[2025-07-06 17:24:58] [Rank 0] Group 8 FTA: 0.0729
+[2025-07-06 17:24:58] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-06 17:24:58] [Rank 0] Group 10 FTA: 0.1016
+[2025-07-06 17:24:58] [Rank 0] Group 11 FTA: 0.0938
+[2025-07-06 17:24:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 17:24:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 17:24:59] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 17:24:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 17:24:59] [Rank 0] step:2001/10000 train_time:135191ms step_avg:67.56ms
+[2025-07-06 17:25:01] [Rank 0] step:2021/10000 train_time:135925ms step_avg:67.26ms
+[2025-07-06 17:25:02] [Rank 0] step:2041/10000 train_time:137266ms step_avg:67.25ms
+[2025-07-06 17:25:03] [Rank 0] step:2061/10000 train_time:138607ms step_avg:67.25ms
+[2025-07-06 17:25:05] [Rank 0] step:2081/10000 train_time:139951ms step_avg:67.25ms
+[2025-07-06 17:25:06] [Rank 0] step:2101/10000 train_time:141296ms step_avg:67.25ms
+[2025-07-06 17:25:07] [Rank 0] step:2121/10000 train_time:142642ms step_avg:67.25ms
+[2025-07-06 17:25:09] [Rank 0] step:2141/10000 train_time:143985ms step_avg:67.25ms
+[2025-07-06 17:25:10] [Rank 0] step:2161/10000 train_time:145585ms step_avg:67.37ms
+[2025-07-06 17:25:12] [Rank 0] step:2181/10000 train_time:146722ms step_avg:67.27ms
+[2025-07-06 17:25:13] [Rank 0] step:2201/10000 train_time:148066ms step_avg:67.27ms
+[2025-07-06 17:25:14] [Rank 0] step:2221/10000 train_time:149412ms step_avg:67.27ms
+[2025-07-06 17:25:16] [Rank 0] step:2241/10000 train_time:150767ms step_avg:67.28ms
+[2025-07-06 17:25:17] [Rank 0] step:2261/10000 train_time:152139ms step_avg:67.29ms
+[2025-07-06 17:25:18] [Rank 0] step:2281/10000 train_time:153509ms step_avg:67.30ms
+[2025-07-06 17:25:20] [Rank 0] step:2301/10000 train_time:154881ms step_avg:67.31ms
+[2025-07-06 17:25:21] [Rank 0] step:2321/10000 train_time:156252ms step_avg:67.32ms
+[2025-07-06 17:25:23] [Rank 0] step:2341/10000 train_time:157625ms step_avg:67.33ms
+[2025-07-06 17:25:24] [Rank 0] step:2361/10000 train_time:159027ms step_avg:67.36ms
+[2025-07-06 17:25:25] [Rank 0] step:2381/10000 train_time:160400ms step_avg:67.37ms
+[2025-07-06 17:25:27] [Rank 0] step:2401/10000 train_time:161772ms step_avg:67.38ms
+[2025-07-06 17:25:28] [Rank 0] step:2421/10000 train_time:163145ms step_avg:67.39ms
+[2025-07-06 17:25:29] [Rank 0] step:2441/10000 train_time:164517ms step_avg:67.40ms
+[2025-07-06 17:25:31] [Rank 0] step:2461/10000 train_time:165976ms step_avg:67.44ms
+[2025-07-06 17:25:32] [Rank 0] step:2481/10000 train_time:167347ms step_avg:67.45ms
+[2025-07-06 17:25:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:25:34] [Rank 0] PRINT: step:2500/10000 train_loss:1.2782 val_loss:1.2665 train_time:169342ms step_avg:67.74ms
+[2025-07-06 17:25:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:25:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
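The sampling announcement above targets ~5000 samples, yet the realized evaluation set is consistently 5633; that is the kind of overshoot produced when a stratified draw rounds its per-group quota up. A minimal sketch of such a draw, assuming equal per-group quotas; stratified_sample and its quota rule are illustrative, not the script's actual API:

import math
import random
from collections import defaultdict

def stratified_sample(samples, target_total, seed=42):
    # samples: list of (group_id, item) pairs. Draw ~target_total items
    # with an equal per-group quota, rounded up -- so the realized size
    # can land slightly above the target, as with 5633 vs ~5000 above.
    rng = random.Random(seed)
    by_group = defaultdict(list)
    for gid, item in samples:
        by_group[gid].append(item)
    quota = math.ceil(target_total / len(by_group))
    picked = []
    for gid, items in sorted(by_group.items()):
        picked.extend(rng.sample(items, min(quota, len(items))))
    return picked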
+[2025-07-06 17:25:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:30:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:30:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:30:56] [Rank 0] Total Loss: 5.5954
+[2025-07-06 17:30:56] [Rank 0] Total FTA: 0.1200
+[2025-07-06 17:30:56] [Rank 0] Group 0 Loss: 5.8135
+[2025-07-06 17:30:56] [Rank 0] Group 1 Loss: 5.4772
+[2025-07-06 17:30:56] [Rank 0] Group 2 Loss: 5.3080
+[2025-07-06 17:30:56] [Rank 0] Group 3 Loss: 5.5285
+[2025-07-06 17:30:56] [Rank 0] Group 4 Loss: 5.7012
+[2025-07-06 17:30:56] [Rank 0] Group 5 Loss: 5.5483
+[2025-07-06 17:30:56] [Rank 0] Group 6 Loss: 5.5083
+[2025-07-06 17:30:56] [Rank 0] Group 7 Loss: 5.6147
+[2025-07-06 17:30:56] [Rank 0] Group 8 Loss: 5.6263
+[2025-07-06 17:30:56] [Rank 0] Group 9 Loss: 5.5784
+[2025-07-06 17:30:56] [Rank 0] Group 10 Loss: 5.6019
+[2025-07-06 17:30:56] [Rank 0] Group 11 Loss: 5.6017
+[2025-07-06 17:30:56] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-06 17:30:56] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-06 17:30:56] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-06 17:30:56] [Rank 0] Group 3 FTA: 0.1380
+[2025-07-06 17:30:56] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-06 17:30:56] [Rank 0] Group 5 FTA: 0.1016
+[2025-07-06 17:30:56] [Rank 0] Group 6 FTA: 0.1354
+[2025-07-06 17:30:56] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-06 17:30:56] [Rank 0] Group 8 FTA: 0.1146
+[2025-07-06 17:30:56] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-06 17:30:56] [Rank 0] Group 10 FTA: 0.1367
+[2025-07-06 17:30:56] [Rank 0] Group 11 FTA: 0.1074
+[2025-07-06 17:30:57] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 17:30:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 17:30:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 17:30:58] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 17:30:58] [Rank 0] step:2501/10000 train_time:169352ms step_avg:67.71ms
+[2025-07-06 17:30:59] [Rank 0] step:2521/10000 train_time:170108ms step_avg:67.48ms
+[2025-07-06 17:31:01] [Rank 0] step:2541/10000 train_time:171521ms step_avg:67.50ms
+[2025-07-06 17:31:02] [Rank 0] step:2561/10000 train_time:172888ms step_avg:67.51ms
+[2025-07-06 17:31:03] [Rank 0] step:2581/10000 train_time:174252ms step_avg:67.51ms
+[2025-07-06 17:31:05] [Rank 0] step:2601/10000 train_time:175620ms step_avg:67.52ms
+[2025-07-06 17:31:06] [Rank 0] step:2621/10000 train_time:176987ms step_avg:67.53ms
+[2025-07-06 17:31:08] [Rank 0] step:2641/10000 train_time:178354ms step_avg:67.53ms
+[2025-07-06 17:31:09] [Rank 0] step:2661/10000 train_time:179723ms step_avg:67.54ms
+[2025-07-06 17:31:10] [Rank 0] step:2681/10000 train_time:181092ms step_avg:67.55ms
+[2025-07-06 17:31:12] [Rank 0] step:2701/10000 train_time:182460ms step_avg:67.55ms
+[2025-07-06 17:31:13] [Rank 0] step:2721/10000 train_time:183829ms step_avg:67.56ms
+[2025-07-06 17:31:14] [Rank 0] step:2741/10000 train_time:185199ms step_avg:67.57ms
+[2025-07-06 17:31:16] [Rank 0] step:2761/10000 train_time:186568ms step_avg:67.57ms
+[2025-07-06 17:31:17] [Rank 0] step:2781/10000 train_time:187940ms step_avg:67.58ms
+[2025-07-06 17:31:19] [Rank 0] step:2801/10000 train_time:189312ms step_avg:67.59ms
+[2025-07-06 17:31:20] [Rank 0] step:2821/10000 train_time:190683ms step_avg:67.59ms
+[2025-07-06 17:31:21] [Rank 0] step:2841/10000 train_time:192063ms step_avg:67.60ms
+[2025-07-06 17:31:23] [Rank 0] step:2861/10000 train_time:193435ms step_avg:67.61ms
+[2025-07-06 17:31:24] [Rank 0] step:2881/10000 train_time:194807ms step_avg:67.62ms
+[2025-07-06 17:31:25] [Rank 0] step:2901/10000 train_time:196231ms step_avg:67.64ms
+[2025-07-06 17:31:27] [Rank 0] step:2921/10000 train_time:197603ms step_avg:67.65ms
+[2025-07-06 17:31:28] [Rank 0] step:2941/10000 train_time:198977ms step_avg:67.66ms
+[2025-07-06 17:31:30] [Rank 0] step:2961/10000 train_time:200350ms step_avg:67.66ms
+[2025-07-06 17:31:31] [Rank 0] step:2981/10000 train_time:201725ms step_avg:67.67ms
+[2025-07-06 17:31:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:31:33] [Rank 0] PRINT: step:3000/10000 train_loss:1.2468 val_loss:1.2412 train_time:203722ms step_avg:67.91ms
+[2025-07-06 17:31:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:31:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
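The recurring divisibility warning above is exact arithmetic: 1966080 / 262144 = 7.5, so a validation loader that only consumes full batches sees 7 x 262144 = 1835008 tokens and skips 131072 per pass. A quick check, assuming drop-last batching (the loader's actual behavior is not shown in the log):

val_tokens = 1966080
val_batch_size = 262144
full_batches, leftover = divmod(val_tokens, val_batch_size)
print(full_batches, leftover)  # 7 full batches, 131072 tokens missed per pass
assert full_batches * val_batch_size == 1835008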
+[2025-07-06 17:31:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:36:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:36:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:36:57] [Rank 0] Total Loss: 5.5450
+[2025-07-06 17:36:57] [Rank 0] Total FTA: 0.1001
+[2025-07-06 17:36:57] [Rank 0] Group 0 Loss: 5.8730
+[2025-07-06 17:36:57] [Rank 0] Group 1 Loss: 5.5248
+[2025-07-06 17:36:57] [Rank 0] Group 2 Loss: 5.3543
+[2025-07-06 17:36:57] [Rank 0] Group 3 Loss: 5.5209
+[2025-07-06 17:36:57] [Rank 0] Group 4 Loss: 5.4575
+[2025-07-06 17:36:57] [Rank 0] Group 5 Loss: 5.4419
+[2025-07-06 17:36:57] [Rank 0] Group 6 Loss: 5.4765
+[2025-07-06 17:36:57] [Rank 0] Group 7 Loss: 5.5789
+[2025-07-06 17:36:57] [Rank 0] Group 8 Loss: 5.5251
+[2025-07-06 17:36:57] [Rank 0] Group 9 Loss: 5.4570
+[2025-07-06 17:36:57] [Rank 0] Group 10 Loss: 5.4877
+[2025-07-06 17:36:57] [Rank 0] Group 11 Loss: 5.5293
+[2025-07-06 17:36:57] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-06 17:36:57] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-06 17:36:57] [Rank 0] Group 2 FTA: 0.1615
+[2025-07-06 17:36:57] [Rank 0] Group 3 FTA: 0.0703
+[2025-07-06 17:36:57] [Rank 0] Group 4 FTA: 0.1094
+[2025-07-06 17:36:57] [Rank 0] Group 5 FTA: 0.1276
+[2025-07-06 17:36:57] [Rank 0] Group 6 FTA: 0.1328
+[2025-07-06 17:36:57] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-06 17:36:57] [Rank 0] Group 8 FTA: 0.1250
+[2025-07-06 17:36:57] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-06 17:36:57] [Rank 0] Group 10 FTA: 0.1387
+[2025-07-06 17:36:57] [Rank 0] Group 11 FTA: 0.1338
+[2025-07-06 17:36:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 17:36:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 17:36:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 17:36:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 17:36:59] [Rank 0] step:3001/10000 train_time:203732ms step_avg:67.89ms
+[2025-07-06 17:37:00] [Rank 0] step:3021/10000 train_time:204483ms step_avg:67.69ms
+[2025-07-06 17:37:01] [Rank 0] step:3041/10000 train_time:205847ms step_avg:67.69ms
+[2025-07-06 17:37:03] [Rank 0] step:3061/10000 train_time:207212ms step_avg:67.69ms
+[2025-07-06 17:37:04] [Rank 0] step:3081/10000 train_time:208612ms step_avg:67.71ms
+[2025-07-06 17:37:06] [Rank 0] step:3101/10000 train_time:210234ms step_avg:67.80ms
+[2025-07-06 17:37:07] [Rank 0] step:3121/10000 train_time:211382ms step_avg:67.73ms
+[2025-07-06 17:37:08] [Rank 0] step:3141/10000 train_time:212750ms step_avg:67.73ms
+[2025-07-06 17:37:10] [Rank 0] step:3161/10000 train_time:214118ms step_avg:67.74ms
+[2025-07-06 17:37:11] [Rank 0] step:3181/10000 train_time:215486ms step_avg:67.74ms
+[2025-07-06 17:37:12] [Rank 0] step:3201/10000 train_time:216854ms step_avg:67.75ms
+[2025-07-06 17:37:14] [Rank 0] step:3221/10000 train_time:218222ms step_avg:67.75ms
+[2025-07-06 17:37:15] [Rank 0] step:3241/10000 train_time:219845ms step_avg:67.83ms
+[2025-07-06 17:37:17] [Rank 0] step:3261/10000 train_time:220996ms step_avg:67.77ms
+[2025-07-06 17:37:18] [Rank 0] step:3281/10000 train_time:222366ms step_avg:67.77ms
+[2025-07-06 17:37:19] [Rank 0] step:3301/10000 train_time:223736ms step_avg:67.78ms
+[2025-07-06 17:37:21] [Rank 0] step:3321/10000 train_time:225107ms step_avg:67.78ms
+[2025-07-06 17:37:22] [Rank 0] step:3341/10000 train_time:226478ms step_avg:67.79ms
+[2025-07-06 17:37:23] [Rank 0] step:3361/10000 train_time:227850ms step_avg:67.79ms
+[2025-07-06 17:37:25] [Rank 0] step:3381/10000 train_time:229222ms step_avg:67.80ms
+[2025-07-06 17:37:26] [Rank 0] step:3401/10000 train_time:230595ms step_avg:67.80ms
+[2025-07-06 17:37:28] [Rank 0] step:3421/10000 train_time:232641ms step_avg:68.00ms
+[2025-07-06 17:37:29] [Rank 0] step:3441/10000 train_time:233381ms step_avg:67.82ms
+[2025-07-06 17:37:30] [Rank 0] step:3461/10000 train_time:234754ms step_avg:67.83ms
+[2025-07-06 17:37:32] [Rank 0] step:3481/10000 train_time:236128ms step_avg:67.83ms
+[2025-07-06 17:37:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:37:34] [Rank 0] PRINT: step:3500/10000 train_loss:1.2027 val_loss:1.2115 train_time:238127ms step_avg:68.04ms
+[2025-07-06 17:37:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:37:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
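The step_avg field in the step lines is consistent with cumulative wall-clock training time divided by the current step index: at the step-3500 validation above, 238127 ms / 3500 is about 68.04 ms, matching the logged value. A one-line reconstruction, inferred from the numbers rather than taken from the script:

def step_avg_ms(train_time_ms, step):
    # cumulative training time divided by steps completed so far
    return train_time_ms / step

assert f"{step_avg_ms(238127, 3500):.2f}" == "68.04"
assert f"{step_avg_ms(236128, 3481):.2f}" == "67.83"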
+[2025-07-06 17:37:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:42:56] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:42:56] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:42:56] [Rank 0] Total Loss: 5.5288
+[2025-07-06 17:42:56] [Rank 0] Total FTA: 0.1963
+[2025-07-06 17:42:56] [Rank 0] Group 0 Loss: 5.7584
+[2025-07-06 17:42:56] [Rank 0] Group 1 Loss: 5.3865
+[2025-07-06 17:42:56] [Rank 0] Group 2 Loss: 5.2897
+[2025-07-06 17:42:56] [Rank 0] Group 3 Loss: 5.5676
+[2025-07-06 17:42:56] [Rank 0] Group 4 Loss: 5.5222
+[2025-07-06 17:42:56] [Rank 0] Group 5 Loss: 5.5287
+[2025-07-06 17:42:56] [Rank 0] Group 6 Loss: 5.4523
+[2025-07-06 17:42:56] [Rank 0] Group 7 Loss: 5.5661
+[2025-07-06 17:42:56] [Rank 0] Group 8 Loss: 5.5605
+[2025-07-06 17:42:56] [Rank 0] Group 9 Loss: 5.4965
+[2025-07-06 17:42:56] [Rank 0] Group 10 Loss: 5.4934
+[2025-07-06 17:42:56] [Rank 0] Group 11 Loss: 5.5159
+[2025-07-06 17:42:56] [Rank 0] Group 0 FTA: 0.1521
+[2025-07-06 17:42:56] [Rank 0] Group 1 FTA: 0.2031
+[2025-07-06 17:42:56] [Rank 0] Group 2 FTA: 0.3672
+[2025-07-06 17:42:56] [Rank 0] Group 3 FTA: 0.1953
+[2025-07-06 17:42:56] [Rank 0] Group 4 FTA: 0.1276
+[2025-07-06 17:42:56] [Rank 0] Group 5 FTA: 0.2266
+[2025-07-06 17:42:56] [Rank 0] Group 6 FTA: 0.2318
+[2025-07-06 17:42:56] [Rank 0] Group 7 FTA: 0.1641
+[2025-07-06 17:42:56] [Rank 0] Group 8 FTA: 0.2031
+[2025-07-06 17:42:56] [Rank 0] Group 9 FTA: 0.1992
+[2025-07-06 17:42:56] [Rank 0] Group 10 FTA: 0.1797
+[2025-07-06 17:42:56] [Rank 0] Group 11 FTA: 0.1816
+[2025-07-06 17:42:56] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 17:42:57] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 17:42:57] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 17:42:57] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 17:42:57] [Rank 0] step:3501/10000 train_time:238138ms step_avg:68.02ms
+[2025-07-06 17:42:59] [Rank 0] step:3521/10000 train_time:238894ms step_avg:67.85ms
+[2025-07-06 17:43:00] [Rank 0] step:3541/10000 train_time:240257ms step_avg:67.85ms
+[2025-07-06 17:43:01] [Rank 0] step:3561/10000 train_time:241624ms step_avg:67.85ms
+[2025-07-06 17:43:03] [Rank 0] step:3581/10000 train_time:242991ms step_avg:67.86ms
+[2025-07-06 17:43:04] [Rank 0] step:3601/10000 train_time:244406ms step_avg:67.87ms
+[2025-07-06 17:43:06] [Rank 0] step:3621/10000 train_time:245771ms step_avg:67.87ms
+[2025-07-06 17:43:07] [Rank 0] step:3641/10000 train_time:247139ms step_avg:67.88ms
+[2025-07-06 17:43:08] [Rank 0] step:3661/10000 train_time:248508ms step_avg:67.88ms
+[2025-07-06 17:43:10] [Rank 0] step:3681/10000 train_time:249876ms step_avg:67.88ms
+[2025-07-06 17:43:11] [Rank 0] step:3701/10000 train_time:251245ms step_avg:67.89ms
+[2025-07-06 17:43:12] [Rank 0] step:3721/10000 train_time:252616ms step_avg:67.89ms
+[2025-07-06 17:43:14] [Rank 0] step:3741/10000 train_time:253986ms step_avg:67.89ms
+[2025-07-06 17:43:15] [Rank 0] step:3761/10000 train_time:255357ms step_avg:67.90ms
+[2025-07-06 17:43:17] [Rank 0] step:3781/10000 train_time:257383ms step_avg:68.07ms
+[2025-07-06 17:43:18] [Rank 0] step:3801/10000 train_time:258123ms step_avg:67.91ms
+[2025-07-06 17:43:19] [Rank 0] step:3821/10000 train_time:259493ms step_avg:67.91ms
+[2025-07-06 17:43:21] [Rank 0] step:3841/10000 train_time:260864ms step_avg:67.92ms
+[2025-07-06 17:43:22] [Rank 0] step:3861/10000 train_time:262235ms step_avg:67.92ms
+[2025-07-06 17:43:23] [Rank 0] step:3881/10000 train_time:263610ms step_avg:67.92ms
+[2025-07-06 17:43:25] [Rank 0] step:3901/10000 train_time:265241ms step_avg:67.99ms
+[2025-07-06 17:43:26] [Rank 0] step:3921/10000 train_time:266388ms step_avg:67.94ms
+[2025-07-06 17:43:28] [Rank 0] step:3941/10000 train_time:267761ms step_avg:67.94ms
+[2025-07-06 17:43:29] [Rank 0] step:3961/10000 train_time:269383ms step_avg:68.01ms
+[2025-07-06 17:43:30] [Rank 0] step:3981/10000 train_time:270540ms step_avg:67.96ms
+[2025-07-06 17:43:32] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:43:33] [Rank 0] PRINT: step:4000/10000 train_loss:1.1437 val_loss:1.1845 train_time:272536ms step_avg:68.13ms
+[2025-07-06 17:43:33] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:43:33] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
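Each evaluation cycle rewrites the same four PNGs under logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/, so every save reflects the full metric history up to the current step. A minimal matplotlib sketch of that update-and-overwrite pattern; the history container and function name are assumptions, not the script's internals:

import matplotlib
matplotlib.use("Agg")  # headless backend: render PNGs without a display
import matplotlib.pyplot as plt

def save_per_class_curves(steps, per_group_loss, out_path):
    # steps: eval step indices; per_group_loss: {group_id: [loss at each eval]}
    fig, ax = plt.subplots(figsize=(8, 5))
    for gid, losses in sorted(per_group_loss.items()):
        ax.plot(steps, losses, label=f"Group {gid}")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval loss")
    ax.legend(fontsize=6, ncol=2)
    fig.savefig(out_path, dpi=150)  # overwrites the previous snapshot in place
    plt.close(fig)

# e.g. Group 0 and Group 1 losses at the 2500/3000/3500/4000-step evals above:
save_per_class_curves([2500, 3000, 3500, 4000],
                      {0: [5.8135, 5.8730, 5.7584, 5.7974],
                       1: [5.4772, 5.5248, 5.3865, 5.2747]},
                      "per_class_loss_curves.png")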
+[2025-07-06 17:43:33] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:48:58] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:48:58] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:48:58] [Rank 0] Total Loss: 5.4987
+[2025-07-06 17:48:58] [Rank 0] Total FTA: 0.1825
+[2025-07-06 17:48:58] [Rank 0] Group 0 Loss: 5.7974
+[2025-07-06 17:48:58] [Rank 0] Group 1 Loss: 5.2747
+[2025-07-06 17:48:58] [Rank 0] Group 2 Loss: 5.2927
+[2025-07-06 17:48:58] [Rank 0] Group 3 Loss: 5.3949
+[2025-07-06 17:48:58] [Rank 0] Group 4 Loss: 5.5221
+[2025-07-06 17:48:58] [Rank 0] Group 5 Loss: 5.4684
+[2025-07-06 17:48:58] [Rank 0] Group 6 Loss: 5.3608
+[2025-07-06 17:48:58] [Rank 0] Group 7 Loss: 5.5285
+[2025-07-06 17:48:58] [Rank 0] Group 8 Loss: 5.5239
+[2025-07-06 17:48:58] [Rank 0] Group 9 Loss: 5.4877
+[2025-07-06 17:48:58] [Rank 0] Group 10 Loss: 5.5354
+[2025-07-06 17:48:58] [Rank 0] Group 11 Loss: 5.4928
+[2025-07-06 17:48:58] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-06 17:48:58] [Rank 0] Group 1 FTA: 0.1641
+[2025-07-06 17:48:58] [Rank 0] Group 2 FTA: 0.1719
+[2025-07-06 17:48:58] [Rank 0] Group 3 FTA: 0.1458
+[2025-07-06 17:48:58] [Rank 0] Group 4 FTA: 0.1458
+[2025-07-06 17:48:58] [Rank 0] Group 5 FTA: 0.1901
+[2025-07-06 17:48:58] [Rank 0] Group 6 FTA: 0.1953
+[2025-07-06 17:48:58] [Rank 0] Group 7 FTA: 0.2344
+[2025-07-06 17:48:58] [Rank 0] Group 8 FTA: 0.1849
+[2025-07-06 17:48:58] [Rank 0] Group 9 FTA: 0.1875
+[2025-07-06 17:48:58] [Rank 0] Group 10 FTA: 0.2129
+[2025-07-06 17:48:58] [Rank 0] Group 11 FTA: 0.2031
+[2025-07-06 17:48:59] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 17:48:59] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 17:49:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 17:49:00] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 17:49:00] [Rank 0] step:4001/10000 train_time:272546ms step_avg:68.12ms
+[2025-07-06 17:49:01] [Rank 0] step:4021/10000 train_time:273325ms step_avg:67.97ms
+[2025-07-06 17:49:03] [Rank 0] step:4041/10000 train_time:274691ms step_avg:67.98ms
+[2025-07-06 17:49:04] [Rank 0] step:4061/10000 train_time:276057ms step_avg:67.98ms
+[2025-07-06 17:49:05] [Rank 0] step:4081/10000 train_time:277424ms step_avg:67.98ms
+[2025-07-06 17:49:07] [Rank 0] step:4101/10000 train_time:278792ms step_avg:67.98ms
+[2025-07-06 17:49:08] [Rank 0] step:4121/10000 train_time:280160ms step_avg:67.98ms
+[2025-07-06 17:49:10] [Rank 0] step:4141/10000 train_time:281779ms step_avg:68.05ms
+[2025-07-06 17:49:11] [Rank 0] step:4161/10000 train_time:282926ms step_avg:67.99ms
+[2025-07-06 17:49:12] [Rank 0] step:4181/10000 train_time:284296ms step_avg:68.00ms
+[2025-07-06 17:49:14] [Rank 0] step:4201/10000 train_time:285664ms step_avg:68.00ms
+[2025-07-06 17:49:15] [Rank 0] step:4221/10000 train_time:287035ms step_avg:68.00ms
+[2025-07-06 17:49:16] [Rank 0] step:4241/10000 train_time:288405ms step_avg:68.00ms
+[2025-07-06 17:49:18] [Rank 0] step:4261/10000 train_time:289775ms step_avg:68.01ms
+[2025-07-06 17:49:19] [Rank 0] step:4281/10000 train_time:291146ms step_avg:68.01ms
+[2025-07-06 17:49:21] [Rank 0] step:4301/10000 train_time:292517ms step_avg:68.01ms
+[2025-07-06 17:49:22] [Rank 0] step:4321/10000 train_time:293935ms step_avg:68.02ms
+[2025-07-06 17:49:23] [Rank 0] step:4341/10000 train_time:295312ms step_avg:68.03ms
+[2025-07-06 17:49:25] [Rank 0] step:4361/10000 train_time:296684ms step_avg:68.03ms
+[2025-07-06 17:49:26] [Rank 0] step:4381/10000 train_time:298056ms step_avg:68.03ms
+[2025-07-06 17:49:27] [Rank 0] step:4401/10000 train_time:299428ms step_avg:68.04ms
+[2025-07-06 17:49:29] [Rank 0] step:4421/10000 train_time:300799ms step_avg:68.04ms
+[2025-07-06 17:49:30] [Rank 0] step:4441/10000 train_time:302171ms step_avg:68.04ms
+[2025-07-06 17:49:32] [Rank 0] step:4461/10000 train_time:303544ms step_avg:68.04ms
+[2025-07-06 17:49:33] [Rank 0] step:4481/10000 train_time:304917ms step_avg:68.05ms
+[2025-07-06 17:49:34] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:49:35] [Rank 0] PRINT: step:4500/10000 train_loss:1.0712 val_loss:1.1579 train_time:306915ms step_avg:68.20ms
+[2025-07-06 17:49:35] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:49:35] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
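The Total Loss / Total FTA lines sit close to, but not exactly at, the unweighted mean of the twelve group values (at the step-4500 eval below, the group FTAs average about 0.257 against a reported total of 0.2480), which is what a sample-weighted aggregate over groups of slightly unequal size would produce. A sketch of that aggregation; group sizes are not logged, so group_sizes is a hypothetical input:

def weighted_total(group_vals, group_sizes):
    # sample-weighted mean over groups; reduces to the plain mean
    # only when every group contributes the same number of samples
    n = sum(group_sizes.values())
    return sum(group_vals[g] * group_sizes[g] for g in group_vals) / n

# unweighted mean of the step-4500 group FTAs, for comparison with 0.2480:
ftas = [0.1625, 0.5339, 0.2396, 0.2448, 0.1016, 0.1823,
        0.3438, 0.3021, 0.2734, 0.2344, 0.2285, 0.2363]
print(sum(ftas) / len(ftas))  # ~0.2569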
+[2025-07-06 17:49:35] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 17:54:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 17:54:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 17:54:59] [Rank 0] Total Loss: 5.4332
+[2025-07-06 17:54:59] [Rank 0] Total FTA: 0.2480
+[2025-07-06 17:54:59] [Rank 0] Group 0 Loss: 5.7122
+[2025-07-06 17:54:59] [Rank 0] Group 1 Loss: 5.1258
+[2025-07-06 17:54:59] [Rank 0] Group 2 Loss: 5.2953
+[2025-07-06 17:54:59] [Rank 0] Group 3 Loss: 5.3572
+[2025-07-06 17:54:59] [Rank 0] Group 4 Loss: 5.4549
+[2025-07-06 17:54:59] [Rank 0] Group 5 Loss: 5.4743
+[2025-07-06 17:55:00] [Rank 0] Group 6 Loss: 5.3719
+[2025-07-06 17:55:00] [Rank 0] Group 7 Loss: 5.4415
+[2025-07-06 17:55:00] [Rank 0] Group 8 Loss: 5.4320
+[2025-07-06 17:55:00] [Rank 0] Group 9 Loss: 5.3996
+[2025-07-06 17:55:00] [Rank 0] Group 10 Loss: 5.4097
+[2025-07-06 17:55:00] [Rank 0] Group 11 Loss: 5.4360
+[2025-07-06 17:55:00] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-06 17:55:00] [Rank 0] Group 1 FTA: 0.5339
+[2025-07-06 17:55:00] [Rank 0] Group 2 FTA: 0.2396
+[2025-07-06 17:55:00] [Rank 0] Group 3 FTA: 0.2448
+[2025-07-06 17:55:00] [Rank 0] Group 4 FTA: 0.1016
+[2025-07-06 17:55:00] [Rank 0] Group 5 FTA: 0.1823
+[2025-07-06 17:55:00] [Rank 0] Group 6 FTA: 0.3438
+[2025-07-06 17:55:00] [Rank 0] Group 7 FTA: 0.3021
+[2025-07-06 17:55:00] [Rank 0] Group 8 FTA: 0.2734
+[2025-07-06 17:55:00] [Rank 0] Group 9 FTA: 0.2344
+[2025-07-06 17:55:00] [Rank 0] Group 10 FTA: 0.2285
+[2025-07-06 17:55:00] [Rank 0] Group 11 FTA: 0.2363
+[2025-07-06 17:55:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 17:55:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 17:55:01] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 17:55:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 17:55:01] [Rank 0] step:4501/10000 train_time:307038ms step_avg:68.22ms
+[2025-07-06 17:55:03] [Rank 0] step:4521/10000 train_time:307973ms step_avg:68.12ms
+[2025-07-06 17:55:04] [Rank 0] step:4541/10000 train_time:309336ms step_avg:68.12ms
+[2025-07-06 17:55:05] [Rank 0] step:4561/10000 train_time:310701ms step_avg:68.12ms
+[2025-07-06 17:55:07] [Rank 0] step:4581/10000 train_time:312065ms step_avg:68.12ms
+[2025-07-06 17:55:08] [Rank 0] step:4601/10000 train_time:313433ms step_avg:68.12ms
+[2025-07-06 17:55:10] [Rank 0] step:4621/10000 train_time:314800ms step_avg:68.12ms
+[2025-07-06 17:55:11] [Rank 0] step:4641/10000 train_time:316168ms step_avg:68.12ms
+[2025-07-06 17:55:12] [Rank 0] step:4661/10000 train_time:317541ms step_avg:68.13ms
+[2025-07-06 17:55:14] [Rank 0] step:4681/10000 train_time:318909ms step_avg:68.13ms
+[2025-07-06 17:55:15] [Rank 0] step:4701/10000 train_time:320324ms step_avg:68.14ms
+[2025-07-06 17:55:16] [Rank 0] step:4721/10000 train_time:321693ms step_avg:68.14ms
+[2025-07-06 17:55:18] [Rank 0] step:4741/10000 train_time:323063ms step_avg:68.14ms
+[2025-07-06 17:55:19] [Rank 0] step:4761/10000 train_time:324433ms step_avg:68.14ms
+[2025-07-06 17:55:21] [Rank 0] step:4781/10000 train_time:325802ms step_avg:68.15ms
+[2025-07-06 17:55:22] [Rank 0] step:4801/10000 train_time:327173ms step_avg:68.15ms
+[2025-07-06 17:55:23] [Rank 0] step:4821/10000 train_time:328543ms step_avg:68.15ms
+[2025-07-06 17:55:25] [Rank 0] step:4841/10000 train_time:329913ms step_avg:68.15ms
+[2025-07-06 17:55:26] [Rank 0] step:4861/10000 train_time:331328ms step_avg:68.16ms
+[2025-07-06 17:55:27] [Rank 0] step:4881/10000 train_time:332676ms step_avg:68.16ms
+[2025-07-06 17:55:29] [Rank 0] step:4901/10000 train_time:334047ms step_avg:68.16ms
+[2025-07-06 17:55:30] [Rank 0] step:4921/10000 train_time:335417ms step_avg:68.16ms
+[2025-07-06 17:55:32] [Rank 0] step:4941/10000 train_time:336789ms step_avg:68.16ms
+[2025-07-06 17:55:33] [Rank 0] step:4961/10000 train_time:338162ms step_avg:68.16ms
+[2025-07-06 17:55:34] [Rank 0] step:4981/10000 train_time:339534ms step_avg:68.17ms
+[2025-07-06 17:55:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 17:55:37] [Rank 0] PRINT: step:5000/10000 train_loss:1.0022 val_loss:1.1078 train_time:341528ms step_avg:68.31ms
+[2025-07-06 17:55:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 17:55:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 17:55:37] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 17:55:37] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-06 18:01:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 18:01:02] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-06 18:01:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 18:01:02] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-06 18:01:02] [Rank 0] Total Loss: 5.4001 +[2025-07-06 18:01:02] [Rank 0] Total Loss: 5.4001 +[2025-07-06 18:01:02] [Rank 0] Total FTA: 0.3378 +[2025-07-06 18:01:02] [Rank 0] Total FTA: 0.3378 +[2025-07-06 18:01:02] [Rank 0] Group 0 Loss: 5.6841 +[2025-07-06 18:01:02] [Rank 0] Group 0 Loss: 5.6841 +[2025-07-06 18:01:02] [Rank 0] Group 1 Loss: 5.1202 +[2025-07-06 18:01:02] [Rank 0] Group 1 Loss: 5.1202 +[2025-07-06 18:01:02] [Rank 0] Group 2 Loss: 5.1191 +[2025-07-06 18:01:02] [Rank 0] Group 2 Loss: 5.1191 +[2025-07-06 18:01:02] [Rank 0] Group 3 Loss: 5.4170 +[2025-07-06 18:01:02] [Rank 0] Group 3 Loss: 5.4170 +[2025-07-06 18:01:02] [Rank 0] Group 4 Loss: 5.3586 +[2025-07-06 18:01:02] [Rank 0] Group 4 Loss: 5.3586 +[2025-07-06 18:01:02] [Rank 0] Group 5 Loss: 5.4236 +[2025-07-06 18:01:02] [Rank 0] Group 5 Loss: 5.4236 +[2025-07-06 18:01:02] [Rank 0] Group 6 Loss: 5.3429 +[2025-07-06 18:01:02] [Rank 0] Group 6 Loss: 5.3429 +[2025-07-06 18:01:02] [Rank 0] Group 7 Loss: 5.3788 +[2025-07-06 18:01:02] [Rank 0] Group 7 Loss: 5.3788 +[2025-07-06 18:01:02] [Rank 0] Group 8 Loss: 5.4347 +[2025-07-06 18:01:02] [Rank 0] Group 8 Loss: 5.4347 +[2025-07-06 18:01:02] [Rank 0] Group 9 Loss: 5.3617 +[2025-07-06 18:01:02] [Rank 0] Group 9 Loss: 5.3617 +[2025-07-06 18:01:02] [Rank 0] Group 10 Loss: 5.4315 +[2025-07-06 18:01:02] [Rank 0] Group 10 Loss: 5.4315 +[2025-07-06 18:01:02] [Rank 0] Group 11 Loss: 5.4077 +[2025-07-06 18:01:02] [Rank 0] Group 11 Loss: 5.4077 +[2025-07-06 18:01:02] [Rank 0] Group 0 FTA: 0.1808 +[2025-07-06 18:01:02] [Rank 0] Group 0 FTA: 0.1808 +[2025-07-06 18:01:02] [Rank 0] Group 1 FTA: 0.3073 +[2025-07-06 18:01:02] [Rank 0] Group 1 FTA: 0.3073 +[2025-07-06 18:01:02] [Rank 0] Group 2 FTA: 0.5781 +[2025-07-06 18:01:02] [Rank 0] Group 2 FTA: 0.5781 +[2025-07-06 18:01:02] [Rank 0] Group 3 FTA: 0.4271 +[2025-07-06 18:01:02] [Rank 0] Group 3 FTA: 0.4271 +[2025-07-06 18:01:02] [Rank 0] Group 4 FTA: 0.2188 +[2025-07-06 18:01:02] [Rank 0] Group 4 FTA: 0.2188 +[2025-07-06 18:01:02] [Rank 0] Group 5 FTA: 0.3802 +[2025-07-06 18:01:02] [Rank 0] Group 5 FTA: 0.3802 +[2025-07-06 18:01:02] [Rank 0] Group 6 FTA: 0.3698 +[2025-07-06 18:01:02] [Rank 0] Group 6 FTA: 0.3698 +[2025-07-06 18:01:02] [Rank 0] Group 7 FTA: 0.3229 +[2025-07-06 18:01:02] [Rank 0] Group 7 FTA: 0.3229 +[2025-07-06 18:01:02] [Rank 0] Group 8 FTA: 0.3464 +[2025-07-06 18:01:02] [Rank 0] Group 8 FTA: 0.3464 +[2025-07-06 18:01:02] [Rank 0] Group 9 FTA: 0.3281 +[2025-07-06 18:01:02] [Rank 0] Group 9 FTA: 0.3281 +[2025-07-06 18:01:02] [Rank 0] Group 10 FTA: 0.3828 +[2025-07-06 18:01:02] [Rank 0] Group 10 FTA: 0.3828 +[2025-07-06 18:01:02] [Rank 0] Group 11 FTA: 0.3428 +[2025-07-06 18:01:02] [Rank 0] Group 11 FTA: 0.3428 +[2025-07-06 18:01:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 18:01:02] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-06 18:01:03] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 18:01:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-06 18:01:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 18:01:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-06 18:01:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 18:01:03] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-06 18:01:03] [Rank 0] step:5001/10000 train_time:341539ms step_avg:68.29ms +[2025-07-06 18:01:03] [Rank 0] step:5001/10000 train_time:341539ms step_avg:68.29ms +[2025-07-06 18:01:05] [Rank 0] step:5021/10000 train_time:342319ms step_avg:68.18ms +[2025-07-06 18:01:05] [Rank 0] step:5021/10000 train_time:342319ms step_avg:68.18ms +[2025-07-06 18:01:06] [Rank 0] step:5041/10000 train_time:343684ms step_avg:68.18ms +[2025-07-06 18:01:06] [Rank 0] step:5041/10000 train_time:343684ms step_avg:68.18ms +[2025-07-06 18:01:08] [Rank 0] step:5061/10000 train_time:345080ms step_avg:68.18ms +[2025-07-06 18:01:08] [Rank 0] step:5061/10000 train_time:345080ms step_avg:68.18ms +[2025-07-06 18:01:09] [Rank 0] step:5081/10000 train_time:346445ms step_avg:68.18ms +[2025-07-06 18:01:09] [Rank 0] step:5081/10000 train_time:346445ms step_avg:68.18ms +[2025-07-06 18:01:10] [Rank 0] step:5101/10000 train_time:347813ms step_avg:68.19ms +[2025-07-06 18:01:10] [Rank 0] step:5101/10000 train_time:347813ms step_avg:68.19ms +[2025-07-06 18:01:12] [Rank 0] step:5121/10000 train_time:349181ms step_avg:68.19ms +[2025-07-06 18:01:12] [Rank 0] step:5121/10000 train_time:349181ms step_avg:68.19ms +[2025-07-06 18:01:13] [Rank 0] step:5141/10000 train_time:350549ms step_avg:68.19ms +[2025-07-06 18:01:13] [Rank 0] step:5141/10000 train_time:350549ms step_avg:68.19ms +[2025-07-06 18:01:14] [Rank 0] step:5161/10000 train_time:351917ms step_avg:68.19ms +[2025-07-06 18:01:14] [Rank 0] step:5161/10000 train_time:351917ms step_avg:68.19ms +[2025-07-06 18:01:16] [Rank 0] step:5181/10000 train_time:353285ms step_avg:68.19ms +[2025-07-06 18:01:16] [Rank 0] step:5181/10000 train_time:353285ms step_avg:68.19ms +[2025-07-06 18:01:17] [Rank 0] step:5201/10000 train_time:354654ms step_avg:68.19ms +[2025-07-06 18:01:17] [Rank 0] step:5201/10000 train_time:354654ms step_avg:68.19ms +[2025-07-06 18:01:19] [Rank 0] step:5221/10000 train_time:356273ms step_avg:68.24ms +[2025-07-06 18:01:19] [Rank 0] step:5221/10000 train_time:356273ms step_avg:68.24ms +[2025-07-06 18:01:20] [Rank 0] step:5241/10000 train_time:357579ms step_avg:68.23ms +[2025-07-06 18:01:20] [Rank 0] step:5241/10000 train_time:357579ms step_avg:68.23ms +[2025-07-06 18:01:21] [Rank 0] step:5261/10000 train_time:358832ms step_avg:68.21ms +[2025-07-06 18:01:21] [Rank 0] step:5261/10000 train_time:358832ms step_avg:68.21ms +[2025-07-06 18:01:23] [Rank 0] step:5281/10000 train_time:360202ms step_avg:68.21ms +[2025-07-06 18:01:23] [Rank 0] step:5281/10000 train_time:360202ms step_avg:68.21ms +[2025-07-06 18:01:24] [Rank 0] step:5301/10000 train_time:361574ms step_avg:68.21ms +[2025-07-06 18:01:24] [Rank 0] 
+[2025-07-06 18:01:25] [Rank 0] step:5321/10000 train_time:362945ms step_avg:68.21ms
+[2025-07-06 18:01:27] [Rank 0] step:5341/10000 train_time:364315ms step_avg:68.21ms
+[2025-07-06 18:01:28] [Rank 0] step:5361/10000 train_time:365687ms step_avg:68.21ms
+[2025-07-06 18:01:30] [Rank 0] step:5381/10000 train_time:367058ms step_avg:68.21ms
+[2025-07-06 18:01:31] [Rank 0] step:5401/10000 train_time:368473ms step_avg:68.22ms
+[2025-07-06 18:01:32] [Rank 0] step:5421/10000 train_time:369849ms step_avg:68.23ms
+[2025-07-06 18:01:34] [Rank 0] step:5441/10000 train_time:371221ms step_avg:68.23ms
+[2025-07-06 18:01:35] [Rank 0] step:5461/10000 train_time:372593ms step_avg:68.23ms
+[2025-07-06 18:01:36] [Rank 0] step:5481/10000 train_time:373965ms step_avg:68.23ms
+[2025-07-06 18:01:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:01:39] [Rank 0] PRINT: step:5500/10000 train_loss:0.9447 val_loss:1.1113 train_time:375960ms step_avg:68.36ms
+[2025-07-06 18:01:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:01:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
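
The warning repeated at every validation pass is plain integer arithmetic: 1966080 is not a multiple of 262144 (the ratio is 7.5), so a loop that floor-divides runs 7 full batches and the final 131072 tokens are never scored. A sketch of the check, with variable names mirroring the warning text (the actual validation loop is not shown in this log):

val_tokens, val_batch_size = 1966080, 262144
full_batches = val_tokens // val_batch_size    # 7
tokens_scored = full_batches * val_batch_size  # 1835008
tokens_missed = val_tokens - tokens_scored     # 131072
if val_tokens % val_batch_size != 0:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")
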
+[2025-07-06 18:01:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:07:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:07:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:07:03] [Rank 0] Total Loss: 5.4444
+[2025-07-06 18:07:03] [Rank 0] Total FTA: 0.3900
+[2025-07-06 18:07:03] [Rank 0] Group 0 Loss: 5.7728
+[2025-07-06 18:07:03] [Rank 0] Group 1 Loss: 5.2673
+[2025-07-06 18:07:03] [Rank 0] Group 2 Loss: 5.0688
+[2025-07-06 18:07:03] [Rank 0] Group 3 Loss: 5.3602
+[2025-07-06 18:07:03] [Rank 0] Group 4 Loss: 5.3626
+[2025-07-06 18:07:03] [Rank 0] Group 5 Loss: 5.4851
+[2025-07-06 18:07:03] [Rank 0] Group 6 Loss: 5.3411
+[2025-07-06 18:07:03] [Rank 0] Group 7 Loss: 5.5303
+[2025-07-06 18:07:03] [Rank 0] Group 8 Loss: 5.4961
+[2025-07-06 18:07:03] [Rank 0] Group 9 Loss: 5.4029
+[2025-07-06 18:07:03] [Rank 0] Group 10 Loss: 5.4329
+[2025-07-06 18:07:03] [Rank 0] Group 11 Loss: 5.4554
+[2025-07-06 18:07:03] [Rank 0] Group 0 FTA: 0.5397
+[2025-07-06 18:07:03] [Rank 0] Group 1 FTA: 0.2969
+[2025-07-06 18:07:03] [Rank 0] Group 2 FTA: 0.5677
+[2025-07-06 18:07:03] [Rank 0] Group 3 FTA: 0.3672
+[2025-07-06 18:07:03] [Rank 0] Group 4 FTA: 0.2682
+[2025-07-06 18:07:03] [Rank 0] Group 5 FTA: 0.3438
+[2025-07-06 18:07:03] [Rank 0] Group 6 FTA: 0.3542
+[2025-07-06 18:07:03] [Rank 0] Group 7 FTA: 0.3568
+[2025-07-06 18:07:03] [Rank 0] Group 8 FTA: 0.3620
+[2025-07-06 18:07:03] [Rank 0] Group 9 FTA: 0.3516
+[2025-07-06 18:07:03] [Rank 0] Group 10 FTA: 0.3555
+[2025-07-06 18:07:03] [Rank 0] Group 11 FTA: 0.3809
+[2025-07-06 18:07:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:07:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:07:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:07:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:07:04] [Rank 0] step:5501/10000 train_time:375970ms step_avg:68.35ms
+[2025-07-06 18:07:06] [Rank 0] step:5521/10000 train_time:376727ms step_avg:68.24ms
+[2025-07-06 18:07:07] [Rank 0] step:5541/10000 train_time:378090ms step_avg:68.23ms
+[2025-07-06 18:07:08] [Rank 0] step:5561/10000 train_time:379456ms step_avg:68.24ms
+[2025-07-06 18:07:10] [Rank 0] step:5581/10000 train_time:380822ms step_avg:68.24ms
+[2025-07-06 18:07:11] [Rank 0] step:5601/10000 train_time:382222ms step_avg:68.24ms
+[2025-07-06 18:07:13] [Rank 0] step:5621/10000 train_time:383590ms step_avg:68.24ms
+[2025-07-06 18:07:14] [Rank 0] step:5641/10000 train_time:384957ms step_avg:68.24ms
+[2025-07-06 18:07:15] [Rank 0] step:5661/10000 train_time:386326ms step_avg:68.24ms
+[2025-07-06 18:07:17] [Rank 0] step:5681/10000 train_time:387695ms step_avg:68.24ms
+[2025-07-06 18:07:18] [Rank 0] step:5701/10000 train_time:389063ms step_avg:68.24ms
+[2025-07-06 18:07:19] [Rank 0] step:5721/10000 train_time:390433ms step_avg:68.25ms
+[2025-07-06 18:07:21] [Rank 0] step:5741/10000 train_time:391803ms step_avg:68.25ms
+[2025-07-06 18:07:22] [Rank 0] step:5761/10000 train_time:393173ms step_avg:68.25ms
+[2025-07-06 18:07:24] [Rank 0] step:5781/10000 train_time:394575ms step_avg:68.25ms
+[2025-07-06 18:07:25] [Rank 0] step:5801/10000 train_time:395945ms step_avg:68.25ms
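
The step_avg field is simply cumulative train_time divided by the step number, and the timer evidently excludes evaluation: between the step-5500 validation print and the step-5501 line above, train_time advances only 10 ms (375960 ms to 375970 ms) despite the multi-minute detailed evaluation in between. Reproducing the printed value:

train_time_ms, step = 375970, 5501
print(f"step_avg:{train_time_ms / step:.2f}ms")  # step_avg:68.35ms, as logged
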
+[2025-07-06 18:07:26] [Rank 0] step:5821/10000 train_time:397316ms step_avg:68.26ms
+[2025-07-06 18:07:28] [Rank 0] step:5841/10000 train_time:398687ms step_avg:68.26ms
+[2025-07-06 18:07:29] [Rank 0] step:5861/10000 train_time:400059ms step_avg:68.26ms
+[2025-07-06 18:07:30] [Rank 0] step:5881/10000 train_time:401429ms step_avg:68.26ms
+[2025-07-06 18:07:32] [Rank 0] step:5901/10000 train_time:402801ms step_avg:68.26ms
+[2025-07-06 18:07:33] [Rank 0] step:5921/10000 train_time:404174ms step_avg:68.26ms
+[2025-07-06 18:07:35] [Rank 0] step:5941/10000 train_time:405596ms step_avg:68.27ms
+[2025-07-06 18:07:36] [Rank 0] step:5961/10000 train_time:406921ms step_avg:68.26ms
+[2025-07-06 18:07:37] [Rank 0] step:5981/10000 train_time:408294ms step_avg:68.27ms
+[2025-07-06 18:07:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:07:39] [Rank 0] PRINT: step:6000/10000 train_loss:0.8993 val_loss:1.0517 train_time:410296ms step_avg:68.38ms
+[2025-07-06 18:07:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:07:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
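
The sampler aims at ~5000 samples and lands on exactly 5633 every cycle, so the per-group quotas are deterministic in size rather than random. A minimal sketch of group-stratified sampling, assuming a list of dicts with a "group" field; the quota rule that yields 5633 is not recoverable from the log, so the ceiling quota below is only illustrative:

import random
from collections import defaultdict

def stratified_sample(samples, target=5000, seed=42):
    rng = random.Random(seed)
    by_group = defaultdict(list)
    for s in samples:
        by_group[s["group"]].append(s)
    quota = -(-target // len(by_group))  # ceil(target / num_groups)
    picked = []
    for members in by_group.values():
        picked.extend(rng.sample(members, min(quota, len(members))))
    return picked
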
+[2025-07-06 18:07:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:13:02] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:13:02] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:13:02] [Rank 0] Total Loss: 5.6152
+[2025-07-06 18:13:02] [Rank 0] Total FTA: 0.4165
+[2025-07-06 18:13:02] [Rank 0] Group 0 Loss: 5.9630
+[2025-07-06 18:13:02] [Rank 0] Group 1 Loss: 5.3423
+[2025-07-06 18:13:02] [Rank 0] Group 2 Loss: 5.3536
+[2025-07-06 18:13:02] [Rank 0] Group 3 Loss: 5.6381
+[2025-07-06 18:13:02] [Rank 0] Group 4 Loss: 5.6094
+[2025-07-06 18:13:02] [Rank 0] Group 5 Loss: 5.5990
+[2025-07-06 18:13:02] [Rank 0] Group 6 Loss: 5.4886
+[2025-07-06 18:13:02] [Rank 0] Group 7 Loss: 5.6237
+[2025-07-06 18:13:02] [Rank 0] Group 8 Loss: 5.6699
+[2025-07-06 18:13:02] [Rank 0] Group 9 Loss: 5.5443
+[2025-07-06 18:13:02] [Rank 0] Group 10 Loss: 5.5790
+[2025-07-06 18:13:02] [Rank 0] Group 11 Loss: 5.6137
+[2025-07-06 18:13:02] [Rank 0] Group 0 FTA: 0.3121
+[2025-07-06 18:13:02] [Rank 0] Group 1 FTA: 0.4479
+[2025-07-06 18:13:02] [Rank 0] Group 2 FTA: 0.4401
+[2025-07-06 18:13:02] [Rank 0] Group 3 FTA: 0.5469
+[2025-07-06 18:13:02] [Rank 0] Group 4 FTA: 0.3490
+[2025-07-06 18:13:02] [Rank 0] Group 5 FTA: 0.4427
+[2025-07-06 18:13:02] [Rank 0] Group 6 FTA: 0.3828
+[2025-07-06 18:13:02] [Rank 0] Group 7 FTA: 0.4010
+[2025-07-06 18:13:02] [Rank 0] Group 8 FTA: 0.4531
+[2025-07-06 18:13:02] [Rank 0] Group 9 FTA: 0.4414
+[2025-07-06 18:13:02] [Rank 0] Group 10 FTA: 0.4395
+[2025-07-06 18:13:02] [Rank 0] Group 11 FTA: 0.4277
+[2025-07-06 18:13:03] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:13:03] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:13:03] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:13:04] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:13:04] [Rank 0] step:6001/10000 train_time:410309ms step_avg:68.37ms
+[2025-07-06 18:13:05] [Rank 0] step:6021/10000 train_time:411061ms step_avg:68.27ms
+[2025-07-06 18:13:07] [Rank 0] step:6041/10000 train_time:412427ms step_avg:68.27ms
+[2025-07-06 18:13:08] [Rank 0] step:6061/10000 train_time:413795ms step_avg:68.27ms
+[2025-07-06 18:13:09] [Rank 0] step:6081/10000 train_time:415161ms step_avg:68.27ms
+[2025-07-06 18:13:11] [Rank 0] step:6101/10000 train_time:416530ms step_avg:68.27ms
+[2025-07-06 18:13:12] [Rank 0] step:6121/10000 train_time:418553ms step_avg:68.38ms
+[2025-07-06 18:13:13] [Rank 0] step:6141/10000 train_time:419291ms step_avg:68.28ms
+[2025-07-06 18:13:15] [Rank 0] step:6161/10000 train_time:420660ms step_avg:68.28ms
+[2025-07-06 18:13:16] [Rank 0] step:6181/10000 train_time:422029ms step_avg:68.28ms
+[2025-07-06 18:13:18] [Rank 0] step:6201/10000 train_time:423399ms step_avg:68.28ms
+[2025-07-06 18:13:19] [Rank 0] step:6221/10000 train_time:424770ms step_avg:68.28ms
+[2025-07-06 18:13:20] [Rank 0] step:6241/10000 train_time:426141ms step_avg:68.28ms
+[2025-07-06 18:13:22] [Rank 0] step:6261/10000 train_time:427511ms step_avg:68.28ms
+[2025-07-06 18:13:23] [Rank 0] step:6281/10000 train_time:428882ms step_avg:68.28ms
+[2025-07-06 18:13:24] [Rank 0] step:6301/10000 train_time:430937ms step_avg:68.39ms
+[2025-07-06 18:13:26] [Rank 0] step:6321/10000 train_time:431675ms step_avg:68.29ms
+[2025-07-06 18:13:27] [Rank 0] step:6341/10000 train_time:433045ms step_avg:68.29ms
+[2025-07-06 18:13:29] [Rank 0] step:6361/10000 train_time:434417ms step_avg:68.29ms
+[2025-07-06 18:13:30] [Rank 0] step:6381/10000 train_time:435789ms step_avg:68.29ms
+[2025-07-06 18:13:31] [Rank 0] step:6401/10000 train_time:437159ms step_avg:68.30ms
+[2025-07-06 18:13:33] [Rank 0] step:6421/10000 train_time:438532ms step_avg:68.30ms
+[2025-07-06 18:13:34] [Rank 0] step:6441/10000 train_time:439903ms step_avg:68.30ms
+[2025-07-06 18:13:35] [Rank 0] step:6461/10000 train_time:441277ms step_avg:68.30ms
+[2025-07-06 18:13:37] [Rank 0] step:6481/10000 train_time:443321ms step_avg:68.40ms
+[2025-07-06 18:13:38] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:13:39] [Rank 0] PRINT: step:6500/10000 train_loss:0.8713 val_loss:1.0498 train_time:444685ms step_avg:68.41ms
+[2025-07-06 18:13:39] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:13:39] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:13:39] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:19:03] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:19:03] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:19:03] [Rank 0] Total Loss: 5.5241
+[2025-07-06 18:19:03] [Rank 0] Total FTA: 0.5205
+[2025-07-06 18:19:03] [Rank 0] Group 0 Loss: 5.7797
+[2025-07-06 18:19:03] [Rank 0] Group 1 Loss: 5.1098
+[2025-07-06 18:19:03] [Rank 0] Group 2 Loss: 5.2523
+[2025-07-06 18:19:03] [Rank 0] Group 3 Loss: 5.5202
+[2025-07-06 18:19:03] [Rank 0] Group 4 Loss: 5.5545
+[2025-07-06 18:19:03] [Rank 0] Group 5 Loss: 5.5732
+[2025-07-06 18:19:03] [Rank 0] Group 6 Loss: 5.5347
+[2025-07-06 18:19:03] [Rank 0] Group 7 Loss: 5.5235
+[2025-07-06 18:19:03] [Rank 0] Group 8 Loss: 5.5261
+[2025-07-06 18:19:03] [Rank 0] Group 9 Loss: 5.5277
+[2025-07-06 18:19:03] [Rank 0] Group 10 Loss: 5.5424
+[2025-07-06 18:19:03] [Rank 0] Group 11 Loss: 5.5466
+[2025-07-06 18:19:03] [Rank 0] Group 0 FTA: 0.3277
+[2025-07-06 18:19:03] [Rank 0] Group 1 FTA: 0.5078
+[2025-07-06 18:19:03] [Rank 0] Group 2 FTA: 0.5833
+[2025-07-06 18:19:03] [Rank 0] Group 3 FTA: 0.5234
+[2025-07-06 18:19:03] [Rank 0] Group 4 FTA: 0.5130
+[2025-07-06 18:19:03] [Rank 0] Group 5 FTA: 0.5547
+[2025-07-06 18:19:03] [Rank 0] Group 6 FTA: 0.5625
+[2025-07-06 18:19:03] [Rank 0] Group 7 FTA: 0.5469
+[2025-07-06 18:19:03] [Rank 0] Group 8 FTA: 0.5208
+[2025-07-06 18:19:03] [Rank 0] Group 9 FTA: 0.6172
+[2025-07-06 18:19:03] [Rank 0] Group 10 FTA: 0.5371
+[2025-07-06 18:19:03] [Rank 0] Group 11 FTA: 0.5771
+[2025-07-06 18:19:04] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:19:04] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:19:04] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:19:05] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:19:05] [Rank 0] step:6501/10000 train_time:444697ms step_avg:68.40ms
+[2025-07-06 18:19:06] [Rank 0] step:6521/10000 train_time:445465ms step_avg:68.31ms
+[2025-07-06 18:19:07] [Rank 0] step:6541/10000 train_time:446830ms step_avg:68.31ms
+[2025-07-06 18:19:09] [Rank 0] step:6561/10000 train_time:448195ms step_avg:68.31ms
+[2025-07-06 18:19:10] [Rank 0] step:6581/10000 train_time:449561ms step_avg:68.31ms
+[2025-07-06 18:19:12] [Rank 0] step:6601/10000 train_time:450929ms step_avg:68.31ms
+[2025-07-06 18:19:13] [Rank 0] step:6621/10000 train_time:452296ms step_avg:68.31ms
+[2025-07-06 18:19:14] [Rank 0] step:6641/10000 train_time:453665ms step_avg:68.31ms
+[2025-07-06 18:19:16] [Rank 0] step:6661/10000 train_time:455144ms step_avg:68.33ms
+[2025-07-06 18:19:17] [Rank 0] step:6681/10000 train_time:456464ms step_avg:68.32ms
+[2025-07-06 18:19:18] [Rank 0] step:6701/10000 train_time:457835ms step_avg:68.32ms
+[2025-07-06 18:19:20] [Rank 0] step:6721/10000 train_time:459206ms step_avg:68.32ms
+[2025-07-06 18:19:21] [Rank 0] step:6741/10000 train_time:460576ms step_avg:68.32ms
+[2025-07-06 18:19:23] [Rank 0] step:6761/10000 train_time:461945ms step_avg:68.32ms
+[2025-07-06 18:19:24] [Rank 0] step:6781/10000 train_time:463316ms step_avg:68.33ms
+[2025-07-06 18:19:25] [Rank 0] step:6801/10000 train_time:464687ms step_avg:68.33ms
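
The four "[✓] ... curve updated and saved" messages after each evaluation indicate the script redraws the full metric history and overwrites the same PNGs in place. A hedged sketch of the per-class variant; the history layout and styling are assumptions, only the output paths appear in the log:

import matplotlib
matplotlib.use("Agg")  # file output only, as in a headless training run
import matplotlib.pyplot as plt

def update_per_class_curve(history, out_path, ylabel):
    # history: {group_id: [(step, value), ...]}, extended after each eval.
    fig, ax = plt.subplots(figsize=(8, 5))
    for g in sorted(history):
        steps, values = zip(*history[g])
        ax.plot(steps, values, label=f"Group {g}")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    ax.legend(fontsize="small", ncol=2)
    fig.savefig(out_path)  # overwriting in place gives "curve updated"
    plt.close(fig)
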
+[2025-07-06 18:19:27] [Rank 0] step:6821/10000 train_time:466057ms step_avg:68.33ms
+[2025-07-06 18:19:28] [Rank 0] step:6841/10000 train_time:467675ms step_avg:68.36ms
+[2025-07-06 18:19:29] [Rank 0] step:6861/10000 train_time:468823ms step_avg:68.33ms
+[2025-07-06 18:19:31] [Rank 0] step:6881/10000 train_time:470194ms step_avg:68.33ms
+[2025-07-06 18:19:32] [Rank 0] step:6901/10000 train_time:471565ms step_avg:68.33ms
+[2025-07-06 18:19:34] [Rank 0] step:6921/10000 train_time:472937ms step_avg:68.33ms
+[2025-07-06 18:19:35] [Rank 0] step:6941/10000 train_time:474309ms step_avg:68.33ms
+[2025-07-06 18:19:36] [Rank 0] step:6961/10000 train_time:475681ms step_avg:68.34ms
+[2025-07-06 18:19:38] [Rank 0] step:6981/10000 train_time:477054ms step_avg:68.34ms
+[2025-07-06 18:19:39] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:19:40] [Rank 0] PRINT: step:7000/10000 train_loss:0.8562 val_loss:1.0157 train_time:479050ms step_avg:68.44ms
+[2025-07-06 18:19:40] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:19:40] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:19:40] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:25:04] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:25:04] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:25:04] [Rank 0] Total Loss: 5.5058
+[2025-07-06 18:25:04] [Rank 0] Total FTA: 0.5329
+[2025-07-06 18:25:04] [Rank 0] Group 0 Loss: 5.9320
+[2025-07-06 18:25:04] [Rank 0] Group 1 Loss: 5.2741
+[2025-07-06 18:25:04] [Rank 0] Group 2 Loss: 5.0092
+[2025-07-06 18:25:04] [Rank 0] Group 3 Loss: 5.5352
+[2025-07-06 18:25:04] [Rank 0] Group 4 Loss: 5.5928
+[2025-07-06 18:25:04] [Rank 0] Group 5 Loss: 5.4280
+[2025-07-06 18:25:04] [Rank 0] Group 6 Loss: 5.3399
+[2025-07-06 18:25:04] [Rank 0] Group 7 Loss: 5.5649
+[2025-07-06 18:25:04] [Rank 0] Group 8 Loss: 5.4559
+[2025-07-06 18:25:04] [Rank 0] Group 9 Loss: 5.4919
+[2025-07-06 18:25:04] [Rank 0] Group 10 Loss: 5.5200
+[2025-07-06 18:25:04] [Rank 0] Group 11 Loss: 5.4996
+[2025-07-06 18:25:04] [Rank 0] Group 0 FTA: 0.4967
+[2025-07-06 18:25:04] [Rank 0] Group 1 FTA: 0.6823
+[2025-07-06 18:25:04] [Rank 0] Group 2 FTA: 0.5495
+[2025-07-06 18:25:04] [Rank 0] Group 3 FTA: 0.4870
+[2025-07-06 18:25:04] [Rank 0] Group 4 FTA: 0.4115
+[2025-07-06 18:25:04] [Rank 0] Group 5 FTA: 0.4714
+[2025-07-06 18:25:04] [Rank 0] Group 6 FTA: 0.5026
+[2025-07-06 18:25:04] [Rank 0] Group 7 FTA: 0.5391
+[2025-07-06 18:25:04] [Rank 0] Group 8 FTA: 0.5521
+[2025-07-06 18:25:04] [Rank 0] Group 9 FTA: 0.5430
+[2025-07-06 18:25:04] [Rank 0] Group 10 FTA: 0.5625
+[2025-07-06 18:25:04] [Rank 0] Group 11 FTA: 0.5684
+[2025-07-06 18:25:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:25:05] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:25:05] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:25:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:25:06] [Rank 0] step:7001/10000 train_time:479061ms step_avg:68.43ms
+[2025-07-06 18:25:07] [Rank 0] step:7021/10000 train_time:479891ms step_avg:68.35ms
+[2025-07-06 18:25:09] [Rank 0] step:7041/10000 train_time:481252ms step_avg:68.35ms
+[2025-07-06 18:25:10] [Rank 0] step:7061/10000 train_time:482618ms step_avg:68.35ms
+[2025-07-06 18:25:11] [Rank 0] step:7081/10000 train_time:483985ms step_avg:68.35ms
+[2025-07-06 18:25:13] [Rank 0] step:7101/10000 train_time:485354ms step_avg:68.35ms
+[2025-07-06 18:25:14] [Rank 0] step:7121/10000 train_time:486722ms step_avg:68.35ms
+[2025-07-06 18:25:15] [Rank 0] step:7141/10000 train_time:488092ms step_avg:68.35ms
+[2025-07-06 18:25:17] [Rank 0] step:7161/10000 train_time:489460ms step_avg:68.35ms
+[2025-07-06 18:25:18] [Rank 0] step:7181/10000 train_time:490830ms step_avg:68.35ms
+[2025-07-06 18:25:20] [Rank 0] step:7201/10000 train_time:492201ms step_avg:68.35ms
+[2025-07-06 18:25:21] [Rank 0] step:7221/10000 train_time:493607ms step_avg:68.36ms
+[2025-07-06 18:25:22] [Rank 0] step:7241/10000 train_time:494977ms step_avg:68.36ms
+[2025-07-06 18:25:24] [Rank 0] step:7261/10000 train_time:496348ms step_avg:68.36ms
+[2025-07-06 18:25:25] [Rank 0] step:7281/10000 train_time:497720ms step_avg:68.36ms
+[2025-07-06 18:25:26] [Rank 0] step:7301/10000 train_time:499092ms step_avg:68.36ms
+[2025-07-06 18:25:28] [Rank 0] step:7321/10000 train_time:500463ms step_avg:68.36ms
+[2025-07-06 18:25:29] [Rank 0] step:7341/10000 train_time:501838ms step_avg:68.36ms
+[2025-07-06 18:25:30] [Rank 0] step:7361/10000 train_time:503210ms step_avg:68.36ms
+[2025-07-06 18:25:32] [Rank 0] step:7381/10000 train_time:504841ms step_avg:68.40ms
+[2025-07-06 18:25:33] [Rank 0] step:7401/10000 train_time:505956ms step_avg:68.36ms
+[2025-07-06 18:25:35] [Rank 0] step:7421/10000 train_time:507328ms step_avg:68.36ms
+[2025-07-06 18:25:36] [Rank 0] step:7441/10000 train_time:508702ms step_avg:68.36ms
+[2025-07-06 18:25:37] [Rank 0] step:7461/10000 train_time:510076ms step_avg:68.37ms
+[2025-07-06 18:25:39] [Rank 0] step:7481/10000 train_time:511448ms step_avg:68.37ms
+[2025-07-06 18:25:40] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:25:41] [Rank 0] PRINT: step:7500/10000 train_loss:0.8472 val_loss:0.9780 train_time:513446ms step_avg:68.46ms
+[2025-07-06 18:25:41] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:25:41] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:25:41] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:31:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:31:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:31:05] [Rank 0] Total Loss: 5.5127
+[2025-07-06 18:31:05] [Rank 0] Total FTA: 0.6309
+[2025-07-06 18:31:05] [Rank 0] Group 0 Loss: 5.8751
+[2025-07-06 18:31:05] [Rank 0] Group 1 Loss: 5.2752
+[2025-07-06 18:31:05] [Rank 0] Group 2 Loss: 5.2509
+[2025-07-06 18:31:05] [Rank 0] Group 3 Loss: 5.6718
+[2025-07-06 18:31:05] [Rank 0] Group 4 Loss: 5.4399
+[2025-07-06 18:31:05] [Rank 0] Group 5 Loss: 5.4862
+[2025-07-06 18:31:05] [Rank 0] Group 6 Loss: 5.4257
+[2025-07-06 18:31:05] [Rank 0] Group 7 Loss: 5.4528
+[2025-07-06 18:31:05] [Rank 0] Group 8 Loss: 5.4798
+[2025-07-06 18:31:05] [Rank 0] Group 9 Loss: 5.4764
+[2025-07-06 18:31:05] [Rank 0] Group 10 Loss: 5.5251
+[2025-07-06 18:31:05] [Rank 0] Group 11 Loss: 5.4756
+[2025-07-06 18:31:05] [Rank 0] Group 0 FTA: 0.6541
+[2025-07-06 18:31:05] [Rank 0] Group 1 FTA: 0.6536
+[2025-07-06 18:31:05] [Rank 0] Group 2 FTA: 0.5365
+[2025-07-06 18:31:05] [Rank 0] Group 3 FTA: 0.5078
+[2025-07-06 18:31:05] [Rank 0] Group 4 FTA: 0.6120
+[2025-07-06 18:31:05] [Rank 0] Group 5 FTA: 0.7031
+[2025-07-06 18:31:05] [Rank 0] Group 6 FTA: 0.6849
+[2025-07-06 18:31:05] [Rank 0] Group 7 FTA: 0.5521
+[2025-07-06 18:31:05] [Rank 0] Group 8 FTA: 0.6380
+[2025-07-06 18:31:05] [Rank 0] Group 9 FTA: 0.6641
+[2025-07-06 18:31:05] [Rank 0] Group 10 FTA: 0.6777
+[2025-07-06 18:31:05] [Rank 0] Group 11 FTA: 0.6416
+[2025-07-06 18:31:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:31:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:31:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:31:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:31:07] [Rank 0] step:7501/10000 train_time:513455ms step_avg:68.45ms
+[2025-07-06 18:31:08] [Rank 0] step:7521/10000 train_time:514234ms step_avg:68.37ms
+[2025-07-06 18:31:10] [Rank 0] step:7541/10000 train_time:515599ms step_avg:68.37ms
+[2025-07-06 18:31:11] [Rank 0] step:7561/10000 train_time:517015ms step_avg:68.38ms
+[2025-07-06 18:31:12] [Rank 0] step:7581/10000 train_time:518375ms step_avg:68.38ms
+[2025-07-06 18:31:14] [Rank 0] step:7601/10000 train_time:519742ms step_avg:68.38ms
+[2025-07-06 18:31:15] [Rank 0] step:7621/10000 train_time:521112ms step_avg:68.38ms
+[2025-07-06 18:31:16] [Rank 0] step:7641/10000 train_time:522480ms step_avg:68.38ms
+[2025-07-06 18:31:18] [Rank 0] step:7661/10000 train_time:523850ms step_avg:68.38ms
+[2025-07-06 18:31:19] [Rank 0] step:7681/10000 train_time:525219ms step_avg:68.38ms
+[2025-07-06 18:31:21] [Rank 0] step:7701/10000 train_time:526590ms step_avg:68.38ms
+[2025-07-06 18:31:22] [Rank 0] step:7721/10000 train_time:527960ms step_avg:68.38ms
+[2025-07-06 18:31:23] [Rank 0] step:7741/10000 train_time:529331ms step_avg:68.38ms
+[2025-07-06 18:31:25] [Rank 0] step:7761/10000 train_time:530727ms step_avg:68.38ms
+[2025-07-06 18:31:26] [Rank 0] step:7781/10000 train_time:532099ms step_avg:68.38ms
+[2025-07-06 18:31:27] [Rank 0] step:7801/10000 train_time:533470ms step_avg:68.38ms
+[2025-07-06 18:31:29] [Rank 0] step:7821/10000 train_time:534843ms step_avg:68.39ms
+[2025-07-06 18:31:30] [Rank 0] step:7841/10000 train_time:536215ms step_avg:68.39ms
+[2025-07-06 18:31:32] [Rank 0] step:7861/10000 train_time:537589ms step_avg:68.39ms
+[2025-07-06 18:31:33] [Rank 0] step:7881/10000 train_time:538961ms step_avg:68.39ms
+[2025-07-06 18:31:34] [Rank 0] step:7901/10000 train_time:540334ms step_avg:68.39ms
+[2025-07-06 18:31:36] [Rank 0] step:7921/10000 train_time:541964ms step_avg:68.42ms
+[2025-07-06 18:31:37] [Rank 0] step:7941/10000 train_time:543081ms step_avg:68.39ms
+[2025-07-06 18:31:38] [Rank 0] step:7961/10000 train_time:544455ms step_avg:68.39ms
+[2025-07-06 18:31:40] [Rank 0] step:7981/10000 train_time:545828ms step_avg:68.39ms
+[2025-07-06 18:31:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:31:42] [Rank 0] PRINT: step:8000/10000 train_loss:0.8379 val_loss:0.9567 train_time:547826ms step_avg:68.48ms
+[2025-07-06 18:31:42] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:31:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
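
Since every metric is also printed to this log, the curves can be rebuilt without the PNGs. A small parser for the periodic validation lines; this is a hypothetical helper, not part of the training script:

import re

VAL_LINE = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")

def parse_val_points(log_path):
    points = []
    with open(log_path) as f:
        for line in f:
            m = VAL_LINE.search(line)
            if m:
                step, train_loss, val_loss = m.groups()
                points.append((int(step), float(train_loss), float(val_loss)))
    return points

# e.g. yields (8000, 0.8379, 0.9567) from the step-8000 line above
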
+[2025-07-06 18:31:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:37:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:37:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:37:05] [Rank 0] Total Loss: 5.4493
+[2025-07-06 18:37:05] [Rank 0] Total FTA: 0.6682
+[2025-07-06 18:37:05] [Rank 0] Group 0 Loss: 6.0212
+[2025-07-06 18:37:05] [Rank 0] Group 1 Loss: 5.1285
+[2025-07-06 18:37:05] [Rank 0] Group 2 Loss: 4.9398
+[2025-07-06 18:37:05] [Rank 0] Group 3 Loss: 5.4973
+[2025-07-06 18:37:05] [Rank 0] Group 4 Loss: 5.4063
+[2025-07-06 18:37:05] [Rank 0] Group 5 Loss: 5.4373
+[2025-07-06 18:37:05] [Rank 0] Group 6 Loss: 5.2963
+[2025-07-06 18:37:05] [Rank 0] Group 7 Loss: 5.4267
+[2025-07-06 18:37:05] [Rank 0] Group 8 Loss: 5.4410
+[2025-07-06 18:37:05] [Rank 0] Group 9 Loss: 5.3913
+[2025-07-06 18:37:05] [Rank 0] Group 10 Loss: 5.4402
+[2025-07-06 18:37:05] [Rank 0] Group 11 Loss: 5.4219
+[2025-07-06 18:37:05] [Rank 0] Group 0 FTA: 0.4915
+[2025-07-06 18:37:05] [Rank 0] Group 1 FTA: 0.6615
+[2025-07-06 18:37:05] [Rank 0] Group 2 FTA: 0.8151
+[2025-07-06 18:37:05] [Rank 0] Group 3 FTA: 0.6719
+[2025-07-06 18:37:05] [Rank 0] Group 4 FTA: 0.6302
+[2025-07-06 18:37:05] [Rank 0] Group 5 FTA: 0.6484
+[2025-07-06 18:37:05] [Rank 0] Group 6 FTA: 0.6901
+[2025-07-06 18:37:05] [Rank 0] Group 7 FTA: 0.6823
+[2025-07-06 18:37:05] [Rank 0] Group 8 FTA: 0.7188
+[2025-07-06 18:37:05] [Rank 0] Group 9 FTA: 0.6719
+[2025-07-06 18:37:05] [Rank 0] Group 10 FTA: 0.7305
+[2025-07-06 18:37:05] [Rank 0] Group 11 FTA: 0.7041
+[2025-07-06 18:37:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:37:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:37:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:37:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:37:07] [Rank 0] step:8001/10000 train_time:547837ms step_avg:68.47ms
+[2025-07-06 18:37:08] [Rank 0] step:8021/10000 train_time:548617ms step_avg:68.40ms
+[2025-07-06 18:37:10] [Rank 0] step:8041/10000 train_time:550128ms step_avg:68.42ms
+[2025-07-06 18:37:11] [Rank 0] step:8061/10000 train_time:551372ms step_avg:68.40ms
+[2025-07-06 18:37:13] [Rank 0] step:8081/10000 train_time:552739ms step_avg:68.40ms
+[2025-07-06 18:37:14] [Rank 0] step:8101/10000 train_time:554357ms step_avg:68.43ms
+[2025-07-06 18:37:15] [Rank 0] step:8121/10000 train_time:555531ms step_avg:68.41ms
+[2025-07-06 18:37:17] [Rank 0] step:8141/10000 train_time:556899ms step_avg:68.41ms
+[2025-07-06 18:37:18] [Rank 0] step:8161/10000 train_time:558267ms step_avg:68.41ms
+[2025-07-06 18:37:19] [Rank 0] step:8181/10000 train_time:559637ms step_avg:68.41ms
+[2025-07-06 18:37:21] [Rank 0] step:8201/10000 train_time:561008ms step_avg:68.41ms
+[2025-07-06 18:37:22] [Rank 0] step:8221/10000 train_time:562379ms step_avg:68.41ms
+[2025-07-06 18:37:24] [Rank 0] step:8241/10000 train_time:563752ms step_avg:68.41ms
+[2025-07-06 18:37:25] [Rank 0] step:8261/10000 train_time:565123ms step_avg:68.41ms
+[2025-07-06 18:37:26] [Rank 0] step:8281/10000 train_time:567172ms step_avg:68.49ms
+[2025-07-06 18:37:28] [Rank 0] step:8301/10000 train_time:567913ms step_avg:68.42ms
+[2025-07-06 18:37:29] [Rank 0] step:8321/10000 train_time:569285ms step_avg:68.42ms
+[2025-07-06 18:37:30] [Rank 0] step:8341/10000 train_time:570660ms step_avg:68.42ms
+[2025-07-06 18:37:32] [Rank 0] step:8361/10000 train_time:572033ms step_avg:68.42ms
+[2025-07-06 18:37:33] [Rank 0] step:8381/10000 train_time:573407ms step_avg:68.42ms
+[2025-07-06 18:37:35] [Rank 0] step:8401/10000 train_time:574780ms step_avg:68.42ms
+[2025-07-06 18:37:36] [Rank 0] step:8421/10000 train_time:576154ms step_avg:68.42ms
+[2025-07-06 18:37:37] [Rank 0] step:8441/10000 train_time:577530ms step_avg:68.42ms
+[2025-07-06 18:37:39] [Rank 0] step:8461/10000 train_time:578903ms step_avg:68.42ms
+[2025-07-06 18:37:40] [Rank 0] step:8481/10000 train_time:580298ms step_avg:68.42ms
+[2025-07-06 18:37:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:37:42] [Rank 0] PRINT: step:8500/10000 train_loss:0.8310 val_loss:0.9541 train_time:582299ms step_avg:68.51ms
+[2025-07-06 18:37:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:37:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:37:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:43:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:43:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:43:07] [Rank 0] Total Loss: 5.5454
+[2025-07-06 18:43:07] [Rank 0] Total FTA: 0.6876
+[2025-07-06 18:43:07] [Rank 0] Group 0 Loss: 5.9674
+[2025-07-06 18:43:07] [Rank 0] Group 1 Loss: 5.3426
+[2025-07-06 18:43:07] [Rank 0] Group 2 Loss: 5.2231
+[2025-07-06 18:43:07] [Rank 0] Group 3 Loss: 5.5178
+[2025-07-06 18:43:07] [Rank 0] Group 4 Loss: 5.5315
+[2025-07-06 18:43:07] [Rank 0] Group 5 Loss: 5.5554
+[2025-07-06 18:43:07] [Rank 0] Group 6 Loss: 5.4866
+[2025-07-06 18:43:07] [Rank 0] Group 7 Loss: 5.5181
+[2025-07-06 18:43:07] [Rank 0] Group 8 Loss: 5.5280
+[2025-07-06 18:43:07] [Rank 0] Group 9 Loss: 5.5244
+[2025-07-06 18:43:07] [Rank 0] Group 10 Loss: 5.4806
+[2025-07-06 18:43:07] [Rank 0] Group 11 Loss: 5.5136
+[2025-07-06 18:43:07] [Rank 0] Group 0 FTA: 0.6788
+[2025-07-06 18:43:07] [Rank 0] Group 1 FTA: 0.8255
+[2025-07-06 18:43:07] [Rank 0] Group 2 FTA: 0.6432
+[2025-07-06 18:43:07] [Rank 0] Group 3 FTA: 0.5312
+[2025-07-06 18:43:07] [Rank 0] Group 4 FTA: 0.5599
+[2025-07-06 18:43:07] [Rank 0] Group 5 FTA: 0.7031
+[2025-07-06 18:43:07] [Rank 0] Group 6 FTA: 0.7005
+[2025-07-06 18:43:07] [Rank 0] Group 7 FTA: 0.7109
+[2025-07-06 18:43:07] [Rank 0] Group 8 FTA: 0.7240
+[2025-07-06 18:43:07] [Rank 0] Group 9 FTA: 0.7344
+[2025-07-06 18:43:07] [Rank 0] Group 10 FTA: 0.7012
+[2025-07-06 18:43:07] [Rank 0] Group 11 FTA: 0.7139
+[2025-07-06 18:43:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:43:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:43:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:43:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:43:09] [Rank 0] step:8501/10000 train_time:582310ms step_avg:68.50ms
+[2025-07-06 18:43:10] [Rank 0] step:8521/10000 train_time:583083ms step_avg:68.43ms
+[2025-07-06 18:43:11] [Rank 0] step:8541/10000 train_time:584453ms step_avg:68.43ms
+[2025-07-06 18:43:13] [Rank 0] step:8561/10000 train_time:585819ms step_avg:68.43ms
+[2025-07-06 18:43:14] [Rank 0] step:8581/10000 train_time:587187ms step_avg:68.43ms
+[2025-07-06 18:43:16] [Rank 0] step:8601/10000 train_time:588553ms step_avg:68.43ms
+[2025-07-06 18:43:17] [Rank 0] step:8621/10000 train_time:589925ms step_avg:68.43ms
+[2025-07-06 18:43:18] [Rank 0] step:8641/10000 train_time:591341ms step_avg:68.43ms
+[2025-07-06 18:43:20] [Rank 0] step:8661/10000 train_time:592701ms step_avg:68.43ms
+[2025-07-06 18:43:21] [Rank 0] step:8681/10000 train_time:594073ms step_avg:68.43ms
+[2025-07-06 18:43:22] [Rank 0] step:8701/10000 train_time:595446ms step_avg:68.43ms
+[2025-07-06 18:43:24] [Rank 0] step:8721/10000 train_time:596818ms step_avg:68.43ms
+[2025-07-06 18:43:25] [Rank 0] step:8741/10000 train_time:598188ms step_avg:68.43ms
+[2025-07-06 18:43:27] [Rank 0] step:8761/10000 train_time:599559ms step_avg:68.43ms
+[2025-07-06 18:43:28] [Rank 0] step:8781/10000 train_time:600931ms step_avg:68.44ms
+[2025-07-06 18:43:29] [Rank 0] step:8801/10000 train_time:602304ms step_avg:68.44ms
+[2025-07-06 18:43:31] [Rank 0] step:8821/10000 train_time:603929ms step_avg:68.46ms
+[2025-07-06 18:43:32] [Rank 0] step:8841/10000 train_time:605087ms step_avg:68.44ms
+[2025-07-06 18:43:33] [Rank 0] step:8861/10000 train_time:606460ms step_avg:68.44ms
+[2025-07-06 18:43:35] [Rank 0] step:8881/10000 train_time:607835ms step_avg:68.44ms
+[2025-07-06 18:43:36] [Rank 0] step:8901/10000 train_time:609209ms step_avg:68.44ms
+[2025-07-06 18:43:38] [Rank 0] step:8921/10000 train_time:610585ms step_avg:68.44ms
+[2025-07-06 18:43:39] [Rank 0] step:8941/10000 train_time:611962ms step_avg:68.44ms
+[2025-07-06 18:43:40] [Rank 0] step:8961/10000 train_time:613337ms step_avg:68.45ms
+[2025-07-06 18:43:42] [Rank 0] step:8981/10000 train_time:614712ms step_avg:68.45ms
+[2025-07-06 18:43:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:43:44] [Rank 0] PRINT: step:9000/10000 train_loss:0.8257 val_loss:0.9483 train_time:616711ms step_avg:68.52ms
+[2025-07-06 18:43:44] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:43:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:43:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:49:11] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:49:11] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:49:11] [Rank 0] Total Loss: 5.5508
+[2025-07-06 18:49:11] [Rank 0] Total FTA: 0.7192
+[2025-07-06 18:49:11] [Rank 0] Group 0 Loss: 5.9345
+[2025-07-06 18:49:11] [Rank 0] Group 1 Loss: 5.5199
+[2025-07-06 18:49:11] [Rank 0] Group 2 Loss: 5.1583
+[2025-07-06 18:49:11] [Rank 0] Group 3 Loss: 5.5633
+[2025-07-06 18:49:11] [Rank 0] Group 4 Loss: 5.4650
+[2025-07-06 18:49:11] [Rank 0] Group 5 Loss: 5.6041
+[2025-07-06 18:49:11] [Rank 0] Group 6 Loss: 5.4719
+[2025-07-06 18:49:11] [Rank 0] Group 7 Loss: 5.4375
+[2025-07-06 18:49:11] [Rank 0] Group 8 Loss: 5.5384
+[2025-07-06 18:49:11] [Rank 0] Group 9 Loss: 5.4646
+[2025-07-06 18:49:11] [Rank 0] Group 10 Loss: 5.5206
+[2025-07-06 18:49:11] [Rank 0] Group 11 Loss: 5.5425
+[2025-07-06 18:49:11] [Rank 0] Group 0 FTA: 0.6515
+[2025-07-06 18:49:11] [Rank 0] Group 1 FTA: 0.6198
+[2025-07-06 18:49:11] [Rank 0] Group 2 FTA: 0.7526
+[2025-07-06 18:49:11] [Rank 0] Group 3 FTA: 0.6432
+[2025-07-06 18:49:11] [Rank 0] Group 4 FTA: 0.6979
+[2025-07-06 18:49:11] [Rank 0] Group 5 FTA: 0.7734
+[2025-07-06 18:49:11] [Rank 0] Group 6 FTA: 0.7448
+[2025-07-06 18:49:11] [Rank 0] Group 7 FTA: 0.7057
+[2025-07-06 18:49:11] [Rank 0] Group 8 FTA: 0.7266
+[2025-07-06 18:49:11] [Rank 0] Group 9 FTA: 0.7344
+[2025-07-06 18:49:11] [Rank 0] Group 10 FTA: 0.7773
+[2025-07-06 18:49:11] [Rank 0] Group 11 FTA: 0.7705
+[2025-07-06 18:49:12] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:49:12] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:49:12] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:49:13] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:49:14] [Rank 0] step:9001/10000 train_time:616830ms step_avg:68.53ms
+[2025-07-06 18:49:15] [Rank 0] step:9021/10000 train_time:618199ms step_avg:68.53ms
+[2025-07-06 18:49:16] [Rank 0] step:9041/10000 train_time:619563ms step_avg:68.53ms
+[2025-07-06 18:49:18] [Rank 0] step:9061/10000 train_time:620931ms step_avg:68.53ms
+[2025-07-06 18:49:19] [Rank 0] step:9081/10000 train_time:622298ms step_avg:68.53ms
+[2025-07-06 18:49:20] [Rank 0] step:9101/10000 train_time:623667ms step_avg:68.53ms
+[2025-07-06 18:49:22] [Rank 0] step:9121/10000 train_time:625036ms step_avg:68.53ms
+[2025-07-06 18:49:23] [Rank 0] step:9141/10000 train_time:626407ms step_avg:68.53ms
+[2025-07-06 18:49:25] [Rank 0] step:9161/10000 train_time:627777ms step_avg:68.53ms
+[2025-07-06 18:49:26] [Rank 0] step:9181/10000 train_time:629395ms step_avg:68.55ms
+[2025-07-06 18:49:27] [Rank 0] step:9201/10000 train_time:630551ms step_avg:68.53ms
+[2025-07-06 18:49:29] [Rank 0] step:9221/10000 train_time:631922ms step_avg:68.53ms
+[2025-07-06 18:49:30] [Rank 0] step:9241/10000 train_time:633292ms step_avg:68.53ms
+[2025-07-06 18:49:31] [Rank 0] step:9261/10000 train_time:634664ms step_avg:68.53ms
+[2025-07-06 18:49:33] [Rank 0] step:9281/10000 train_time:636034ms step_avg:68.53ms
+[2025-07-06 18:49:34] [Rank 0] step:9301/10000 train_time:637407ms step_avg:68.53ms
+[2025-07-06 18:49:36] [Rank 0] step:9321/10000 train_time:638779ms step_avg:68.53ms
+[2025-07-06 18:49:37] [Rank 0] step:9341/10000 train_time:640151ms step_avg:68.53ms
+[2025-07-06 18:49:38] [Rank 0] step:9361/10000 train_time:641775ms step_avg:68.56ms
+[2025-07-06 18:49:40] [Rank 0] step:9381/10000 train_time:642938ms step_avg:68.54ms
+[2025-07-06 18:49:41] [Rank 0] step:9401/10000 train_time:644312ms step_avg:68.54ms
+[2025-07-06 18:49:42] [Rank 0] step:9421/10000 train_time:645688ms step_avg:68.54ms
+[2025-07-06 18:49:44] [Rank 0] step:9441/10000 train_time:647062ms step_avg:68.54ms
+[2025-07-06 18:49:45] [Rank 0] step:9461/10000 train_time:648439ms step_avg:68.54ms
+[2025-07-06 18:49:47] [Rank 0] step:9481/10000 train_time:649815ms step_avg:68.54ms
+[2025-07-06 18:49:48] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:49:49] [Rank 0] PRINT: step:9500/10000 train_loss:0.8180 val_loss:0.9529 train_time:651819ms step_avg:68.61ms
+[2025-07-06 18:49:49] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:49:49] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:49:49] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 18:55:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 18:55:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 18:55:16] [Rank 0] Total Loss: 5.5794
+[2025-07-06 18:55:16] [Rank 0] Total FTA: 0.7476
+[2025-07-06 18:55:16] [Rank 0] Group 0 Loss: 5.7612
+[2025-07-06 18:55:16] [Rank 0] Group 1 Loss: 5.4929
+[2025-07-06 18:55:16] [Rank 0] Group 2 Loss: 5.2743
+[2025-07-06 18:55:16] [Rank 0] Group 3 Loss: 5.5890
+[2025-07-06 18:55:16] [Rank 0] Group 4 Loss: 5.5921
+[2025-07-06 18:55:16] [Rank 0] Group 5 Loss: 5.6367
+[2025-07-06 18:55:16] [Rank 0] Group 6 Loss: 5.5326
+[2025-07-06 18:55:16] [Rank 0] Group 7 Loss: 5.5404
+[2025-07-06 18:55:16] [Rank 0] Group 8 Loss: 5.6069
+[2025-07-06 18:55:16] [Rank 0] Group 9 Loss: 5.5951
+[2025-07-06 18:55:16] [Rank 0] Group 10 Loss: 5.5794
+[2025-07-06 18:55:16] [Rank 0] Group 11 Loss: 5.5775
+[2025-07-06 18:55:16] [Rank 0] Group 0 FTA: 0.6801
+[2025-07-06 18:55:16] [Rank 0] Group 1 FTA: 0.8229
+[2025-07-06 18:55:16] [Rank 0] Group 2 FTA: 0.6979
+[2025-07-06 18:55:16] [Rank 0] Group 3 FTA: 0.7292
+[2025-07-06 18:55:16] [Rank 0] Group 4 FTA: 0.6771
+[2025-07-06 18:55:16] [Rank 0] Group 5 FTA: 0.8229
+[2025-07-06 18:55:16] [Rank 0] Group 6 FTA: 0.7422
+[2025-07-06 18:55:16] [Rank 0] Group 7 FTA: 0.6927
+[2025-07-06 18:55:16] [Rank 0] Group 8 FTA: 0.7422
+[2025-07-06 18:55:16] [Rank 0] Group 9 FTA: 0.7617
+[2025-07-06 18:55:16] [Rank 0] Group 10 FTA: 0.8047
+[2025-07-06 18:55:16] [Rank 0] Group 11 FTA: 0.7861
+[2025-07-06 18:55:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 18:55:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 18:55:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 18:55:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 18:55:18] [Rank 0] step:9501/10000 train_time:651828ms step_avg:68.61ms
+[2025-07-06 18:55:19] [Rank 0] step:9521/10000 train_time:652599ms step_avg:68.54ms
+[2025-07-06 18:55:21] [Rank 0] step:9541/10000 train_time:654634ms step_avg:68.61ms
+[2025-07-06 18:55:22] [Rank 0] step:9561/10000 train_time:655371ms step_avg:68.55ms
+[2025-07-06 18:55:23] [Rank 0] step:9581/10000 train_time:656737ms step_avg:68.55ms
+[2025-07-06 18:55:25] [Rank 0] step:9601/10000 train_time:658105ms step_avg:68.55ms
+[2025-07-06 18:55:26] [Rank 0] step:9621/10000 train_time:659475ms step_avg:68.55ms
+[2025-07-06 18:55:28] [Rank 0] step:9641/10000 train_time:660844ms step_avg:68.55ms
+[2025-07-06 18:55:29] [Rank 0] step:9661/10000 train_time:662213ms step_avg:68.55ms
+[2025-07-06 18:55:30] [Rank 0] step:9681/10000 train_time:663582ms step_avg:68.54ms
+[2025-07-06 18:55:32] [Rank 0] step:9701/10000 train_time:664953ms step_avg:68.54ms
+[2025-07-06 18:55:33] [Rank 0] step:9721/10000 train_time:666987ms step_avg:68.61ms
+[2025-07-06 18:55:34] [Rank 0] step:9741/10000 train_time:667726ms step_avg:68.55ms
+[2025-07-06 18:55:36] [Rank 0] step:9761/10000 train_time:669098ms step_avg:68.55ms
+[2025-07-06 18:55:37] [Rank 0] step:9781/10000 train_time:670470ms step_avg:68.55ms
+[2025-07-06 18:55:39] [Rank 0] step:9801/10000 train_time:671843ms step_avg:68.55ms
+[2025-07-06 18:55:40] [Rank 0] step:9821/10000 train_time:673213ms step_avg:68.55ms
+[2025-07-06 18:55:41] [Rank 0] step:9841/10000 train_time:674585ms step_avg:68.55ms
+[2025-07-06 18:55:43] [Rank 0] step:9861/10000 train_time:675956ms step_avg:68.55ms
+[2025-07-06 18:55:44] [Rank 0] step:9881/10000 train_time:677328ms step_avg:68.55ms
+[2025-07-06 18:55:45] [Rank 0] step:9901/10000 train_time:678950ms step_avg:68.57ms
+[2025-07-06 18:55:47] [Rank 0] step:9921/10000 train_time:680113ms step_avg:68.55ms
+[2025-07-06 18:55:48] [Rank 0] step:9941/10000 train_time:681486ms step_avg:68.55ms
+[2025-07-06 18:55:50] [Rank 0] step:9961/10000 train_time:682858ms step_avg:68.55ms
+[2025-07-06 18:55:51] [Rank 0] step:9981/10000 train_time:684232ms step_avg:68.55ms
+[2025-07-06 18:55:52] [Rank 0] step:10000/10000 train_time:685537ms step_avg:68.55ms
+[2025-07-06 18:55:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-06 18:55:53] [Rank 0] PRINT: step:10000/10000 train_loss:0.8121 val_loss:0.9678 train_time:686238ms step_avg:68.62ms
+[2025-07-06 18:55:53] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-06 18:55:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-06 18:55:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-06 19:01:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-06 19:01:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-06 19:01:18] [Rank 0] Total Loss: 5.5959
+[2025-07-06 19:01:18] [Rank 0] Total FTA: 0.6888
+[2025-07-06 19:01:18] [Rank 0] Group 0 Loss: 5.7661
+[2025-07-06 19:01:18] [Rank 0] Group 1 Loss: 5.5141
+[2025-07-06 19:01:18] [Rank 0] Group 2 Loss: 5.2735
+[2025-07-06 19:01:18] [Rank 0] Group 3 Loss: 5.5228
+[2025-07-06 19:01:18] [Rank 0] Group 4 Loss: 5.5472
+[2025-07-06 19:01:18] [Rank 0] Group 5 Loss: 5.6439
+[2025-07-06 19:01:18] [Rank 0] Group 6 Loss: 5.5438
+[2025-07-06 19:01:18] [Rank 0] Group 7 Loss: 5.5918
+[2025-07-06 19:01:18] [Rank 0] Group 8 Loss: 5.6788
+[2025-07-06 19:01:18] [Rank 0] Group 9 Loss: 5.5602
+[2025-07-06 19:01:18] [Rank 0] Group 10 Loss: 5.6271
+[2025-07-06 19:01:18] [Rank 0] Group 11 Loss: 5.6309
+[2025-07-06 19:01:18] [Rank 0] Group 0 FTA: 0.1664
+[2025-07-06 19:01:18] [Rank 0] Group 1 FTA: 1.0000
+[2025-07-06 19:01:18] [Rank 0] Group 2 FTA: 0.8229
+[2025-07-06 19:01:18] [Rank 0] Group 3 FTA: 0.6641
+[2025-07-06 19:01:18] [Rank 0] Group 4 FTA: 0.6849
+[2025-07-06 19:01:18] [Rank 0] Group 5 FTA: 0.7370
+[2025-07-06 19:01:18] [Rank 0] Group 6 FTA: 0.7422
+[2025-07-06 19:01:18] [Rank 0] Group 7 FTA: 0.7552
+[2025-07-06 19:01:18] [Rank 0] Group 8 FTA: 0.7474
+[2025-07-06 19:01:18] [Rank 0] Group 9 FTA: 0.7422
+[2025-07-06 19:01:18] [Rank 0] Group 10 FTA: 0.7949
+[2025-07-06 19:01:18] [Rank 0] Group 11 FTA: 0.7734
+[2025-07-06 19:01:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-06 19:01:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-06 19:01:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-06 19:01:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-06 19:01:19] [Rank 0] step:10001/10000 train_time:686249ms step_avg:68.62ms
+[2025-07-06 19:01:19] [Rank 0] PRINT: --- Training Finished: Sun Jul 6 19:01:19 2025 ---
+[2025-07-06 19:01:20] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10716 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_d41039bf-edd7-4094-90e2-41790dbb7e54.txt b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_d41039bf-edd7-4094-90e2-41790dbb7e54.txt
new file mode 100644
index 0000000000000000000000000000000000000000..918084b1f45815e0e4d98d3508438ea50e57c006
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/training_log_d41039bf-edd7-4094-90e2-41790dbb7e54.txt
@@ -0,0 +1,5132 @@
+[2025-07-07 03:54:25] [Rank 0] PRINT: --- Script Start: Mon Jul 7 03:54:25 2025 ---
+[2025-07-07 03:54:25] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=42, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-07 03:54:25] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 03:54:25] [Rank 0] PRINT: Using fixed seed: 42
+[2025-07-07 03:54:25] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42
+[2025-07-07 03:54:25] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # use itertools.cycle(files) instead if you want to do multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
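+# Illustrative sketch (not part of the original run script): a minimal writer
+# for the shard layout that _load_data_shard above expects -- a 256-entry int32
+# header whose first three fields are (magic=20240520, version=1, num_tokens),
+# i.e. 256 * 4 bytes, followed by num_tokens uint16 token ids (2 bytes each).
+# The function name is hypothetical and only documents the file format.
+def _write_shard_sketch(path: Path, token_ids):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520        # magic number checked by _load_data_shard
+    header[1] = 1               # format version checked by _load_data_shard
+    header[2] = len(token_ids)  # token count; payload must be 2 * num_tokens bytes
+    with open(path, "wb") as f:
+        f.write(header.tobytes())
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())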
+ "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." + ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = 
f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + +base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... 
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
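+    # Editorial note on the leading space above: GPT-2's byte-level BPE folds a
+    # preceding space into the token, so "Paris" and " Paris" encode to
+    # different token ids, and mid-sentence continuations use the
+    # space-prefixed form. For example (token ids will differ, but the
+    # inequality holds for typical words):
+    #   tokenizer.encode("Paris")[0] != tokenizer.encode(" Paris")[0]
+    # The expected first token must therefore come from the space-prefixed
+    # encoding to match what the model would actually emit after the "?".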
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # use full cached set
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'")
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append({
+                                'idx': idx,
+                                'prompt': sample['prompt'],
+                                'answer': sample['answer'],
+                                'predicted_token': predicted_token,
+                                'expected_token': sample['expected_token'],
+                                'pred_text': tokenizer.decode([predicted_token]),
+                                'exp_text': tokenizer.decode([sample['expected_token']]),
+                                'is_correct': is_correct,
+                            })
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'")
+            print(f" Expected : {result['expected_token']} -> '{result['exp_text']}'")
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
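+# Worked example for generate_powerlaw_selection_counts with m = 3:
+#   group 0: 1 class  x 2^3 = 8 samples each
+#   group 1: 1 class  x 2^2 = 4 samples each
+#   group 2: 2 classes x 2^1 = 2 samples each
+#   group 3: 4 classes x 2^0 = 1 sample each
+# => selection_counts = {0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1}
+#    class_groups     = [0, 1, 2, 2, 3, 3, 3, 3]
+# Each successive group doubles the number of classes while halving the
+# per-class sample count, which yields the power-law class distribution.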
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data: data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            # Targets are the inputs shifted left by one; -100 marks positions ignored by the loss
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
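+# Illustrative call (editorial sketch; the data path and m value are made up).
+# The class->group map comes from the same power-law construction that
+# evaluate_per_class_loss below uses internally; note total_loss / total_acc
+# returned above are sample-weighted (micro) averages over the groups:
+#   sel_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+#   class_to_group_map = dict(zip(sel_counts.keys(), class_groups))
+#   results = run_detailed_evaluation(model, tokenizer, "qa_eval.jsonl",
+#                                     device, m_val, class_to_group_map,
+#                                     num_samples=5000)
+#   print(results['total_loss'], results['total_acc'], results['per_class_acc'])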
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
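+# Shape of the history dicts passed to plot_curves (illustrative): per-class
+# histories are nested and str-keyed on group then step, e.g.
+# history['per_class_loss']['3']['500'] -> loss of group 3 at step 500, while
+# total curves are flat, e.g. history['total_acc']['500'] -> overall FTA.
+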
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    # =================================================================
+    # <<<<< Restored Missing Code >>>>>
+    # =================================================================
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
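+# Worked example of the padding/target layout above (illustrative): a 3-token
+# record plus EOS gives padded_len = 128, input_seq = [t0, t1, t2, eos, pad, ...]
+# and target_seq = [t1, t2, eos, pad, -100, ...]; the -100 tail is skipped by
+# the cross-entropy (ignore_index=-100), as in run_detailed_evaluation above.
+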
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)  # target_seq=None
+        model.train()
+
+        if isinstance(result, tuple) and len(result) == 2:
+            loss, logits = result
+            print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+        else:
+            print0(f"PRINT: Model returns: {type(result)}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test still fails: {e}", console=True)
+
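+# Call convention exercised by the two smoke tests above and relied on below:
+# model(input_ids, target_seq, sliding_window_num_blocks) is expected to return
+# a scalar loss when target_seq is given, and logits (possibly wrapped in a
+# tuple) when target_seq is None.
+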
+# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+if exp_args.model_parameterization == "qkvo":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
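+    # Net split for the default mode 0 (illustrative, for the 12-block model
+    # above): Muon gets every collected attention matrix (q_w/k_w/v_w/c_proj,
+    # up to 48 matrices) plus every MLP matrix (c_fc/c_proj, up to 24), while
+    # Adam always keeps lm_head, the embeddings and the scalar parameters,
+    # whatever the mode.
+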
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        # dict(params=head_params, lr=0.22),
+        # dict(params=embed_params, lr=0.6),
+        # dict(params=scalar_params, lr=0.04)
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices:  # Only add group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam
+    optimizers = [optimizer1]  # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params:  # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None  # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
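+# Worked values for the schedule above (with the configured num_iterations=10000
+# and cooldown_frac=0.8): the multiplier stays at 1.0 up to step 2000, then
+# decays linearly, e.g. get_lr(6000) == 0.55 and get_lr(10000) == 0.1; each
+# group's effective lr is initial_lr * get_lr(step), set in the training loop.
+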
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+
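+# Running totals for the interval since the last validation: each training step
+# below adds loss_train.detach() / train_seq_len and bumps the step counter;
+# both are averaged across ranks and reset at every validation point.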
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
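+# Evaluation cadence (summary of the loop below): the cheap bin-file val loss
+# runs at step 0, at every val_loss_every steps and at the last step; the
+# detailed per-group loss/FTA evaluation and plot refresh run at the same
+# points, on the master process only, for step > 0.
+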
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
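+# Launch sketch (illustrative; the exact command is not recorded in this log,
+# and the script name is hypothetical): the script reads RANK/LOCAL_RANK/
+# WORLD_SIZE from the environment, so a typical invocation would be e.g.
+#   torchrun --standalone --nproc_per_node=8 train_gpt_qa.py \
+#       --model_parameterization qkvo --optimizer_mode 0 --adam_lr 1e-4 --seed 42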
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
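+# Quick sanity sketch of the schedule above (assuming the values recorded in
+# this run's config: num_iterations=10000, cooldown_frac=0.8): get_lr(step)
+# returns 1.0 for step < 2000 (stable phase), then decays linearly, e.g.
+# get_lr(6000) == 0.55, reaching the 0.1 floor at step 10000.
+# train_loss_sum (above) and train_step_count (below) accumulate the per-step
+# training loss between validation passes; both are reset after each
+# validation/logging pass.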
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 03:54:26] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 03:54:26] [Rank 0] PRINT: Constructing model...
+[2025-07-07 03:54:28] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 03:54:28] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 03:54:28] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 03:54:30] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-07 03:54:30] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 03:54:30] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 03:54:30] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 03:54:30] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-07 03:54:30] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 03:54:30] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-07 03:54:30] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005).
+[2025-07-07 03:54:30] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-07 03:54:30] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-07 03:54:30] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 03:54:30] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 03:54:30] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 04:04:20] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 04:04:20] [Rank 0] PRINT: Starting training...
+[2025-07-07 04:04:20] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:08:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 04:08:33] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms +[2025-07-07 04:08:35] [Rank 0] step:21/10000 train_time:1019ms step_avg:48.51ms +[2025-07-07 04:08:35] [Rank 0] step:21/10000 train_time:1019ms step_avg:48.51ms +[2025-07-07 04:08:36] [Rank 0] step:41/10000 train_time:2338ms step_avg:57.03ms +[2025-07-07 04:08:36] [Rank 0] step:41/10000 train_time:2338ms step_avg:57.03ms +[2025-07-07 04:08:37] [Rank 0] step:61/10000 train_time:3663ms step_avg:60.04ms +[2025-07-07 04:08:37] [Rank 0] step:61/10000 train_time:3663ms step_avg:60.04ms +[2025-07-07 04:08:39] [Rank 0] step:81/10000 train_time:4988ms step_avg:61.57ms +[2025-07-07 04:08:39] [Rank 0] step:81/10000 train_time:4988ms step_avg:61.57ms +[2025-07-07 04:08:40] [Rank 0] step:101/10000 train_time:6312ms step_avg:62.50ms +[2025-07-07 04:08:40] [Rank 0] step:101/10000 train_time:6312ms step_avg:62.50ms +[2025-07-07 04:08:41] [Rank 0] step:121/10000 train_time:7643ms step_avg:63.16ms +[2025-07-07 04:08:41] [Rank 0] step:121/10000 train_time:7643ms step_avg:63.16ms +[2025-07-07 04:08:43] [Rank 0] step:141/10000 train_time:8973ms step_avg:63.64ms +[2025-07-07 04:08:43] [Rank 0] step:141/10000 train_time:8973ms step_avg:63.64ms +[2025-07-07 04:08:44] [Rank 0] step:161/10000 train_time:10307ms step_avg:64.02ms +[2025-07-07 04:08:44] [Rank 0] step:161/10000 train_time:10307ms step_avg:64.02ms +[2025-07-07 04:08:45] [Rank 0] step:181/10000 train_time:12331ms step_avg:68.13ms +[2025-07-07 04:08:45] [Rank 0] step:181/10000 train_time:12331ms step_avg:68.13ms +[2025-07-07 04:08:47] [Rank 0] step:201/10000 train_time:13051ms step_avg:64.93ms +[2025-07-07 04:08:47] [Rank 0] step:201/10000 train_time:13051ms step_avg:64.93ms +[2025-07-07 04:08:48] [Rank 0] step:221/10000 train_time:14390ms step_avg:65.11ms +[2025-07-07 04:08:48] [Rank 0] step:221/10000 train_time:14390ms step_avg:65.11ms +[2025-07-07 04:08:49] [Rank 0] step:241/10000 train_time:15731ms step_avg:65.27ms +[2025-07-07 04:08:49] [Rank 0] step:241/10000 train_time:15731ms step_avg:65.27ms +[2025-07-07 04:08:51] [Rank 0] step:261/10000 train_time:17073ms step_avg:65.41ms +[2025-07-07 04:08:51] [Rank 0] step:261/10000 train_time:17073ms step_avg:65.41ms +[2025-07-07 04:08:52] [Rank 0] step:281/10000 train_time:18414ms step_avg:65.53ms +[2025-07-07 04:08:52] [Rank 0] step:281/10000 train_time:18414ms step_avg:65.53ms +[2025-07-07 04:08:53] [Rank 0] step:301/10000 train_time:19754ms step_avg:65.63ms +[2025-07-07 04:08:53] [Rank 0] step:301/10000 train_time:19754ms step_avg:65.63ms +[2025-07-07 04:08:55] [Rank 0] step:321/10000 train_time:21097ms step_avg:65.72ms +[2025-07-07 04:08:55] [Rank 0] step:321/10000 train_time:21097ms step_avg:65.72ms +[2025-07-07 04:08:56] [Rank 0] step:341/10000 train_time:22438ms step_avg:65.80ms +[2025-07-07 04:08:56] [Rank 0] step:341/10000 train_time:22438ms step_avg:65.80ms +[2025-07-07 04:08:57] [Rank 0] step:361/10000 train_time:24459ms step_avg:67.75ms +[2025-07-07 04:08:57] [Rank 0] step:361/10000 train_time:24459ms step_avg:67.75ms +[2025-07-07 04:08:59] [Rank 0] step:381/10000 train_time:25181ms step_avg:66.09ms +[2025-07-07 04:08:59] [Rank 0] step:381/10000 train_time:25181ms step_avg:66.09ms +[2025-07-07 04:09:00] [Rank 0] step:401/10000 train_time:26523ms step_avg:66.14ms +[2025-07-07 04:09:00] [Rank 0] step:401/10000 train_time:26523ms step_avg:66.14ms +[2025-07-07 04:09:02] [Rank 0] step:421/10000 train_time:27866ms step_avg:66.19ms 
+[2025-07-07 04:09:02] [Rank 0] step:421/10000 train_time:27866ms step_avg:66.19ms +[2025-07-07 04:09:03] [Rank 0] step:441/10000 train_time:29208ms step_avg:66.23ms +[2025-07-07 04:09:03] [Rank 0] step:441/10000 train_time:29208ms step_avg:66.23ms +[2025-07-07 04:09:04] [Rank 0] step:461/10000 train_time:30550ms step_avg:66.27ms +[2025-07-07 04:09:04] [Rank 0] step:461/10000 train_time:30550ms step_avg:66.27ms +[2025-07-07 04:09:06] [Rank 0] step:481/10000 train_time:31891ms step_avg:66.30ms +[2025-07-07 04:09:06] [Rank 0] step:481/10000 train_time:31891ms step_avg:66.30ms +[2025-07-07 04:09:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 04:09:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 04:09:08] [Rank 0] PRINT: step:500/10000 train_loss:4.1091 val_loss:2.1697 train_time:33842ms step_avg:67.68ms +[2025-07-07 04:09:08] [Rank 0] PRINT: step:500/10000 train_loss:4.1091 val_loss:2.1697 train_time:33842ms step_avg:67.68ms +[2025-07-07 04:09:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 04:09:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 04:09:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 04:09:08] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 04:09:08] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 04:09:08] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 04:14:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 04:14:27] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 04:14:27] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 04:14:27] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 04:14:27] [Rank 0] Total Loss: 5.1948 +[2025-07-07 04:14:27] [Rank 0] Total Loss: 5.1948 +[2025-07-07 04:14:27] [Rank 0] Total FTA: 0.0921 +[2025-07-07 04:14:27] [Rank 0] Total FTA: 0.0921 +[2025-07-07 04:14:27] [Rank 0] Group 0 Loss: 5.0727 +[2025-07-07 04:14:27] [Rank 0] Group 0 Loss: 5.0727 +[2025-07-07 04:14:27] [Rank 0] Group 1 Loss: 4.9432 +[2025-07-07 04:14:27] [Rank 0] Group 1 Loss: 4.9432 +[2025-07-07 04:14:27] [Rank 0] Group 2 Loss: 5.2467 +[2025-07-07 04:14:27] [Rank 0] Group 2 Loss: 5.2467 +[2025-07-07 04:14:27] [Rank 0] Group 3 Loss: 5.2735 +[2025-07-07 04:14:27] [Rank 0] Group 3 Loss: 5.2735 +[2025-07-07 04:14:27] [Rank 0] Group 4 Loss: 5.2286 +[2025-07-07 04:14:27] [Rank 0] Group 4 Loss: 5.2286 +[2025-07-07 04:14:27] [Rank 0] Group 5 Loss: 5.2322 +[2025-07-07 04:14:27] [Rank 0] Group 5 Loss: 5.2322 +[2025-07-07 04:14:27] [Rank 0] Group 6 Loss: 5.2064 +[2025-07-07 04:14:27] [Rank 0] Group 6 Loss: 5.2064 +[2025-07-07 04:14:27] [Rank 0] Group 7 Loss: 5.2401 +[2025-07-07 04:14:27] [Rank 0] Group 7 Loss: 5.2401 +[2025-07-07 04:14:27] [Rank 0] Group 8 Loss: 5.2063 +[2025-07-07 04:14:27] [Rank 0] Group 8 Loss: 5.2063 +[2025-07-07 04:14:27] [Rank 0] Group 9 Loss: 5.1774 +[2025-07-07 04:14:27] [Rank 0] Group 9 Loss: 5.1774 +[2025-07-07 04:14:27] [Rank 0] Group 10 Loss: 5.2039 +[2025-07-07 04:14:27] [Rank 0] Group 10 Loss: 5.2039 +[2025-07-07 04:14:27] [Rank 0] Group 11 Loss: 5.2795 +[2025-07-07 04:14:27] [Rank 0] Group 11 Loss: 5.2795 +[2025-07-07 04:14:27] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-07 04:14:27] [Rank 0] Group 0 FTA: 0.1417 +[2025-07-07 
04:14:27] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 04:14:27] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 04:14:27] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-07 04:14:27] [Rank 0] Group 2 FTA: 0.0990 +[2025-07-07 04:14:27] [Rank 0] Group 3 FTA: 0.0990 +[2025-07-07 04:14:27] [Rank 0] Group 3 FTA: 0.0990 +[2025-07-07 04:14:27] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 04:14:27] [Rank 0] Group 4 FTA: 0.0182 +[2025-07-07 04:14:27] [Rank 0] Group 5 FTA: 0.1328 +[2025-07-07 04:14:27] [Rank 0] Group 5 FTA: 0.1328 +[2025-07-07 04:14:27] [Rank 0] Group 6 FTA: 0.0911 +[2025-07-07 04:14:27] [Rank 0] Group 6 FTA: 0.0911 +[2025-07-07 04:14:27] [Rank 0] Group 7 FTA: 0.1068 +[2025-07-07 04:14:27] [Rank 0] Group 7 FTA: 0.1068 +[2025-07-07 04:14:27] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 04:14:27] [Rank 0] Group 8 FTA: 0.0938 +[2025-07-07 04:14:27] [Rank 0] Group 9 FTA: 0.1094 +[2025-07-07 04:14:27] [Rank 0] Group 9 FTA: 0.1094 +[2025-07-07 04:14:27] [Rank 0] Group 10 FTA: 0.0879 +[2025-07-07 04:14:27] [Rank 0] Group 10 FTA: 0.0879 +[2025-07-07 04:14:27] [Rank 0] Group 11 FTA: 0.0889 +[2025-07-07 04:14:27] [Rank 0] Group 11 FTA: 0.0889 +[2025-07-07 04:14:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-07 04:14:28] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-07 04:14:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-07 04:14:28] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-07 04:14:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-07 04:14:28] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-07 04:14:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-07 04:14:29] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-07 04:14:29] [Rank 0] step:501/10000 train_time:33852ms step_avg:67.57ms +[2025-07-07 04:14:29] [Rank 0] step:501/10000 train_time:33852ms step_avg:67.57ms +[2025-07-07 04:14:30] [Rank 0] step:521/10000 train_time:34583ms step_avg:66.38ms +[2025-07-07 04:14:30] [Rank 0] step:521/10000 train_time:34583ms step_avg:66.38ms +[2025-07-07 04:14:31] [Rank 0] step:541/10000 train_time:36170ms step_avg:66.86ms +[2025-07-07 04:14:31] [Rank 0] step:541/10000 train_time:36170ms step_avg:66.86ms +[2025-07-07 04:14:33] [Rank 0] step:561/10000 train_time:37304ms step_avg:66.49ms +[2025-07-07 04:14:33] [Rank 0] step:561/10000 train_time:37304ms step_avg:66.49ms +[2025-07-07 04:14:34] [Rank 0] step:581/10000 train_time:38637ms step_avg:66.50ms +[2025-07-07 04:14:34] [Rank 0] step:581/10000 train_time:38637ms step_avg:66.50ms +[2025-07-07 04:14:35] [Rank 0] step:601/10000 train_time:39972ms step_avg:66.51ms +[2025-07-07 04:14:35] [Rank 0] step:601/10000 train_time:39972ms step_avg:66.51ms +[2025-07-07 04:14:37] [Rank 0] step:621/10000 train_time:41307ms step_avg:66.52ms +[2025-07-07 04:14:37] [Rank 0] step:621/10000 train_time:41307ms step_avg:66.52ms 
+[2025-07-07 04:14:38] [Rank 0] step:641/10000 train_time:42642ms step_avg:66.52ms +[2025-07-07 04:14:38] [Rank 0] step:641/10000 train_time:42642ms step_avg:66.52ms +[2025-07-07 04:14:39] [Rank 0] step:661/10000 train_time:43978ms step_avg:66.53ms +[2025-07-07 04:14:39] [Rank 0] step:661/10000 train_time:43978ms step_avg:66.53ms +[2025-07-07 04:14:41] [Rank 0] step:681/10000 train_time:45340ms step_avg:66.58ms +[2025-07-07 04:14:41] [Rank 0] step:681/10000 train_time:45340ms step_avg:66.58ms +[2025-07-07 04:14:42] [Rank 0] step:701/10000 train_time:46677ms step_avg:66.59ms +[2025-07-07 04:14:42] [Rank 0] step:701/10000 train_time:46677ms step_avg:66.59ms +[2025-07-07 04:14:44] [Rank 0] step:721/10000 train_time:48044ms step_avg:66.64ms +[2025-07-07 04:14:44] [Rank 0] step:721/10000 train_time:48044ms step_avg:66.64ms +[2025-07-07 04:14:45] [Rank 0] step:741/10000 train_time:49433ms step_avg:66.71ms +[2025-07-07 04:14:45] [Rank 0] step:741/10000 train_time:49433ms step_avg:66.71ms +[2025-07-07 04:14:46] [Rank 0] step:761/10000 train_time:50778ms step_avg:66.72ms +[2025-07-07 04:14:46] [Rank 0] step:761/10000 train_time:50778ms step_avg:66.72ms +[2025-07-07 04:14:48] [Rank 0] step:781/10000 train_time:52127ms step_avg:66.74ms +[2025-07-07 04:14:48] [Rank 0] step:781/10000 train_time:52127ms step_avg:66.74ms +[2025-07-07 04:14:49] [Rank 0] step:801/10000 train_time:53475ms step_avg:66.76ms +[2025-07-07 04:14:49] [Rank 0] step:801/10000 train_time:53475ms step_avg:66.76ms +[2025-07-07 04:14:50] [Rank 0] step:821/10000 train_time:54825ms step_avg:66.78ms +[2025-07-07 04:14:50] [Rank 0] step:821/10000 train_time:54825ms step_avg:66.78ms +[2025-07-07 04:14:52] [Rank 0] step:841/10000 train_time:56175ms step_avg:66.80ms +[2025-07-07 04:14:52] [Rank 0] step:841/10000 train_time:56175ms step_avg:66.80ms +[2025-07-07 04:14:53] [Rank 0] step:861/10000 train_time:57532ms step_avg:66.82ms +[2025-07-07 04:14:53] [Rank 0] step:861/10000 train_time:57532ms step_avg:66.82ms +[2025-07-07 04:14:54] [Rank 0] step:881/10000 train_time:58882ms step_avg:66.84ms +[2025-07-07 04:14:54] [Rank 0] step:881/10000 train_time:58882ms step_avg:66.84ms +[2025-07-07 04:14:56] [Rank 0] step:901/10000 train_time:60283ms step_avg:66.91ms +[2025-07-07 04:14:56] [Rank 0] step:901/10000 train_time:60283ms step_avg:66.91ms +[2025-07-07 04:14:57] [Rank 0] step:921/10000 train_time:61633ms step_avg:66.92ms +[2025-07-07 04:14:57] [Rank 0] step:921/10000 train_time:61633ms step_avg:66.92ms +[2025-07-07 04:14:58] [Rank 0] step:941/10000 train_time:62984ms step_avg:66.93ms +[2025-07-07 04:14:58] [Rank 0] step:941/10000 train_time:62984ms step_avg:66.93ms +[2025-07-07 04:15:00] [Rank 0] step:961/10000 train_time:64334ms step_avg:66.94ms +[2025-07-07 04:15:00] [Rank 0] step:961/10000 train_time:64334ms step_avg:66.94ms +[2025-07-07 04:15:01] [Rank 0] step:981/10000 train_time:65684ms step_avg:66.96ms +[2025-07-07 04:15:01] [Rank 0] step:981/10000 train_time:65684ms step_avg:66.96ms +[2025-07-07 04:15:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 04:15:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. 
+[2025-07-07 04:15:03] [Rank 0] PRINT: step:1000/10000 train_loss:1.9099 val_loss:1.7843 train_time:67648ms step_avg:67.65ms +[2025-07-07 04:15:03] [Rank 0] PRINT: step:1000/10000 train_loss:1.9099 val_loss:1.7843 train_time:67648ms step_avg:67.65ms +[2025-07-07 04:15:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 04:15:03] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 04:15:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 04:15:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 04:15:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 04:15:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 04:20:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 04:20:25] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 04:20:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 04:20:25] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 04:20:25] [Rank 0] Total Loss: 5.5415 +[2025-07-07 04:20:25] [Rank 0] Total Loss: 5.5415 +[2025-07-07 04:20:25] [Rank 0] Total FTA: 0.0895 +[2025-07-07 04:20:25] [Rank 0] Total FTA: 0.0895 +[2025-07-07 04:20:25] [Rank 0] Group 0 Loss: 5.6042 +[2025-07-07 04:20:25] [Rank 0] Group 0 Loss: 5.6042 +[2025-07-07 04:20:25] [Rank 0] Group 1 Loss: 5.3181 +[2025-07-07 04:20:25] [Rank 0] Group 1 Loss: 5.3181 +[2025-07-07 04:20:25] [Rank 0] Group 2 Loss: 5.4904 +[2025-07-07 04:20:25] [Rank 0] Group 2 Loss: 5.4904 +[2025-07-07 04:20:25] [Rank 0] Group 3 Loss: 5.5406 +[2025-07-07 04:20:25] [Rank 0] Group 3 Loss: 5.5406 +[2025-07-07 04:20:25] [Rank 0] Group 4 Loss: 5.7050 +[2025-07-07 04:20:25] [Rank 0] Group 4 Loss: 5.7050 +[2025-07-07 04:20:25] [Rank 0] Group 5 Loss: 5.5179 +[2025-07-07 04:20:25] [Rank 0] Group 5 Loss: 5.5179 +[2025-07-07 04:20:25] [Rank 0] Group 6 Loss: 5.5465 +[2025-07-07 04:20:25] [Rank 0] Group 6 Loss: 5.5465 +[2025-07-07 04:20:25] [Rank 0] Group 7 Loss: 5.4889 +[2025-07-07 04:20:25] [Rank 0] Group 7 Loss: 5.4889 +[2025-07-07 04:20:25] [Rank 0] Group 8 Loss: 5.5444 +[2025-07-07 04:20:25] [Rank 0] Group 8 Loss: 5.5444 +[2025-07-07 04:20:25] [Rank 0] Group 9 Loss: 5.4965 +[2025-07-07 04:20:25] [Rank 0] Group 9 Loss: 5.4965 +[2025-07-07 04:20:25] [Rank 0] Group 10 Loss: 5.5358 +[2025-07-07 04:20:25] [Rank 0] Group 10 Loss: 5.5358 +[2025-07-07 04:20:25] [Rank 0] Group 11 Loss: 5.5761 +[2025-07-07 04:20:25] [Rank 0] Group 11 Loss: 5.5761 +[2025-07-07 04:20:25] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-07 04:20:25] [Rank 0] Group 0 FTA: 0.1547 +[2025-07-07 04:20:25] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 04:20:25] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 04:20:25] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-07 04:20:25] [Rank 0] Group 2 FTA: 0.0859 +[2025-07-07 04:20:25] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 04:20:25] [Rank 0] Group 3 FTA: 0.0729 +[2025-07-07 04:20:25] [Rank 0] Group 4 FTA: 0.0365 +[2025-07-07 04:20:25] [Rank 0] Group 4 FTA: 0.0365 +[2025-07-07 04:20:25] [Rank 0] Group 5 FTA: 0.0521 +[2025-07-07 04:20:25] [Rank 0] Group 5 FTA: 0.0521 +[2025-07-07 04:20:25] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-07 04:20:25] [Rank 0] Group 6 FTA: 0.0755 +[2025-07-07 04:20:25] [Rank 0] Group 7 FTA: 0.0964 +[2025-07-07 04:20:25] [Rank 0] Group 7 FTA: 0.0964 +[2025-07-07 04:20:25] [Rank 0] Group 8 FTA: 0.1068 +[2025-07-07 04:20:25] [Rank 0] Group 8 FTA: 0.1068 +[2025-07-07 04:20:25] [Rank 0] Group 9 FTA: 0.1016 
+[2025-07-07 04:20:25] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-07 04:20:25] [Rank 0] Group 10 FTA: 0.1035 +[2025-07-07 04:20:25] [Rank 0] Group 10 FTA: 0.1035 +[2025-07-07 04:20:25] [Rank 0] Group 11 FTA: 0.1016 +[2025-07-07 04:20:25] [Rank 0] Group 11 FTA: 0.1016 +[2025-07-07 04:20:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-07 04:20:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-07 04:20:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-07 04:20:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-07 04:20:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-07 04:20:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-07 04:20:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-07 04:20:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-07 04:20:26] [Rank 0] step:1001/10000 train_time:67659ms step_avg:67.59ms +[2025-07-07 04:20:26] [Rank 0] step:1001/10000 train_time:67659ms step_avg:67.59ms +[2025-07-07 04:20:28] [Rank 0] step:1021/10000 train_time:68408ms step_avg:67.00ms +[2025-07-07 04:20:28] [Rank 0] step:1021/10000 train_time:68408ms step_avg:67.00ms +[2025-07-07 04:20:29] [Rank 0] step:1041/10000 train_time:69750ms step_avg:67.00ms +[2025-07-07 04:20:29] [Rank 0] step:1041/10000 train_time:69750ms step_avg:67.00ms +[2025-07-07 04:20:30] [Rank 0] step:1061/10000 train_time:71094ms step_avg:67.01ms +[2025-07-07 04:20:30] [Rank 0] step:1061/10000 train_time:71094ms step_avg:67.01ms +[2025-07-07 04:20:32] [Rank 0] step:1081/10000 train_time:72696ms step_avg:67.25ms +[2025-07-07 04:20:32] [Rank 0] step:1081/10000 train_time:72696ms step_avg:67.25ms +[2025-07-07 04:20:33] [Rank 0] step:1101/10000 train_time:73853ms step_avg:67.08ms +[2025-07-07 04:20:33] [Rank 0] step:1101/10000 train_time:73853ms step_avg:67.08ms +[2025-07-07 04:20:34] [Rank 0] step:1121/10000 train_time:75197ms step_avg:67.08ms +[2025-07-07 04:20:34] [Rank 0] step:1121/10000 train_time:75197ms step_avg:67.08ms +[2025-07-07 04:20:36] [Rank 0] step:1141/10000 train_time:76543ms step_avg:67.08ms +[2025-07-07 04:20:36] [Rank 0] step:1141/10000 train_time:76543ms step_avg:67.08ms +[2025-07-07 04:20:37] [Rank 0] step:1161/10000 train_time:77890ms step_avg:67.09ms +[2025-07-07 04:20:37] [Rank 0] step:1161/10000 train_time:77890ms step_avg:67.09ms +[2025-07-07 04:20:38] [Rank 0] step:1181/10000 train_time:79236ms step_avg:67.09ms +[2025-07-07 04:20:38] [Rank 0] step:1181/10000 train_time:79236ms step_avg:67.09ms +[2025-07-07 04:20:40] [Rank 0] step:1201/10000 train_time:80584ms step_avg:67.10ms +[2025-07-07 04:20:40] [Rank 0] step:1201/10000 train_time:80584ms step_avg:67.10ms +[2025-07-07 04:20:41] [Rank 0] step:1221/10000 train_time:81931ms step_avg:67.10ms +[2025-07-07 04:20:41] [Rank 0] step:1221/10000 train_time:81931ms step_avg:67.10ms +[2025-07-07 
04:20:43] [Rank 0] step:1241/10000 train_time:83280ms step_avg:67.11ms +[2025-07-07 04:20:43] [Rank 0] step:1241/10000 train_time:83280ms step_avg:67.11ms +[2025-07-07 04:20:44] [Rank 0] step:1261/10000 train_time:84883ms step_avg:67.31ms +[2025-07-07 04:20:44] [Rank 0] step:1261/10000 train_time:84883ms step_avg:67.31ms +[2025-07-07 04:20:45] [Rank 0] step:1281/10000 train_time:86017ms step_avg:67.15ms +[2025-07-07 04:20:45] [Rank 0] step:1281/10000 train_time:86017ms step_avg:67.15ms +[2025-07-07 04:20:47] [Rank 0] step:1301/10000 train_time:87367ms step_avg:67.15ms +[2025-07-07 04:20:47] [Rank 0] step:1301/10000 train_time:87367ms step_avg:67.15ms +[2025-07-07 04:20:48] [Rank 0] step:1321/10000 train_time:88719ms step_avg:67.16ms +[2025-07-07 04:20:48] [Rank 0] step:1321/10000 train_time:88719ms step_avg:67.16ms +[2025-07-07 04:20:49] [Rank 0] step:1341/10000 train_time:90070ms step_avg:67.17ms +[2025-07-07 04:20:49] [Rank 0] step:1341/10000 train_time:90070ms step_avg:67.17ms +[2025-07-07 04:20:51] [Rank 0] step:1361/10000 train_time:91420ms step_avg:67.17ms +[2025-07-07 04:20:51] [Rank 0] step:1361/10000 train_time:91420ms step_avg:67.17ms +[2025-07-07 04:20:52] [Rank 0] step:1381/10000 train_time:92771ms step_avg:67.18ms +[2025-07-07 04:20:52] [Rank 0] step:1381/10000 train_time:92771ms step_avg:67.18ms +[2025-07-07 04:20:53] [Rank 0] step:1401/10000 train_time:94123ms step_avg:67.18ms +[2025-07-07 04:20:53] [Rank 0] step:1401/10000 train_time:94123ms step_avg:67.18ms +[2025-07-07 04:20:55] [Rank 0] step:1421/10000 train_time:95474ms step_avg:67.19ms +[2025-07-07 04:20:55] [Rank 0] step:1421/10000 train_time:95474ms step_avg:67.19ms +[2025-07-07 04:20:56] [Rank 0] step:1441/10000 train_time:96875ms step_avg:67.23ms +[2025-07-07 04:20:56] [Rank 0] step:1441/10000 train_time:96875ms step_avg:67.23ms +[2025-07-07 04:20:57] [Rank 0] step:1461/10000 train_time:98175ms step_avg:67.20ms +[2025-07-07 04:20:57] [Rank 0] step:1461/10000 train_time:98175ms step_avg:67.20ms +[2025-07-07 04:20:59] [Rank 0] step:1481/10000 train_time:99525ms step_avg:67.20ms +[2025-07-07 04:20:59] [Rank 0] step:1481/10000 train_time:99525ms step_avg:67.20ms +[2025-07-07 04:21:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 04:21:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 04:21:01] [Rank 0] PRINT: step:1500/10000 train_loss:1.7270 val_loss:1.6285 train_time:101489ms step_avg:67.66ms +[2025-07-07 04:21:01] [Rank 0] PRINT: step:1500/10000 train_loss:1.7270 val_loss:1.6285 train_time:101489ms step_avg:67.66ms +[2025-07-07 04:21:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 04:21:01] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 04:21:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 04:21:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 04:21:01] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 04:21:01] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 04:26:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 04:26:22] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 04:26:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 04:26:22] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 04:26:22] [Rank 0] Total Loss: 6.1839 +[2025-07-07 04:26:22] [Rank 0] Total Loss: 6.1839 +[2025-07-07 04:26:22] [Rank 0] Total FTA: 0.0952 +[2025-07-07 04:26:22] [Rank 0] Total FTA: 0.0952 +[2025-07-07 04:26:22] [Rank 0] Group 0 Loss: 6.2803 +[2025-07-07 04:26:22] [Rank 0] Group 0 Loss: 6.2803 +[2025-07-07 04:26:22] [Rank 0] Group 1 Loss: 6.0269 +[2025-07-07 04:26:22] [Rank 0] Group 1 Loss: 6.0269 +[2025-07-07 04:26:22] [Rank 0] Group 2 Loss: 5.9729 +[2025-07-07 04:26:22] [Rank 0] Group 2 Loss: 5.9729 +[2025-07-07 04:26:22] [Rank 0] Group 3 Loss: 6.3635 +[2025-07-07 04:26:22] [Rank 0] Group 3 Loss: 6.3635 +[2025-07-07 04:26:22] [Rank 0] Group 4 Loss: 6.2606 +[2025-07-07 04:26:22] [Rank 0] Group 4 Loss: 6.2606 +[2025-07-07 04:26:22] [Rank 0] Group 5 Loss: 6.0596 +[2025-07-07 04:26:22] [Rank 0] Group 5 Loss: 6.0596 +[2025-07-07 04:26:22] [Rank 0] Group 6 Loss: 6.1847 +[2025-07-07 04:26:22] [Rank 0] Group 6 Loss: 6.1847 +[2025-07-07 04:26:22] [Rank 0] Group 7 Loss: 6.2082 +[2025-07-07 04:26:22] [Rank 0] Group 7 Loss: 6.2082 +[2025-07-07 04:26:22] [Rank 0] Group 8 Loss: 6.1591 +[2025-07-07 04:26:22] [Rank 0] Group 8 Loss: 6.1591 +[2025-07-07 04:26:22] [Rank 0] Group 9 Loss: 6.1549 +[2025-07-07 04:26:22] [Rank 0] Group 9 Loss: 6.1549 +[2025-07-07 04:26:22] [Rank 0] Group 10 Loss: 6.1967 +[2025-07-07 04:26:22] [Rank 0] Group 10 Loss: 6.1967 +[2025-07-07 04:26:22] [Rank 0] Group 11 Loss: 6.2007 +[2025-07-07 04:26:22] [Rank 0] Group 11 Loss: 6.2007 +[2025-07-07 04:26:22] [Rank 0] Group 0 FTA: 0.1664 +[2025-07-07 04:26:22] [Rank 0] Group 0 FTA: 0.1664 +[2025-07-07 04:26:22] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 04:26:22] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 04:26:22] [Rank 0] Group 2 FTA: 0.1797 +[2025-07-07 04:26:22] [Rank 0] Group 2 FTA: 0.1797 +[2025-07-07 04:26:22] [Rank 0] Group 3 FTA: 0.0521 +[2025-07-07 04:26:22] [Rank 0] Group 3 FTA: 0.0521 +[2025-07-07 04:26:22] [Rank 0] Group 4 FTA: 0.0365 +[2025-07-07 04:26:22] [Rank 0] Group 4 FTA: 0.0365 +[2025-07-07 04:26:22] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 04:26:22] [Rank 0] Group 5 FTA: 0.0573 +[2025-07-07 04:26:22] [Rank 0] Group 6 FTA: 0.1068 +[2025-07-07 04:26:22] [Rank 0] Group 6 FTA: 0.1068 +[2025-07-07 04:26:22] [Rank 0] Group 7 FTA: 0.0938 +[2025-07-07 04:26:22] [Rank 0] Group 7 FTA: 0.0938 +[2025-07-07 04:26:22] [Rank 0] Group 8 FTA: 0.1016 +[2025-07-07 04:26:22] [Rank 0] Group 8 FTA: 0.1016 +[2025-07-07 04:26:22] [Rank 0] Group 9 FTA: 0.0625 +[2025-07-07 04:26:22] [Rank 0] Group 9 FTA: 0.0625 +[2025-07-07 04:26:22] [Rank 0] Group 10 FTA: 0.0938 +[2025-07-07 04:26:22] [Rank 0] Group 10 FTA: 0.0938 +[2025-07-07 04:26:22] [Rank 0] Group 11 FTA: 0.1006 +[2025-07-07 04:26:22] [Rank 0] Group 11 FTA: 0.1006 +[2025-07-07 04:26:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-07 04:26:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png +[2025-07-07 04:26:23] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-07 04:26:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png +[2025-07-07 04:26:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-07 04:26:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png +[2025-07-07 04:26:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-07 04:26:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png +[2025-07-07 04:26:24] [Rank 0] step:1501/10000 train_time:101500ms step_avg:67.62ms +[2025-07-07 04:26:24] [Rank 0] step:1501/10000 train_time:101500ms step_avg:67.62ms +[2025-07-07 04:26:25] [Rank 0] step:1521/10000 train_time:102244ms step_avg:67.22ms +[2025-07-07 04:26:25] [Rank 0] step:1521/10000 train_time:102244ms step_avg:67.22ms +[2025-07-07 04:26:26] [Rank 0] step:1541/10000 train_time:103588ms step_avg:67.22ms +[2025-07-07 04:26:26] [Rank 0] step:1541/10000 train_time:103588ms step_avg:67.22ms +[2025-07-07 04:26:28] [Rank 0] step:1561/10000 train_time:104934ms step_avg:67.22ms +[2025-07-07 04:26:28] [Rank 0] step:1561/10000 train_time:104934ms step_avg:67.22ms +[2025-07-07 04:26:29] [Rank 0] step:1581/10000 train_time:106280ms step_avg:67.22ms +[2025-07-07 04:26:29] [Rank 0] step:1581/10000 train_time:106280ms step_avg:67.22ms +[2025-07-07 04:26:30] [Rank 0] step:1601/10000 train_time:107626ms step_avg:67.22ms +[2025-07-07 04:26:30] [Rank 0] step:1601/10000 train_time:107626ms step_avg:67.22ms +[2025-07-07 04:26:32] [Rank 0] step:1621/10000 train_time:109018ms step_avg:67.25ms +[2025-07-07 04:26:32] [Rank 0] step:1621/10000 train_time:109018ms step_avg:67.25ms +[2025-07-07 04:26:33] [Rank 0] step:1641/10000 train_time:110377ms step_avg:67.26ms +[2025-07-07 04:26:33] [Rank 0] step:1641/10000 train_time:110377ms step_avg:67.26ms +[2025-07-07 04:26:35] [Rank 0] step:1661/10000 train_time:111724ms step_avg:67.26ms +[2025-07-07 04:26:35] [Rank 0] step:1661/10000 train_time:111724ms step_avg:67.26ms +[2025-07-07 04:26:36] [Rank 0] step:1681/10000 train_time:113071ms step_avg:67.26ms +[2025-07-07 04:26:36] [Rank 0] step:1681/10000 train_time:113071ms step_avg:67.26ms +[2025-07-07 04:26:37] [Rank 0] step:1701/10000 train_time:114419ms step_avg:67.27ms +[2025-07-07 04:26:37] [Rank 0] step:1701/10000 train_time:114419ms step_avg:67.27ms +[2025-07-07 04:26:39] [Rank 0] step:1721/10000 train_time:115768ms step_avg:67.27ms +[2025-07-07 04:26:39] [Rank 0] step:1721/10000 train_time:115768ms step_avg:67.27ms +[2025-07-07 04:26:40] [Rank 0] step:1741/10000 train_time:117118ms step_avg:67.27ms +[2025-07-07 04:26:40] [Rank 0] step:1741/10000 train_time:117118ms step_avg:67.27ms +[2025-07-07 04:26:41] [Rank 0] step:1761/10000 train_time:118469ms step_avg:67.27ms +[2025-07-07 04:26:41] [Rank 0] step:1761/10000 train_time:118469ms step_avg:67.27ms +[2025-07-07 04:26:43] [Rank 0] step:1781/10000 train_time:119819ms step_avg:67.28ms +[2025-07-07 04:26:43] [Rank 0] step:1781/10000 train_time:119819ms step_avg:67.28ms +[2025-07-07 04:26:44] [Rank 0] step:1801/10000 train_time:121270ms step_avg:67.34ms +[2025-07-07 04:26:44] [Rank 0] 
+[2025-07-07 04:26:45] [Rank 0] step:1821/10000 train_time:122668ms step_avg:67.36ms
+[2025-07-07 04:26:47] [Rank 0] step:1841/10000 train_time:124018ms step_avg:67.36ms
+[2025-07-07 04:26:48] [Rank 0] step:1861/10000 train_time:125369ms step_avg:67.37ms
+[2025-07-07 04:26:50] [Rank 0] step:1881/10000 train_time:126818ms step_avg:67.42ms
+[2025-07-07 04:26:51] [Rank 0] step:1901/10000 train_time:128171ms step_avg:67.42ms
+[2025-07-07 04:26:52] [Rank 0] step:1921/10000 train_time:129522ms step_avg:67.42ms
+[2025-07-07 04:26:54] [Rank 0] step:1941/10000 train_time:130974ms step_avg:67.48ms
+[2025-07-07 04:26:55] [Rank 0] step:1961/10000 train_time:132325ms step_avg:67.48ms
+[2025-07-07 04:26:57] [Rank 0] step:1981/10000 train_time:133776ms step_avg:67.53ms
+[2025-07-07 04:26:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:26:59] [Rank 0] PRINT: step:2000/10000 train_loss:1.4851 val_loss:1.3632 train_time:135795ms step_avg:67.90ms
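Two of the figures in the PRINT lines above can be reproduced by hand: the warning fires because val_tokens is not a whole number of val batches (1966080 / 262144 = 7.5), so a loop that consumes only full batches evaluates 7 of them and skips the remaining 131072 tokens, and step_avg is simply the cumulative train_time divided by the step count. A quick arithmetic check, not the training script itself:

```python
# Why the warning fires: val_tokens is not a whole number of val batches.
val_tokens, val_batch_size = 1966080, 262144
full_batches, leftover = divmod(val_tokens, val_batch_size)
print(full_batches, leftover)            # -> 7 131072 (half a batch skipped)

# step_avg is cumulative train_time divided by the current step.
train_time_ms, step = 135795, 2000
print(f"{train_time_ms / step:.2f}ms")   # -> 67.90ms, matching the log line
```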
+[2025-07-07 04:26:59] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:26:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:26:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:32:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:32:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:32:21] [Rank 0] Total Loss: 6.2469
+[2025-07-07 04:32:21] [Rank 0] Total FTA: 0.0902
+[2025-07-07 04:32:21] [Rank 0] Group 0 Loss: 6.3840
+[2025-07-07 04:32:21] [Rank 0] Group 1 Loss: 5.9763
+[2025-07-07 04:32:21] [Rank 0] Group 2 Loss: 6.0392
+[2025-07-07 04:32:21] [Rank 0] Group 3 Loss: 6.2683
+[2025-07-07 04:32:21] [Rank 0] Group 4 Loss: 6.2335
+[2025-07-07 04:32:21] [Rank 0] Group 5 Loss: 6.1603
+[2025-07-07 04:32:21] [Rank 0] Group 6 Loss: 6.3781
+[2025-07-07 04:32:21] [Rank 0] Group 7 Loss: 6.3297
+[2025-07-07 04:32:21] [Rank 0] Group 8 Loss: 6.2608
+[2025-07-07 04:32:21] [Rank 0] Group 9 Loss: 6.1976
+[2025-07-07 04:32:21] [Rank 0] Group 10 Loss: 6.3017
+[2025-07-07 04:32:21] [Rank 0] Group 11 Loss: 6.2523
+[2025-07-07 04:32:21] [Rank 0] Group 0 FTA: 0.1625
+[2025-07-07 04:32:21] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 04:32:22] [Rank 0] Group 2 FTA: 0.0964
+[2025-07-07 04:32:22] [Rank 0] Group 3 FTA: 0.0417
+[2025-07-07 04:32:22] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 04:32:22] [Rank 0] Group 5 FTA: 0.0938
+[2025-07-07 04:32:22] [Rank 0] Group 6 FTA: 0.0990
+[2025-07-07 04:32:22] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 04:32:22] [Rank 0] Group 8 FTA: 0.0703
+[2025-07-07 04:32:22] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 04:32:22] [Rank 0] Group 10 FTA: 0.1055
+[2025-07-07 04:32:22] [Rank 0] Group 11 FTA: 0.0947
+[2025-07-07 04:32:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 04:32:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 04:32:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 04:32:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 04:32:23] [Rank 0] step:2001/10000 train_time:135806ms step_avg:67.87ms
+[2025-07-07 04:32:24] [Rank 0] step:2021/10000 train_time:136553ms step_avg:67.57ms
+[2025-07-07 04:32:26] [Rank 0] step:2041/10000 train_time:137895ms step_avg:67.56ms
+[2025-07-07 04:32:27] [Rank 0] step:2061/10000 train_time:139240ms step_avg:67.56ms
+[2025-07-07 04:32:28] [Rank 0] step:2081/10000 train_time:140586ms step_avg:67.56ms
+[2025-07-07 04:32:30] [Rank 0] step:2101/10000 train_time:141931ms step_avg:67.55ms
+[2025-07-07 04:32:31] [Rank 0] step:2121/10000 train_time:143277ms step_avg:67.55ms
+[2025-07-07 04:32:32] [Rank 0] step:2141/10000 train_time:144624ms step_avg:67.55ms
+[2025-07-07 04:32:34] [Rank 0] step:2161/10000 train_time:146643ms step_avg:67.86ms
+[2025-07-07 04:32:35] [Rank 0] step:2181/10000 train_time:147369ms step_avg:67.57ms
+[2025-07-07 04:32:37] [Rank 0] step:2201/10000 train_time:148717ms step_avg:67.57ms
+[2025-07-07 04:32:38] [Rank 0] step:2221/10000 train_time:150066ms step_avg:67.57ms
+[2025-07-07 04:32:39] [Rank 0] step:2241/10000 train_time:151423ms step_avg:67.57ms
+[2025-07-07 04:32:41] [Rank 0] step:2261/10000 train_time:152796ms step_avg:67.58ms
+[2025-07-07 04:32:42] [Rank 0] step:2281/10000 train_time:154170ms step_avg:67.59ms
+[2025-07-07 04:32:43] [Rank 0] step:2301/10000 train_time:155543ms step_avg:67.60ms
+[2025-07-07 04:32:45] [Rank 0] step:2321/10000 train_time:156917ms step_avg:67.61ms
+[2025-07-07 04:32:46] [Rank 0] step:2341/10000 train_time:158290ms step_avg:67.62ms
+[2025-07-07 04:32:48] [Rank 0] step:2361/10000 train_time:159705ms step_avg:67.64ms
+[2025-07-07 04:32:49] [Rank 0] step:2381/10000 train_time:161079ms step_avg:67.65ms
+[2025-07-07 04:32:50] [Rank 0] step:2401/10000 train_time:162453ms step_avg:67.66ms
+[2025-07-07 04:32:52] [Rank 0] step:2421/10000 train_time:163826ms step_avg:67.67ms
+[2025-07-07 04:32:53] [Rank 0] step:2441/10000 train_time:165199ms step_avg:67.68ms
+[2025-07-07 04:32:54] [Rank 0] step:2461/10000 train_time:166573ms step_avg:67.69ms
+[2025-07-07 04:32:56] [Rank 0] step:2481/10000 train_time:167947ms step_avg:67.69ms
+[2025-07-07 04:32:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:32:58] [Rank 0] PRINT: step:2500/10000 train_loss:1.3243 val_loss:1.2954 train_time:169946ms step_avg:67.98ms
+[2025-07-07 04:32:58] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:32:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
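The sampler announced above targets roughly 5000 samples yet settles on 5633 every cycle; the exact final size depends on the group sizes and the rounding rule, which is how a ~5000 target can land on 5633. A minimal sketch of per-group stratified sampling under that assumption; the grouping key, quota rule, and seed handling below are guesses rather than the script's actual logic:

```python
import math
import random
from collections import defaultdict

def stratified_sample(samples, target=5000, seed=42):
    """Draw a per-group quota so every group stays represented.

    `samples` is assumed to be a list of dicts with a "group" key;
    this is an illustrative sketch only.
    """
    by_group = defaultdict(list)
    for s in samples:
        by_group[s["group"]].append(s)
    rng = random.Random(seed)
    quota = max(1, math.ceil(target / len(by_group)))
    picked = []
    for _, members in sorted(by_group.items()):
        if len(members) <= quota:
            picked.extend(members)               # keep small groups whole
        else:
            picked.extend(rng.sample(members, quota))
    return picked
```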
+[2025-07-07 04:32:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:38:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:38:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:38:17] [Rank 0] Total Loss: 6.2454
+[2025-07-07 04:38:17] [Rank 0] Total FTA: 0.1090
+[2025-07-07 04:38:17] [Rank 0] Group 0 Loss: 6.4736
+[2025-07-07 04:38:17] [Rank 0] Group 1 Loss: 5.8248
+[2025-07-07 04:38:17] [Rank 0] Group 2 Loss: 5.8811
+[2025-07-07 04:38:17] [Rank 0] Group 3 Loss: 6.1596
+[2025-07-07 04:38:17] [Rank 0] Group 4 Loss: 6.2201
+[2025-07-07 04:38:17] [Rank 0] Group 5 Loss: 6.2206
+[2025-07-07 04:38:17] [Rank 0] Group 6 Loss: 6.3181
+[2025-07-07 04:38:17] [Rank 0] Group 7 Loss: 6.2256
+[2025-07-07 04:38:17] [Rank 0] Group 8 Loss: 6.3807
+[2025-07-07 04:38:17] [Rank 0] Group 9 Loss: 6.2779
+[2025-07-07 04:38:17] [Rank 0] Group 10 Loss: 6.3037
+[2025-07-07 04:38:17] [Rank 0] Group 11 Loss: 6.3112
+[2025-07-07 04:38:17] [Rank 0] Group 0 FTA: 0.1612
+[2025-07-07 04:38:17] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-07 04:38:17] [Rank 0] Group 2 FTA: 0.0990
+[2025-07-07 04:38:17] [Rank 0] Group 3 FTA: 0.1250
+[2025-07-07 04:38:17] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 04:38:17] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-07 04:38:17] [Rank 0] Group 6 FTA: 0.1172
+[2025-07-07 04:38:17] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 04:38:17] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 04:38:17] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 04:38:17] [Rank 0] Group 10 FTA: 0.1387
+[2025-07-07 04:38:17] [Rank 0] Group 11 FTA: 0.0996
+[2025-07-07 04:38:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 04:38:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 04:38:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 04:38:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
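Each evaluation cycle rewrites the same four PNG files in place, so the latest snapshot always contains the full history up to the current step. A minimal matplotlib sketch of the per-class loss plot; the output path matches the log, while the history layout and styling are assumptions:

```python
import matplotlib
matplotlib.use("Agg")  # render headlessly on a training node
import matplotlib.pyplot as plt

def save_per_class_curves(steps, history, out_path):
    # `history`: dict mapping group id -> list of losses, one entry per
    # evaluation step in `steps` (an assumed layout for this sketch).
    fig, ax = plt.subplots(figsize=(8, 5))
    for group, values in sorted(history.items()):
        ax.plot(steps, values, label=f"Group {group}")
    ax.set_xlabel("step")
    ax.set_ylabel("detailed eval loss")
    ax.legend(ncol=3, fontsize="small")
    fig.tight_layout()
    fig.savefig(out_path)  # overwrites the previous snapshot in place
    plt.close(fig)

# Hypothetical usage:
# save_per_class_curves(
#     steps=[500, 1000, 1500, 2000, 2500],
#     history={g: per_group_losses[g] for g in range(12)},
#     out_path="logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png",
# )
```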
+[2025-07-07 04:38:19] [Rank 0] step:2501/10000 train_time:169956ms step_avg:67.96ms
+[2025-07-07 04:38:20] [Rank 0] step:2521/10000 train_time:170780ms step_avg:67.74ms
+[2025-07-07 04:38:21] [Rank 0] step:2541/10000 train_time:172155ms step_avg:67.75ms
+[2025-07-07 04:38:23] [Rank 0] step:2561/10000 train_time:173523ms step_avg:67.76ms
+[2025-07-07 04:38:24] [Rank 0] step:2581/10000 train_time:174892ms step_avg:67.76ms
+[2025-07-07 04:38:26] [Rank 0] step:2601/10000 train_time:176261ms step_avg:67.77ms
+[2025-07-07 04:38:27] [Rank 0] step:2621/10000 train_time:177632ms step_avg:67.77ms
+[2025-07-07 04:38:28] [Rank 0] step:2641/10000 train_time:179003ms step_avg:67.78ms
+[2025-07-07 04:38:30] [Rank 0] step:2661/10000 train_time:180374ms step_avg:67.78ms
+[2025-07-07 04:38:31] [Rank 0] step:2681/10000 train_time:181745ms step_avg:67.79ms
+[2025-07-07 04:38:33] [Rank 0] step:2701/10000 train_time:183118ms step_avg:67.80ms
+[2025-07-07 04:38:34] [Rank 0] step:2721/10000 train_time:184541ms step_avg:67.82ms
+[2025-07-07 04:38:35] [Rank 0] step:2741/10000 train_time:185915ms step_avg:67.83ms
+[2025-07-07 04:38:37] [Rank 0] step:2761/10000 train_time:187289ms step_avg:67.83ms
+[2025-07-07 04:38:38] [Rank 0] step:2781/10000 train_time:188663ms step_avg:67.84ms
+[2025-07-07 04:38:39] [Rank 0] step:2801/10000 train_time:190036ms step_avg:67.85ms
+[2025-07-07 04:38:41] [Rank 0] step:2821/10000 train_time:191410ms step_avg:67.85ms
+[2025-07-07 04:38:42] [Rank 0] step:2841/10000 train_time:192784ms step_avg:67.86ms
+[2025-07-07 04:38:43] [Rank 0] step:2861/10000 train_time:194158ms step_avg:67.86ms
+[2025-07-07 04:38:45] [Rank 0] step:2881/10000 train_time:195784ms step_avg:67.96ms
+[2025-07-07 04:38:46] [Rank 0] step:2901/10000 train_time:196962ms step_avg:67.89ms
+[2025-07-07 04:38:48] [Rank 0] step:2921/10000 train_time:198337ms step_avg:67.90ms
+[2025-07-07 04:38:49] [Rank 0] step:2941/10000 train_time:199711ms step_avg:67.91ms
+[2025-07-07 04:38:50] [Rank 0] step:2961/10000 train_time:201086ms step_avg:67.91ms
+[2025-07-07 04:38:52] [Rank 0] step:2981/10000 train_time:202461ms step_avg:67.92ms
+[2025-07-07 04:38:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:38:54] [Rank 0] PRINT: step:3000/10000 train_loss:1.2865 val_loss:1.2759 train_time:204458ms step_avg:68.15ms
+[2025-07-07 04:38:54] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:38:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:38:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:44:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:44:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:44:17] [Rank 0] Total Loss: 6.1427
+[2025-07-07 04:44:17] [Rank 0] Total FTA: 0.1094
+[2025-07-07 04:44:17] [Rank 0] Group 0 Loss: 6.2865
+[2025-07-07 04:44:17] [Rank 0] Group 1 Loss: 5.7498
+[2025-07-07 04:44:17] [Rank 0] Group 2 Loss: 5.8113
+[2025-07-07 04:44:17] [Rank 0] Group 3 Loss: 6.2631
+[2025-07-07 04:44:17] [Rank 0] Group 4 Loss: 6.0456
+[2025-07-07 04:44:17] [Rank 0] Group 5 Loss: 6.2154
+[2025-07-07 04:44:17] [Rank 0] Group 6 Loss: 6.2665
+[2025-07-07 04:44:17] [Rank 0] Group 7 Loss: 6.1965
+[2025-07-07 04:44:17] [Rank 0] Group 8 Loss: 6.1564
+[2025-07-07 04:44:17] [Rank 0] Group 9 Loss: 6.1587
+[2025-07-07 04:44:17] [Rank 0] Group 10 Loss: 6.1625
+[2025-07-07 04:44:17] [Rank 0] Group 11 Loss: 6.1844
+[2025-07-07 04:44:17] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 04:44:17] [Rank 0] Group 1 FTA: 0.1797
+[2025-07-07 04:44:17] [Rank 0] Group 2 FTA: 0.0911
+[2025-07-07 04:44:17] [Rank 0] Group 3 FTA: 0.0312
+[2025-07-07 04:44:17] [Rank 0] Group 4 FTA: 0.0990
+[2025-07-07 04:44:17] [Rank 0] Group 5 FTA: 0.1042
+[2025-07-07 04:44:17] [Rank 0] Group 6 FTA: 0.1068
+[2025-07-07 04:44:17] [Rank 0] Group 7 FTA: 0.0859
+[2025-07-07 04:44:17] [Rank 0] Group 8 FTA: 0.1094
+[2025-07-07 04:44:17] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 04:44:17] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 04:44:17] [Rank 0] Group 11 FTA: 0.0996
+[2025-07-07 04:44:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 04:44:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 04:44:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 04:44:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 04:44:19] [Rank 0] step:3001/10000 train_time:204470ms step_avg:68.13ms
+[2025-07-07 04:44:20] [Rank 0] step:3021/10000 train_time:205237ms step_avg:67.94ms
+[2025-07-07 04:44:21] [Rank 0] step:3041/10000 train_time:206605ms step_avg:67.94ms
+[2025-07-07 04:44:23] [Rank 0] step:3061/10000 train_time:207974ms step_avg:67.94ms
+[2025-07-07 04:44:24] [Rank 0] step:3081/10000 train_time:209370ms step_avg:67.96ms
+[2025-07-07 04:44:26] [Rank 0] step:3101/10000 train_time:210739ms step_avg:67.96ms
+[2025-07-07 04:44:27] [Rank 0] step:3121/10000 train_time:212108ms step_avg:67.96ms
+[2025-07-07 04:44:28] [Rank 0] step:3141/10000 train_time:213479ms step_avg:67.97ms
+[2025-07-07 04:44:30] [Rank 0] step:3161/10000 train_time:214849ms step_avg:67.97ms
+[2025-07-07 04:44:31] [Rank 0] step:3181/10000 train_time:216221ms step_avg:67.97ms
+[2025-07-07 04:44:32] [Rank 0] step:3201/10000 train_time:217593ms step_avg:67.98ms
+[2025-07-07 04:44:34] [Rank 0] step:3221/10000 train_time:218966ms step_avg:67.98ms
+[2025-07-07 04:44:35] [Rank 0] step:3241/10000 train_time:220340ms step_avg:67.99ms
+[2025-07-07 04:44:37] [Rank 0] step:3261/10000 train_time:221757ms step_avg:68.00ms
+[2025-07-07 04:44:38] [Rank 0] step:3281/10000 train_time:223129ms step_avg:68.01ms
+[2025-07-07 04:44:39] [Rank 0] step:3301/10000 train_time:224500ms step_avg:68.01ms
+[2025-07-07 04:44:41] [Rank 0] step:3321/10000 train_time:225873ms step_avg:68.01ms
+[2025-07-07 04:44:42] [Rank 0] step:3341/10000 train_time:227246ms step_avg:68.02ms
+[2025-07-07 04:44:43] [Rank 0] step:3361/10000 train_time:228620ms step_avg:68.02ms
+[2025-07-07 04:44:45] [Rank 0] step:3381/10000 train_time:229993ms step_avg:68.03ms
+[2025-07-07 04:44:46] [Rank 0] step:3401/10000 train_time:231367ms step_avg:68.03ms
+[2025-07-07 04:44:48] [Rank 0] step:3421/10000 train_time:232787ms step_avg:68.05ms
+[2025-07-07 04:44:49] [Rank 0] step:3441/10000 train_time:234141ms step_avg:68.04ms
+[2025-07-07 04:44:50] [Rank 0] step:3461/10000 train_time:235518ms step_avg:68.05ms
+[2025-07-07 04:44:52] [Rank 0] step:3481/10000 train_time:236892ms step_avg:68.05ms
+[2025-07-07 04:44:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:44:54] [Rank 0] PRINT: step:3500/10000 train_loss:1.2668 val_loss:1.2628 train_time:238891ms step_avg:68.25ms
+[2025-07-07 04:44:54] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:44:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:44:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:50:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:50:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:50:14] [Rank 0] Total Loss: 6.0979
+[2025-07-07 04:50:15] [Rank 0] Total FTA: 0.1426
+[2025-07-07 04:50:15] [Rank 0] Group 0 Loss: 6.2654
+[2025-07-07 04:50:15] [Rank 0] Group 1 Loss: 5.7543
+[2025-07-07 04:50:15] [Rank 0] Group 2 Loss: 5.8914
+[2025-07-07 04:50:15] [Rank 0] Group 3 Loss: 6.1770
+[2025-07-07 04:50:15] [Rank 0] Group 4 Loss: 6.0500
+[2025-07-07 04:50:15] [Rank 0] Group 5 Loss: 6.0834
+[2025-07-07 04:50:15] [Rank 0] Group 6 Loss: 6.1609
+[2025-07-07 04:50:15] [Rank 0] Group 7 Loss: 6.1411
+[2025-07-07 04:50:15] [Rank 0] Group 8 Loss: 6.1758
+[2025-07-07 04:50:15] [Rank 0] Group 9 Loss: 6.0954
+[2025-07-07 04:50:15] [Rank 0] Group 10 Loss: 6.1281
+[2025-07-07 04:50:15] [Rank 0] Group 11 Loss: 6.0889
+[2025-07-07 04:50:15] [Rank 0] Group 0 FTA: 0.1821
+[2025-07-07 04:50:15] [Rank 0] Group 1 FTA: 0.3724
+[2025-07-07 04:50:15] [Rank 0] Group 2 FTA: 0.2057
+[2025-07-07 04:50:15] [Rank 0] Group 3 FTA: 0.1328
+[2025-07-07 04:50:15] [Rank 0] Group 4 FTA: 0.0859
+[2025-07-07 04:50:15] [Rank 0] Group 5 FTA: 0.1172
+[2025-07-07 04:50:15] [Rank 0] Group 6 FTA: 0.1302
+[2025-07-07 04:50:15] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-07 04:50:15] [Rank 0] Group 8 FTA: 0.1172
+[2025-07-07 04:50:15] [Rank 0] Group 9 FTA: 0.1094
+[2025-07-07 04:50:15] [Rank 0] Group 10 FTA: 0.0820
+[2025-07-07 04:50:15] [Rank 0] Group 11 FTA: 0.1094
+[2025-07-07 04:50:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 04:50:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 04:50:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 04:50:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 04:50:16] [Rank 0] step:3501/10000 train_time:238903ms step_avg:68.24ms
+[2025-07-07 04:50:18] [Rank 0] step:3521/10000 train_time:239667ms step_avg:68.07ms
+[2025-07-07 04:50:19] [Rank 0] step:3541/10000 train_time:241033ms step_avg:68.07ms
+[2025-07-07 04:50:20] [Rank 0] step:3561/10000 train_time:242400ms step_avg:68.07ms
+[2025-07-07 04:50:22] [Rank 0] step:3581/10000 train_time:243771ms step_avg:68.07ms
+[2025-07-07 04:50:23] [Rank 0] step:3601/10000 train_time:245139ms step_avg:68.08ms
+[2025-07-07 04:50:24] [Rank 0] step:3621/10000 train_time:246544ms step_avg:68.09ms
+[2025-07-07 04:50:26] [Rank 0] step:3641/10000 train_time:247913ms step_avg:68.09ms
+[2025-07-07 04:50:27] [Rank 0] step:3661/10000 train_time:249283ms step_avg:68.09ms
+[2025-07-07 04:50:29] [Rank 0] step:3681/10000 train_time:250653ms step_avg:68.09ms
+[2025-07-07 04:50:30] [Rank 0] step:3701/10000 train_time:252025ms step_avg:68.10ms
+[2025-07-07 04:50:31] [Rank 0] step:3721/10000 train_time:253395ms step_avg:68.10ms
+[2025-07-07 04:50:33] [Rank 0] step:3741/10000 train_time:254765ms step_avg:68.10ms
+[2025-07-07 04:50:34] [Rank 0] step:3761/10000 train_time:256137ms step_avg:68.10ms
+[2025-07-07 04:50:36] [Rank 0] step:3781/10000 train_time:257510ms step_avg:68.11ms
+[2025-07-07 04:50:37] [Rank 0] step:3801/10000 train_time:258930ms step_avg:68.12ms
+[2025-07-07 04:50:38] [Rank 0] step:3821/10000 train_time:260405ms step_avg:68.15ms
+[2025-07-07 04:50:40] [Rank 0] step:3841/10000 train_time:261880ms step_avg:68.18ms
+[2025-07-07 04:50:41] [Rank 0] step:3861/10000 train_time:263357ms step_avg:68.21ms
+[2025-07-07 04:50:43] [Rank 0] step:3881/10000 train_time:264729ms step_avg:68.21ms
+[2025-07-07 04:50:44] [Rank 0] step:3901/10000 train_time:266205ms step_avg:68.24ms
+[2025-07-07 04:50:46] [Rank 0] step:3921/10000 train_time:267579ms step_avg:68.24ms
+[2025-07-07 04:50:47] [Rank 0] step:3941/10000 train_time:268953ms step_avg:68.24ms
+[2025-07-07 04:50:48] [Rank 0] step:3961/10000 train_time:270328ms step_avg:68.25ms
+[2025-07-07 04:50:50] [Rank 0] step:3981/10000 train_time:271730ms step_avg:68.26ms
+[2025-07-07 04:50:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:50:52] [Rank 0] PRINT: step:4000/10000 train_loss:1.2452 val_loss:1.2455 train_time:273727ms step_avg:68.43ms
+[2025-07-07 04:50:52] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:50:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:50:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 04:56:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 04:56:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 04:56:15] [Rank 0] Total Loss: 6.1073
+[2025-07-07 04:56:15] [Rank 0] Total FTA: 0.1120
+[2025-07-07 04:56:15] [Rank 0] Group 0 Loss: 6.3323
+[2025-07-07 04:56:15] [Rank 0] Group 1 Loss: 5.6359
+[2025-07-07 04:56:15] [Rank 0] Group 2 Loss: 5.8747
+[2025-07-07 04:56:15] [Rank 0] Group 3 Loss: 6.3174
+[2025-07-07 04:56:15] [Rank 0] Group 4 Loss: 6.1392
+[2025-07-07 04:56:15] [Rank 0] Group 5 Loss: 6.1184
+[2025-07-07 04:56:15] [Rank 0] Group 6 Loss: 6.1390
+[2025-07-07 04:56:15] [Rank 0] Group 7 Loss: 6.1554
+[2025-07-07 04:56:15] [Rank 0] Group 8 Loss: 6.0808
+[2025-07-07 04:56:15] [Rank 0] Group 9 Loss: 6.1141
+[2025-07-07 04:56:15] [Rank 0] Group 10 Loss: 6.1250
+[2025-07-07 04:56:15] [Rank 0] Group 11 Loss: 6.0768
+[2025-07-07 04:56:15] [Rank 0] Group 0 FTA: 0.1508
+[2025-07-07 04:56:15] [Rank 0] Group 1 FTA: 0.1641
+[2025-07-07 04:56:15] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 04:56:15] [Rank 0] Group 3 FTA: 0.1328
+[2025-07-07 04:56:15] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 04:56:15] [Rank 0] Group 5 FTA: 0.1120
+[2025-07-07 04:56:15] [Rank 0] Group 6 FTA: 0.1198
+[2025-07-07 04:56:15] [Rank 0] Group 7 FTA: 0.1094
+[2025-07-07 04:56:15] [Rank 0] Group 8 FTA: 0.0938
+[2025-07-07 04:56:15] [Rank 0] Group 9 FTA: 0.1250
+[2025-07-07 04:56:15] [Rank 0] Group 10 FTA: 0.1094
+[2025-07-07 04:56:15] [Rank 0] Group 11 FTA: 0.1016
+[2025-07-07 04:56:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 04:56:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 04:56:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 04:56:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 04:56:17] [Rank 0] step:4001/10000 train_time:273739ms step_avg:68.42ms
+[2025-07-07 04:56:18] [Rank 0] step:4021/10000 train_time:274489ms step_avg:68.26ms
+[2025-07-07 04:56:20] [Rank 0] step:4041/10000 train_time:275854ms step_avg:68.26ms
+[2025-07-07 04:56:21] [Rank 0] step:4061/10000 train_time:277223ms step_avg:68.26ms
+[2025-07-07 04:56:22] [Rank 0] step:4081/10000 train_time:278591ms step_avg:68.27ms
+[2025-07-07 04:56:24] [Rank 0] step:4101/10000 train_time:279959ms step_avg:68.27ms
+[2025-07-07 04:56:25] [Rank 0] step:4121/10000 train_time:281327ms step_avg:68.27ms
+[2025-07-07 04:56:27] [Rank 0] step:4141/10000 train_time:283352ms step_avg:68.43ms
+[2025-07-07 04:56:28] [Rank 0] step:4161/10000 train_time:284091ms step_avg:68.27ms
+[2025-07-07 04:56:29] [Rank 0] step:4181/10000 train_time:285462ms step_avg:68.28ms
+[2025-07-07 04:56:31] [Rank 0] step:4201/10000 train_time:286832ms step_avg:68.28ms
+[2025-07-07 04:56:32] [Rank 0] step:4221/10000 train_time:288204ms step_avg:68.28ms
+[2025-07-07 04:56:33] [Rank 0] step:4241/10000 train_time:289576ms step_avg:68.28ms
+[2025-07-07 04:56:35] [Rank 0] step:4261/10000 train_time:290948ms step_avg:68.28ms
+[2025-07-07 04:56:36] [Rank 0] step:4281/10000 train_time:292319ms step_avg:68.28ms
+[2025-07-07 04:56:38] [Rank 0] step:4301/10000 train_time:293691ms step_avg:68.28ms
+[2025-07-07 04:56:39] [Rank 0] step:4321/10000 train_time:295737ms step_avg:68.44ms
+[2025-07-07 04:56:40] [Rank 0] step:4341/10000 train_time:296476ms step_avg:68.30ms
+[2025-07-07 04:56:42] [Rank 0] step:4361/10000 train_time:297849ms step_avg:68.30ms
+[2025-07-07 04:56:43] [Rank 0] step:4381/10000 train_time:299222ms step_avg:68.30ms
+[2025-07-07 04:56:44] [Rank 0] step:4401/10000 train_time:300594ms step_avg:68.30ms
+[2025-07-07 04:56:46] [Rank 0] step:4421/10000 train_time:301966ms step_avg:68.30ms
+[2025-07-07 04:56:47] [Rank 0] step:4441/10000 train_time:303338ms step_avg:68.30ms
+[2025-07-07 04:56:49] [Rank 0] step:4461/10000 train_time:304710ms step_avg:68.31ms
+[2025-07-07 04:56:50] [Rank 0] step:4481/10000 train_time:306127ms step_avg:68.32ms
+[2025-07-07 04:56:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 04:56:52] [Rank 0] PRINT: step:4500/10000 train_loss:1.2190 val_loss:1.2306 train_time:308123ms step_avg:68.47ms
+[2025-07-07 04:56:52] [Rank 0] --- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 04:56:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 04:56:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:02:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:02:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:02:16] [Rank 0] Total Loss: 5.9966
+[2025-07-07 05:02:16] [Rank 0] Total FTA: 0.1298
+[2025-07-07 05:02:16] [Rank 0] Group 0 Loss: 6.3351
+[2025-07-07 05:02:16] [Rank 0] Group 1 Loss: 5.4270
+[2025-07-07 05:02:16] [Rank 0] Group 2 Loss: 5.7444
+[2025-07-07 05:02:16] [Rank 0] Group 3 Loss: 6.0782
+[2025-07-07 05:02:16] [Rank 0] Group 4 Loss: 5.9102
+[2025-07-07 05:02:16] [Rank 0] Group 5 Loss: 5.9461
+[2025-07-07 05:02:16] [Rank 0] Group 6 Loss: 6.0343
+[2025-07-07 05:02:16] [Rank 0] Group 7 Loss: 6.0515
+[2025-07-07 05:02:16] [Rank 0] Group 8 Loss: 6.0466
+[2025-07-07 05:02:16] [Rank 0] Group 9 Loss: 6.0010
+[2025-07-07 05:02:16] [Rank 0] Group 10 Loss: 6.0161
+[2025-07-07 05:02:16] [Rank 0] Group 11 Loss: 6.0068
+[2025-07-07 05:02:16] [Rank 0] Group 0 FTA: 0.1717
+[2025-07-07 05:02:16] [Rank 0] Group 1 FTA: 0.2005
+[2025-07-07 05:02:16] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-07 05:02:16] [Rank 0] Group 3 FTA: 0.1641
+[2025-07-07 05:02:16] [Rank 0] Group 4 FTA: 0.1224
+[2025-07-07 05:02:16] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-07 05:02:16] [Rank 0] Group 6 FTA: 0.1224
+[2025-07-07 05:02:16] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-07 05:02:16] [Rank 0] Group 8 FTA: 0.1562
+[2025-07-07 05:02:16] [Rank 0] Group 9 FTA: 0.1562
+[2025-07-07 05:02:16] [Rank 0] Group 10 FTA: 0.1035
+[2025-07-07 05:02:16] [Rank 0] Group 11 FTA: 0.1055
+[2025-07-07 05:02:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:02:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:02:17] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:02:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 05:02:18] [Rank 0] step:4501/10000 train_time:308246ms step_avg:68.48ms
+[2025-07-07 05:02:20] [Rank 0] step:4521/10000 train_time:309608ms step_avg:68.48ms
+[2025-07-07 05:02:21] [Rank 0] step:4541/10000 train_time:310975ms step_avg:68.48ms
+[2025-07-07 05:02:22] [Rank 0] step:4561/10000 train_time:312343ms step_avg:68.48ms
+[2025-07-07 05:02:24] [Rank 0] step:4581/10000 train_time:313710ms step_avg:68.48ms
+[2025-07-07 05:02:25] [Rank 0] step:4601/10000 train_time:315079ms step_avg:68.48ms
+[2025-07-07 05:02:26] [Rank 0] step:4621/10000 train_time:316450ms step_avg:68.48ms
+[2025-07-07 05:02:28] [Rank 0] step:4641/10000 train_time:317820ms step_avg:68.48ms
+[2025-07-07 05:02:29] [Rank 0] step:4661/10000 train_time:319190ms step_avg:68.48ms
+[2025-07-07 05:02:30] [Rank 0] step:4681/10000 train_time:320610ms step_avg:68.49ms
+[2025-07-07 05:02:32] [Rank 0] step:4701/10000 train_time:321963ms step_avg:68.49ms
+[2025-07-07 05:02:33] [Rank 0] step:4721/10000 train_time:323333ms step_avg:68.49ms
+[2025-07-07 05:02:35] [Rank 0] step:4741/10000 train_time:324703ms step_avg:68.49ms
+[2025-07-07 05:02:36] [Rank 0] step:4761/10000 train_time:326075ms step_avg:68.49ms
+[2025-07-07 05:02:37] [Rank 0] step:4781/10000 train_time:327446ms step_avg:68.49ms
+[2025-07-07 05:02:39] [Rank 0] step:4801/10000 train_time:328817ms step_avg:68.49ms
+[2025-07-07 05:02:40] [Rank 0] step:4821/10000 train_time:330189ms step_avg:68.49ms
+[2025-07-07 05:02:41] [Rank 0] step:4841/10000 train_time:331562ms step_avg:68.49ms
+[2025-07-07 05:02:43] [Rank 0] step:4861/10000 train_time:333186ms step_avg:68.54ms
+[2025-07-07 05:02:44] [Rank 0] step:4881/10000 train_time:334348ms step_avg:68.50ms
+[2025-07-07 05:02:46] [Rank 0] step:4901/10000 train_time:335722ms step_avg:68.50ms
+[2025-07-07 05:02:47] [Rank 0] step:4921/10000 train_time:337095ms step_avg:68.50ms
+[2025-07-07 05:02:48] [Rank 0] step:4941/10000 train_time:338468ms step_avg:68.50ms
+[2025-07-07 05:02:50] [Rank 0] step:4961/10000 train_time:339875ms step_avg:68.51ms
+[2025-07-07 05:02:51] [Rank 0] step:4981/10000 train_time:341247ms step_avg:68.51ms
+[2025-07-07 05:02:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:02:53] [Rank 0] PRINT: step:5000/10000 train_loss:1.1840 val_loss:1.2097 train_time:343244ms step_avg:68.65ms
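The recurring warning before every validation pass is plain integer arithmetic: 1966080 validation tokens do not divide evenly into 262144-token batches, so only the full batches are consumed and the remainder is dropped. A quick check:

```python
# Why "val_tokens (1966080) not perfectly divisible by val_batch_size (262144)"
# fires at every validation: 1966080 / 262144 = 7.5, so the loop runs 7 full
# batches and half a batch worth of tokens goes unused.
val_tokens, val_batch_size = 1966080, 262144
n_batches, leftover = divmod(val_tokens, val_batch_size)
print(n_batches, leftover)  # 7 131072 -> the last 131072 tokens may be missed
```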
+[2025-07-07 05:02:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:02:53] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:02:53] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:08:14] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:08:14] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:08:14] [Rank 0] Total Loss: 6.0084
+[2025-07-07 05:08:14] [Rank 0] Total FTA: 0.1408
+[2025-07-07 05:08:14] [Rank 0] Group 0 Loss: 6.1898
+[2025-07-07 05:08:15] [Rank 0] Group 1 Loss: 5.3301
+[2025-07-07 05:08:15] [Rank 0] Group 2 Loss: 5.9027
+[2025-07-07 05:08:15] [Rank 0] Group 3 Loss: 6.2198
+[2025-07-07 05:08:15] [Rank 0] Group 4 Loss: 5.9474
+[2025-07-07 05:08:15] [Rank 0] Group 5 Loss: 6.0250
+[2025-07-07 05:08:15] [Rank 0] Group 6 Loss: 6.1304
+[2025-07-07 05:08:15] [Rank 0] Group 7 Loss: 6.0136
+[2025-07-07 05:08:15] [Rank 0] Group 8 Loss: 6.1379
+[2025-07-07 05:08:15] [Rank 0] Group 9 Loss: 6.0327
+[2025-07-07 05:08:15] [Rank 0] Group 10 Loss: 6.0153
+[2025-07-07 05:08:15] [Rank 0] Group 11 Loss: 5.9977
+[2025-07-07 05:08:15] [Rank 0] Group 0 FTA: 0.1547
+[2025-07-07 05:08:15] [Rank 0] Group 1 FTA: 0.1432
+[2025-07-07 05:08:15] [Rank 0] Group 2 FTA: 0.2682
+[2025-07-07 05:08:15] [Rank 0] Group 3 FTA: 0.1615
+[2025-07-07 05:08:15] [Rank 0] Group 4 FTA: 0.1250
+[2025-07-07 05:08:15] [Rank 0] Group 5 FTA: 0.1823
+[2025-07-07 05:08:15] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-07 05:08:15] [Rank 0] Group 7 FTA: 0.0990
+[2025-07-07 05:08:15] [Rank 0] Group 8 FTA: 0.1042
+[2025-07-07 05:08:15] [Rank 0] Group 9 FTA: 0.1484
+[2025-07-07 05:08:15] [Rank 0] Group 10 FTA: 0.1172
+[2025-07-07 05:08:15] [Rank 0] Group 11 FTA: 0.1221
+[2025-07-07 05:08:15] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:08:15] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:08:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:08:16] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
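Each detailed evaluation requests "~5000 samples" yet always ends up with 5633. That consistent overshoot is what per-group quota rounding produces; a sketch of such a sampler (an assumption about the mechanism, not the script's actual implementation):

```python
# Stratified sampling that overshoots its target, consistent with ~5000
# requested and 5633 kept: taking ceil() of each group's proportional quota
# rounds the total upward, and the same seed makes the 5633 reproducible.
import math
import random

def stratified_sample(items_by_group, target_total, seed=42):
    rng = random.Random(seed)
    total = sum(len(v) for v in items_by_group.values())
    picked = []
    for gid, items in items_by_group.items():
        quota = min(len(items), math.ceil(target_total * len(items) / total))
        picked.extend(rng.sample(items, quota))
    return picked
```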
+[2025-07-07 05:08:16] [Rank 0] step:5001/10000 train_time:343254ms step_avg:68.64ms
+[2025-07-07 05:08:17] [Rank 0] step:5021/10000 train_time:344009ms step_avg:68.51ms
+[2025-07-07 05:08:19] [Rank 0] step:5041/10000 train_time:345425ms step_avg:68.52ms
+[2025-07-07 05:08:20] [Rank 0] step:5061/10000 train_time:346776ms step_avg:68.52ms
+[2025-07-07 05:08:22] [Rank 0] step:5081/10000 train_time:348145ms step_avg:68.52ms
+[2025-07-07 05:08:23] [Rank 0] step:5101/10000 train_time:349513ms step_avg:68.52ms
+[2025-07-07 05:08:24] [Rank 0] step:5121/10000 train_time:350883ms step_avg:68.52ms
+[2025-07-07 05:08:26] [Rank 0] step:5141/10000 train_time:352299ms step_avg:68.53ms
+[2025-07-07 05:08:27] [Rank 0] step:5161/10000 train_time:353668ms step_avg:68.53ms
+[2025-07-07 05:08:29] [Rank 0] step:5181/10000 train_time:355113ms step_avg:68.54ms
+[2025-07-07 05:08:30] [Rank 0] step:5201/10000 train_time:356484ms step_avg:68.54ms
+[2025-07-07 05:08:31] [Rank 0] step:5221/10000 train_time:358107ms step_avg:68.59ms
+[2025-07-07 05:08:33] [Rank 0] step:5241/10000 train_time:359259ms step_avg:68.55ms
+[2025-07-07 05:08:34] [Rank 0] step:5261/10000 train_time:360631ms step_avg:68.55ms
+[2025-07-07 05:08:35] [Rank 0] step:5281/10000 train_time:362003ms step_avg:68.55ms
+[2025-07-07 05:08:37] [Rank 0] step:5301/10000 train_time:363374ms step_avg:68.55ms
+[2025-07-07 05:08:38] [Rank 0] step:5321/10000 train_time:364747ms step_avg:68.55ms
+[2025-07-07 05:08:40] [Rank 0] step:5341/10000 train_time:366120ms step_avg:68.55ms
+[2025-07-07 05:08:41] [Rank 0] step:5361/10000 train_time:367495ms step_avg:68.55ms
+[2025-07-07 05:08:42] [Rank 0] step:5381/10000 train_time:368867ms step_avg:68.55ms
+[2025-07-07 05:08:44] [Rank 0] step:5401/10000 train_time:370286ms step_avg:68.56ms
+[2025-07-07 05:08:45] [Rank 0] step:5421/10000 train_time:371644ms step_avg:68.56ms
+[2025-07-07 05:08:46] [Rank 0] step:5441/10000 train_time:373018ms step_avg:68.56ms
+[2025-07-07 05:08:48] [Rank 0] step:5461/10000 train_time:374391ms step_avg:68.56ms
+[2025-07-07 05:08:49] [Rank 0] step:5481/10000 train_time:375764ms step_avg:68.56ms
+[2025-07-07 05:08:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:08:51] [Rank 0] PRINT: step:5500/10000 train_loss:1.1453 val_loss:1.1988 train_time:377761ms step_avg:68.68ms
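The step_avg figures throughout are consistent with cumulative wall-clock training time divided by the current step index, which is why they creep up slightly as evaluation pauses accumulate. Checked against the step-5500 line above:

```python
# step_avg appears to be cumulative train_time over the step counter:
train_time_ms, step = 377_761, 5500
print(f"step_avg:{train_time_ms / step:.2f}ms")  # step_avg:68.68ms, as logged
```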
+[2025-07-07 05:08:51] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:08:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:08:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:14:15] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:14:15] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:14:15] [Rank 0] Total Loss: 5.8100
+[2025-07-07 05:14:15] [Rank 0] Total FTA: 0.1530
+[2025-07-07 05:14:15] [Rank 0] Group 0 Loss: 6.1552
+[2025-07-07 05:14:15] [Rank 0] Group 1 Loss: 5.3655
+[2025-07-07 05:14:15] [Rank 0] Group 2 Loss: 5.6007
+[2025-07-07 05:14:15] [Rank 0] Group 3 Loss: 5.9707
+[2025-07-07 05:14:15] [Rank 0] Group 4 Loss: 5.6979
+[2025-07-07 05:14:15] [Rank 0] Group 5 Loss: 5.8518
+[2025-07-07 05:14:15] [Rank 0] Group 6 Loss: 5.8218
+[2025-07-07 05:14:15] [Rank 0] Group 7 Loss: 5.8303
+[2025-07-07 05:14:15] [Rank 0] Group 8 Loss: 5.7954
+[2025-07-07 05:14:15] [Rank 0] Group 9 Loss: 5.7769
+[2025-07-07 05:14:15] [Rank 0] Group 10 Loss: 5.7721
+[2025-07-07 05:14:15] [Rank 0] Group 11 Loss: 5.7826
+[2025-07-07 05:14:15] [Rank 0] Group 0 FTA: 0.1404
+[2025-07-07 05:14:15] [Rank 0] Group 1 FTA: 0.1719
+[2025-07-07 05:14:15] [Rank 0] Group 2 FTA: 0.2839
+[2025-07-07 05:14:15] [Rank 0] Group 3 FTA: 0.0885
+[2025-07-07 05:14:15] [Rank 0] Group 4 FTA: 0.0938
+[2025-07-07 05:14:15] [Rank 0] Group 5 FTA: 0.1328
+[2025-07-07 05:14:15] [Rank 0] Group 6 FTA: 0.1380
+[2025-07-07 05:14:15] [Rank 0] Group 7 FTA: 0.1458
+[2025-07-07 05:14:15] [Rank 0] Group 8 FTA: 0.1615
+[2025-07-07 05:14:15] [Rank 0] Group 9 FTA: 0.1641
+[2025-07-07 05:14:15] [Rank 0] Group 10 FTA: 0.1641
+[2025-07-07 05:14:15] [Rank 0] Group 11 FTA: 0.1572
+[2025-07-07 05:14:16] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:14:16] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:14:16] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:14:17] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 05:14:17] [Rank 0] step:5501/10000 train_time:377772ms step_avg:68.67ms
+[2025-07-07 05:14:18] [Rank 0] step:5521/10000 train_time:378537ms step_avg:68.56ms
+[2025-07-07 05:14:20] [Rank 0] step:5541/10000 train_time:379903ms step_avg:68.56ms
+[2025-07-07 05:14:21] [Rank 0] step:5561/10000 train_time:381270ms step_avg:68.56ms
+[2025-07-07 05:14:22] [Rank 0] step:5581/10000 train_time:382691ms step_avg:68.57ms
+[2025-07-07 05:14:24] [Rank 0] step:5601/10000 train_time:384006ms step_avg:68.56ms
+[2025-07-07 05:14:25] [Rank 0] step:5621/10000 train_time:385377ms step_avg:68.56ms
+[2025-07-07 05:14:26] [Rank 0] step:5641/10000 train_time:386747ms step_avg:68.56ms
+[2025-07-07 05:14:28] [Rank 0] step:5661/10000 train_time:388117ms step_avg:68.56ms
+[2025-07-07 05:14:29] [Rank 0] step:5681/10000 train_time:389489ms step_avg:68.56ms
+[2025-07-07 05:14:31] [Rank 0] step:5701/10000 train_time:390861ms step_avg:68.56ms
+[2025-07-07 05:14:32] [Rank 0] step:5721/10000 train_time:392234ms step_avg:68.56ms
+[2025-07-07 05:14:33] [Rank 0] step:5741/10000 train_time:393606ms step_avg:68.56ms
+[2025-07-07 05:14:35] [Rank 0] step:5761/10000 train_time:395227ms step_avg:68.60ms
+[2025-07-07 05:14:36] [Rank 0] step:5781/10000 train_time:396401ms step_avg:68.57ms
+[2025-07-07 05:14:37] [Rank 0] step:5801/10000 train_time:397772ms step_avg:68.57ms
+[2025-07-07 05:14:39] [Rank 0] step:5821/10000 train_time:399146ms step_avg:68.57ms
+[2025-07-07 05:14:40] [Rank 0] step:5841/10000 train_time:400520ms step_avg:68.57ms
+[2025-07-07 05:14:42] [Rank 0] step:5861/10000 train_time:401894ms step_avg:68.57ms
+[2025-07-07 05:14:43] [Rank 0] step:5881/10000 train_time:403268ms step_avg:68.57ms
+[2025-07-07 05:14:44] [Rank 0] step:5901/10000 train_time:404641ms step_avg:68.57ms
+[2025-07-07 05:14:46] [Rank 0] step:5921/10000 train_time:406025ms step_avg:68.57ms
+[2025-07-07 05:14:47] [Rank 0] step:5941/10000 train_time:407446ms step_avg:68.58ms
+[2025-07-07 05:14:48] [Rank 0] step:5961/10000 train_time:408771ms step_avg:68.57ms
+[2025-07-07 05:14:50] [Rank 0] step:5981/10000 train_time:410146ms step_avg:68.57ms
+[2025-07-07 05:14:51] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:14:52] [Rank 0] PRINT: step:6000/10000 train_loss:1.1014 val_loss:1.1974 train_time:412143ms step_avg:68.69ms
+[2025-07-07 05:14:52] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:14:52] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:14:52] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:20:16] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:20:16] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:20:16] [Rank 0] Total Loss: 5.7759
+[2025-07-07 05:20:16] [Rank 0] Total FTA: 0.1420
+[2025-07-07 05:20:16] [Rank 0] Group 0 Loss: 6.1659
+[2025-07-07 05:20:16] [Rank 0] Group 1 Loss: 5.2410
+[2025-07-07 05:20:16] [Rank 0] Group 2 Loss: 5.8264
+[2025-07-07 05:20:16] [Rank 0] Group 3 Loss: 5.8367
+[2025-07-07 05:20:16] [Rank 0] Group 4 Loss: 5.5436
+[2025-07-07 05:20:16] [Rank 0] Group 5 Loss: 5.7675
+[2025-07-07 05:20:16] [Rank 0] Group 6 Loss: 5.7448
+[2025-07-07 05:20:16] [Rank 0] Group 7 Loss: 5.7451
+[2025-07-07 05:20:16] [Rank 0] Group 8 Loss: 5.8168
+[2025-07-07 05:20:16] [Rank 0] Group 9 Loss: 5.7318
+[2025-07-07 05:20:16] [Rank 0] Group 10 Loss: 5.7336
+[2025-07-07 05:20:16] [Rank 0] Group 11 Loss: 5.7723
+[2025-07-07 05:20:16] [Rank 0] Group 0 FTA: 0.1560
+[2025-07-07 05:20:16] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 05:20:16] [Rank 0] Group 2 FTA: 0.2422
+[2025-07-07 05:20:16] [Rank 0] Group 3 FTA: 0.1042
+[2025-07-07 05:20:16] [Rank 0] Group 4 FTA: 0.0781
+[2025-07-07 05:20:16] [Rank 0] Group 5 FTA: 0.1849
+[2025-07-07 05:20:16] [Rank 0] Group 6 FTA: 0.1536
+[2025-07-07 05:20:16] [Rank 0] Group 7 FTA: 0.1458
+[2025-07-07 05:20:16] [Rank 0] Group 8 FTA: 0.1510
+[2025-07-07 05:20:16] [Rank 0] Group 9 FTA: 0.1328
+[2025-07-07 05:20:16] [Rank 0] Group 10 FTA: 0.1680
+[2025-07-07 05:20:16] [Rank 0] Group 11 FTA: 0.1494
+[2025-07-07 05:20:17] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:20:17] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:20:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:20:18] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
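The four "[✓] ... curve updated and saved" lines rewrite the same PNGs after every detailed evaluation, so the files committed in this directory are only the final snapshot of each curve. A minimal sketch of that overwrite-in-place pattern (history lists, labels, and the function name are assumptions, not the script's code):

```python
# Overwrite-in-place plotting, as the repeated "curve updated and saved to:"
# messages suggest: re-render the full history and clobber the previous file.
import matplotlib
matplotlib.use("Agg")  # headless backend for a training node
import matplotlib.pyplot as plt

def save_total_curve(steps, values, ylabel, path):
    fig, ax = plt.subplots()
    ax.plot(steps, values, marker="o")
    ax.set_xlabel("step")
    ax.set_ylabel(ylabel)
    fig.savefig(path)  # same path every time -> previous snapshot is replaced
    plt.close(fig)     # release the figure between evaluations
```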
+[2025-07-07 05:20:18] [Rank 0] step:6001/10000 train_time:412155ms step_avg:68.68ms
+[2025-07-07 05:20:19] [Rank 0] step:6021/10000 train_time:412912ms step_avg:68.58ms
+[2025-07-07 05:20:21] [Rank 0] step:6041/10000 train_time:414278ms step_avg:68.58ms
+[2025-07-07 05:20:22] [Rank 0] step:6061/10000 train_time:415645ms step_avg:68.58ms
+[2025-07-07 05:20:24] [Rank 0] step:6081/10000 train_time:417014ms step_avg:68.58ms
+[2025-07-07 05:20:25] [Rank 0] step:6101/10000 train_time:418386ms step_avg:68.58ms
+[2025-07-07 05:20:26] [Rank 0] step:6121/10000 train_time:419756ms step_avg:68.58ms
+[2025-07-07 05:20:28] [Rank 0] step:6141/10000 train_time:421175ms step_avg:68.58ms
+[2025-07-07 05:20:29] [Rank 0] step:6161/10000 train_time:422546ms step_avg:68.58ms
+[2025-07-07 05:20:30] [Rank 0] step:6181/10000 train_time:423917ms step_avg:68.58ms
+[2025-07-07 05:20:32] [Rank 0] step:6201/10000 train_time:425288ms step_avg:68.58ms
+[2025-07-07 05:20:33] [Rank 0] step:6221/10000 train_time:426659ms step_avg:68.58ms
+[2025-07-07 05:20:35] [Rank 0] step:6241/10000 train_time:428032ms step_avg:68.58ms
+[2025-07-07 05:20:36] [Rank 0] step:6261/10000 train_time:429404ms step_avg:68.58ms
+[2025-07-07 05:20:37] [Rank 0] step:6281/10000 train_time:430776ms step_avg:68.58ms
+[2025-07-07 05:20:39] [Rank 0] step:6301/10000 train_time:432148ms step_avg:68.58ms
+[2025-07-07 05:20:40] [Rank 0] step:6321/10000 train_time:433558ms step_avg:68.59ms
+[2025-07-07 05:20:41] [Rank 0] step:6341/10000 train_time:434933ms step_avg:68.59ms
+[2025-07-07 05:20:43] [Rank 0] step:6361/10000 train_time:436306ms step_avg:68.59ms
+[2025-07-07 05:20:44] [Rank 0] step:6381/10000 train_time:437680ms step_avg:68.59ms
+[2025-07-07 05:20:46] [Rank 0] step:6401/10000 train_time:439053ms step_avg:68.59ms
+[2025-07-07 05:20:47] [Rank 0] step:6421/10000 train_time:440427ms step_avg:68.59ms
+[2025-07-07 05:20:48] [Rank 0] step:6441/10000 train_time:441801ms step_avg:68.59ms
+[2025-07-07 05:20:50] [Rank 0] step:6461/10000 train_time:443175ms step_avg:68.59ms
+[2025-07-07 05:20:51] [Rank 0] step:6481/10000 train_time:444551ms step_avg:68.59ms
+[2025-07-07 05:20:52] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:20:53] [Rank 0] PRINT: step:6500/10000 train_loss:1.0585 val_loss:1.1439 train_time:446600ms step_avg:68.71ms
+[2025-07-07 05:20:53] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:20:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:20:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:26:18] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:26:18] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:26:18] [Rank 0] Total Loss: 5.9086
+[2025-07-07 05:26:18] [Rank 0] Total FTA: 0.2239
+[2025-07-07 05:26:18] [Rank 0] Group 0 Loss: 6.2928
+[2025-07-07 05:26:18] [Rank 0] Group 1 Loss: 5.4129
+[2025-07-07 05:26:18] [Rank 0] Group 2 Loss: 5.6233
+[2025-07-07 05:26:18] [Rank 0] Group 3 Loss: 6.1649
+[2025-07-07 05:26:18] [Rank 0] Group 4 Loss: 5.8162
+[2025-07-07 05:26:18] [Rank 0] Group 5 Loss: 5.9141
+[2025-07-07 05:26:18] [Rank 0] Group 6 Loss: 5.9323
+[2025-07-07 05:26:18] [Rank 0] Group 7 Loss: 5.8892
+[2025-07-07 05:26:18] [Rank 0] Group 8 Loss: 5.8847
+[2025-07-07 05:26:18] [Rank 0] Group 9 Loss: 5.8776
+[2025-07-07 05:26:18] [Rank 0] Group 10 Loss: 5.8958
+[2025-07-07 05:26:18] [Rank 0] Group 11 Loss: 5.8709
+[2025-07-07 05:26:18] [Rank 0] Group 0 FTA: 0.3446
+[2025-07-07 05:26:18] [Rank 0] Group 1 FTA: 0.3125
+[2025-07-07 05:26:18] [Rank 0] Group 2 FTA: 0.2266
+[2025-07-07 05:26:18] [Rank 0] Group 3 FTA: 0.1589
+[2025-07-07 05:26:18] [Rank 0] Group 4 FTA: 0.1901
+[2025-07-07 05:26:18] [Rank 0] Group 5 FTA: 0.2057
+[2025-07-07 05:26:18] [Rank 0] Group 6 FTA: 0.1693
+[2025-07-07 05:26:18] [Rank 0] Group 7 FTA: 0.1823
+[2025-07-07 05:26:18] [Rank 0] Group 8 FTA: 0.1953
+[2025-07-07 05:26:18] [Rank 0] Group 9 FTA: 0.2031
+[2025-07-07 05:26:18] [Rank 0] Group 10 FTA: 0.1895
+[2025-07-07 05:26:18] [Rank 0] Group 11 FTA: 0.2119
+[2025-07-07 05:26:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:26:19] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:26:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:26:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 05:26:19] [Rank 0] step:6501/10000 train_time:446612ms step_avg:68.70ms
+[2025-07-07 05:26:21] [Rank 0] step:6521/10000 train_time:447379ms step_avg:68.61ms
+[2025-07-07 05:26:22] [Rank 0] step:6541/10000 train_time:448744ms step_avg:68.60ms
+[2025-07-07 05:26:23] [Rank 0] step:6561/10000 train_time:450113ms step_avg:68.60ms
+[2025-07-07 05:26:25] [Rank 0] step:6581/10000 train_time:451481ms step_avg:68.60ms
+[2025-07-07 05:26:26] [Rank 0] step:6601/10000 train_time:452849ms step_avg:68.60ms
+[2025-07-07 05:26:28] [Rank 0] step:6621/10000 train_time:454219ms step_avg:68.60ms
+[2025-07-07 05:26:29] [Rank 0] step:6641/10000 train_time:455588ms step_avg:68.60ms
+[2025-07-07 05:26:30] [Rank 0] step:6661/10000 train_time:457618ms step_avg:68.70ms
+[2025-07-07 05:26:32] [Rank 0] step:6681/10000 train_time:458357ms step_avg:68.61ms
+[2025-07-07 05:26:33] [Rank 0] step:6701/10000 train_time:459729ms step_avg:68.61ms
+[2025-07-07 05:26:34] [Rank 0] step:6721/10000 train_time:461100ms step_avg:68.61ms
+[2025-07-07 05:26:36] [Rank 0] step:6741/10000 train_time:462473ms step_avg:68.61ms
+[2025-07-07 05:26:37] [Rank 0] step:6761/10000 train_time:463845ms step_avg:68.61ms
+[2025-07-07 05:26:39] [Rank 0] step:6781/10000 train_time:465217ms step_avg:68.61ms
+[2025-07-07 05:26:40] [Rank 0] step:6801/10000 train_time:466590ms step_avg:68.61ms
+[2025-07-07 05:26:41] [Rank 0] step:6821/10000 train_time:467964ms step_avg:68.61ms
+[2025-07-07 05:26:43] [Rank 0] step:6841/10000 train_time:469588ms step_avg:68.64ms
+[2025-07-07 05:26:44] [Rank 0] step:6861/10000 train_time:470742ms step_avg:68.61ms
+[2025-07-07 05:26:45] [Rank 0] step:6881/10000 train_time:472116ms step_avg:68.61ms
+[2025-07-07 05:26:47] [Rank 0] step:6901/10000 train_time:473490ms step_avg:68.61ms
+[2025-07-07 05:26:48] [Rank 0] step:6921/10000 train_time:474863ms step_avg:68.61ms
+[2025-07-07 05:26:50] [Rank 0] step:6941/10000 train_time:476236ms step_avg:68.61ms
+[2025-07-07 05:26:51] [Rank 0] step:6961/10000 train_time:477609ms step_avg:68.61ms
+[2025-07-07 05:26:52] [Rank 0] step:6981/10000 train_time:478981ms step_avg:68.61ms
+[2025-07-07 05:26:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:26:55] [Rank 0] PRINT: step:7000/10000 train_loss:1.0134 val_loss:1.1512 train_time:480977ms step_avg:68.71ms
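Every entry carries a "[Rank 0]" tag: under torch.distributed only one process writes the log, so nothing here is averaged across ranks unless the script reduces it first. The usual shape of such a logger (print0 and the file handling are assumed names and details, not confirmed from this script):

```python
# Rank-0-only logging, the pattern implied by the "[Rank 0]" prefix on
# every line: other ranks return early, rank 0 prints and appends to a file.
import os
from datetime import datetime

RANK = int(os.environ.get("RANK", "0"))

def print0(msg, logfile=None):
    if RANK != 0:
        return  # non-zero ranks stay silent
    line = f"[{datetime.now():%Y-%m-%d %H:%M:%S}] [Rank {RANK}] {msg}"
    print(line)
    if logfile is not None:
        with open(logfile, "a") as f:
            f.write(line + "\n")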
+[2025-07-07 05:26:55] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:26:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:26:55] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:32:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:32:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:32:20] [Rank 0] Total Loss: 5.8034
+[2025-07-07 05:32:20] [Rank 0] Total FTA: 0.1724
+[2025-07-07 05:32:20] [Rank 0] Group 0 Loss: 6.2128
+[2025-07-07 05:32:20] [Rank 0] Group 1 Loss: 5.2577
+[2025-07-07 05:32:20] [Rank 0] Group 2 Loss: 5.4969
+[2025-07-07 05:32:20] [Rank 0] Group 3 Loss: 5.9782
+[2025-07-07 05:32:21] [Rank 0] Group 4 Loss: 5.6319
+[2025-07-07 05:32:21] [Rank 0] Group 5 Loss: 5.7283
+[2025-07-07 05:32:21] [Rank 0] Group 6 Loss: 5.9187
+[2025-07-07 05:32:21] [Rank 0] Group 7 Loss: 5.9006
+[2025-07-07 05:32:21] [Rank 0] Group 8 Loss: 5.8262
+[2025-07-07 05:32:21] [Rank 0] Group 9 Loss: 5.8287
+[2025-07-07 05:32:21] [Rank 0] Group 10 Loss: 5.7809
+[2025-07-07 05:32:21] [Rank 0] Group 11 Loss: 5.7591
+[2025-07-07 05:32:21] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 05:32:21] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 05:32:21] [Rank 0] Group 2 FTA: 0.2734
+[2025-07-07 05:32:21] [Rank 0] Group 3 FTA: 0.1797
+[2025-07-07 05:32:21] [Rank 0] Group 4 FTA: 0.1432
+[2025-07-07 05:32:21] [Rank 0] Group 5 FTA: 0.2214
+[2025-07-07 05:32:21] [Rank 0] Group 6 FTA: 0.2057
+[2025-07-07 05:32:21] [Rank 0] Group 7 FTA: 0.1823
+[2025-07-07 05:32:21] [Rank 0] Group 8 FTA: 0.2135
+[2025-07-07 05:32:21] [Rank 0] Group 9 FTA: 0.1914
+[2025-07-07 05:32:21] [Rank 0] Group 10 FTA: 0.2559
+[2025-07-07 05:32:21] [Rank 0] Group 11 FTA: 0.2402
+[2025-07-07 05:32:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:32:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:32:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:32:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 05:32:22] [Rank 0] step:7001/10000 train_time:480988ms step_avg:68.70ms
+[2025-07-07 05:32:24] [Rank 0] step:7021/10000 train_time:481759ms step_avg:68.62ms
+[2025-07-07 05:32:25] [Rank 0] step:7041/10000 train_time:483181ms step_avg:68.62ms
+[2025-07-07 05:32:26] [Rank 0] step:7061/10000 train_time:484550ms step_avg:68.62ms
+[2025-07-07 05:32:28] [Rank 0] step:7081/10000 train_time:485918ms step_avg:68.62ms
+[2025-07-07 05:32:29] [Rank 0] step:7101/10000 train_time:487286ms step_avg:68.62ms
+[2025-07-07 05:32:30] [Rank 0] step:7121/10000 train_time:488657ms step_avg:68.62ms
+[2025-07-07 05:32:32] [Rank 0] step:7141/10000 train_time:490027ms step_avg:68.62ms
+[2025-07-07 05:32:33] [Rank 0] step:7161/10000 train_time:491396ms step_avg:68.62ms
+[2025-07-07 05:32:34] [Rank 0] step:7181/10000 train_time:492766ms step_avg:68.62ms
+[2025-07-07 05:32:36] [Rank 0] step:7201/10000 train_time:494797ms step_avg:68.71ms
+[2025-07-07 05:32:37] [Rank 0] step:7221/10000 train_time:495536ms step_avg:68.62ms
+[2025-07-07 05:32:39] [Rank 0] step:7241/10000 train_time:496909ms step_avg:68.62ms
+[2025-07-07 05:32:40] [Rank 0] step:7261/10000 train_time:498280ms step_avg:68.62ms
+[2025-07-07 05:32:41] [Rank 0] step:7281/10000 train_time:499652ms step_avg:68.62ms
+[2025-07-07 05:32:43] [Rank 0] step:7301/10000 train_time:501025ms step_avg:68.62ms
+[2025-07-07 05:32:44] [Rank 0] step:7321/10000 train_time:502397ms step_avg:68.62ms
+[2025-07-07 05:32:45] [Rank 0] step:7341/10000 train_time:503769ms step_avg:68.62ms
+[2025-07-07 05:32:47] [Rank 0] step:7361/10000 train_time:505141ms step_avg:68.62ms
+[2025-07-07 05:32:48] [Rank 0] step:7381/10000 train_time:507181ms step_avg:68.71ms
+[2025-07-07 05:32:50] [Rank 0] step:7401/10000 train_time:507922ms step_avg:68.63ms
+[2025-07-07 05:32:51] [Rank 0] step:7421/10000 train_time:509295ms step_avg:68.63ms
+[2025-07-07 05:32:52] [Rank 0] step:7441/10000 train_time:510669ms step_avg:68.63ms
+[2025-07-07 05:32:54] [Rank 0] step:7461/10000 train_time:512044ms step_avg:68.63ms
+[2025-07-07 05:32:55] [Rank 0] step:7481/10000 train_time:513416ms step_avg:68.63ms
+[2025-07-07 05:32:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:32:57] [Rank 0] PRINT: step:7500/10000 train_loss:0.9749 val_loss:1.1014 train_time:515413ms step_avg:68.72ms
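For post-hoc analysis, the step lines are regular enough to regex back out of the log file; a small helper written against this log's format (an editor-supplied utility, not part of the run):

```python
# Recover (step, cumulative train_time ms, step_avg ms) tuples from lines like
# "step:7301/10000 train_time:501025ms step_avg:68.62ms".
import re

STEP_RE = re.compile(r"step:(\d+)/\d+ train_time:(\d+)ms step_avg:([\d.]+)ms")

def parse_step_timings(log_path):
    rows = []
    with open(log_path) as f:
        for line in f:
            m = STEP_RE.search(line)
            if m:
                rows.append((int(m[1]), int(m[2]), float(m[3])))
    return rows
```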
+[2025-07-07 05:32:57] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:32:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:32:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:38:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:38:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:38:22] [Rank 0] Total Loss: 5.8297
+[2025-07-07 05:38:22] [Rank 0] Total FTA: 0.2810
+[2025-07-07 05:38:22] [Rank 0] Group 0 Loss: 6.2339
+[2025-07-07 05:38:22] [Rank 0] Group 1 Loss: 5.2676
+[2025-07-07 05:38:22] [Rank 0] Group 2 Loss: 5.8729
+[2025-07-07 05:38:22] [Rank 0] Group 3 Loss: 5.9969
+[2025-07-07 05:38:22] [Rank 0] Group 4 Loss: 5.7458
+[2025-07-07 05:38:22] [Rank 0] Group 5 Loss: 5.7928
+[2025-07-07 05:38:22] [Rank 0] Group 6 Loss: 5.9039
+[2025-07-07 05:38:22] [Rank 0] Group 7 Loss: 5.7243
+[2025-07-07 05:38:22] [Rank 0] Group 8 Loss: 5.7746
+[2025-07-07 05:38:22] [Rank 0] Group 9 Loss: 5.8260
+[2025-07-07 05:38:22] [Rank 0] Group 10 Loss: 5.7948
+[2025-07-07 05:38:22] [Rank 0] Group 11 Loss: 5.7541
+[2025-07-07 05:38:22] [Rank 0] Group 0 FTA: 0.5137
+[2025-07-07 05:38:22] [Rank 0] Group 1 FTA: 0.1432
+[2025-07-07 05:38:22] [Rank 0] Group 2 FTA: 0.2630
+[2025-07-07 05:38:22] [Rank 0] Group 3 FTA: 0.2578
+[2025-07-07 05:38:22] [Rank 0] Group 4 FTA: 0.2917
+[2025-07-07 05:38:22] [Rank 0] Group 5 FTA: 0.2578
+[2025-07-07 05:38:23] [Rank 0] Group 6 FTA: 0.1406
+[2025-07-07 05:38:23] [Rank 0] Group 7 FTA: 0.2891
+[2025-07-07 05:38:23] [Rank 0] Group 8 FTA: 0.2474
+[2025-07-07 05:38:23] [Rank 0] Group 9 FTA: 0.2578
+[2025-07-07 05:38:23] [Rank 0] Group 10 FTA: 0.2852
+[2025-07-07 05:38:23] [Rank 0] Group 11 FTA: 0.2441
+[2025-07-07 05:38:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:38:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:38:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:38:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 05:38:24] [Rank 0] step:7501/10000 train_time:515424ms step_avg:68.71ms
+[2025-07-07 05:38:25] [Rank 0] step:7521/10000 train_time:516192ms step_avg:68.63ms
+[2025-07-07 05:38:27] [Rank 0] step:7541/10000 train_time:517558ms step_avg:68.63ms
+[2025-07-07 05:38:28] [Rank 0] step:7561/10000 train_time:518926ms step_avg:68.63ms
+[2025-07-07 05:38:30] [Rank 0] step:7581/10000 train_time:520333ms step_avg:68.64ms
+[2025-07-07 05:38:31] [Rank 0] step:7601/10000 train_time:521703ms step_avg:68.64ms
+[2025-07-07 05:38:32] [Rank 0] step:7621/10000 train_time:523073ms step_avg:68.64ms
+[2025-07-07 05:38:34] [Rank 0] step:7641/10000 train_time:524444ms step_avg:68.64ms
+[2025-07-07 05:38:35] [Rank 0] step:7661/10000 train_time:525813ms step_avg:68.64ms
+[2025-07-07 05:38:36] [Rank 0] step:7681/10000 train_time:527184ms step_avg:68.63ms
+[2025-07-07 05:38:38] [Rank 0] step:7701/10000 train_time:528555ms step_avg:68.63ms
+[2025-07-07 05:38:39] [Rank 0] step:7721/10000 train_time:529925ms step_avg:68.63ms
+[2025-07-07 05:38:41] [Rank 0] step:7741/10000 train_time:531550ms step_avg:68.67ms
+[2025-07-07 05:38:42] [Rank 0] step:7761/10000 train_time:532717ms step_avg:68.64ms
+[2025-07-07 05:38:43] [Rank 0] step:7781/10000 train_time:534089ms step_avg:68.64ms
+[2025-07-07 05:38:45] [Rank 0] step:7801/10000 train_time:535462ms step_avg:68.64ms
+[2025-07-07 05:38:24] [Rank 0] step:7501/10000 train_time:515424ms step_avg:68.71ms
+[2025-07-07 05:38:25] [Rank 0] step:7521/10000 train_time:516192ms step_avg:68.63ms
+[2025-07-07 05:38:27] [Rank 0] step:7541/10000 train_time:517558ms step_avg:68.63ms
+[2025-07-07 05:38:28] [Rank 0] step:7561/10000 train_time:518926ms step_avg:68.63ms
+[2025-07-07 05:38:30] [Rank 0] step:7581/10000 train_time:520333ms step_avg:68.64ms
+[2025-07-07 05:38:31] [Rank 0] step:7601/10000 train_time:521703ms step_avg:68.64ms
+[2025-07-07 05:38:32] [Rank 0] step:7621/10000 train_time:523073ms step_avg:68.64ms
+[2025-07-07 05:38:34] [Rank 0] step:7641/10000 train_time:524444ms step_avg:68.64ms
+[2025-07-07 05:38:35] [Rank 0] step:7661/10000 train_time:525813ms step_avg:68.64ms
+[2025-07-07 05:38:36] [Rank 0] step:7681/10000 train_time:527184ms step_avg:68.63ms
+[2025-07-07 05:38:38] [Rank 0] step:7701/10000 train_time:528555ms step_avg:68.63ms
+[2025-07-07 05:38:39] [Rank 0] step:7721/10000 train_time:529925ms step_avg:68.63ms
+[2025-07-07 05:38:41] [Rank 0] step:7741/10000 train_time:531550ms step_avg:68.67ms
+[2025-07-07 05:38:42] [Rank 0] step:7761/10000 train_time:532717ms step_avg:68.64ms
+[2025-07-07 05:38:43] [Rank 0] step:7781/10000 train_time:534089ms step_avg:68.64ms
+[2025-07-07 05:38:45] [Rank 0] step:7801/10000 train_time:535462ms step_avg:68.64ms
+[2025-07-07 05:38:46] [Rank 0] step:7821/10000 train_time:536833ms step_avg:68.64ms
+[2025-07-07 05:38:47] [Rank 0] step:7841/10000 train_time:538204ms step_avg:68.64ms
+[2025-07-07 05:38:49] [Rank 0] step:7861/10000 train_time:539575ms step_avg:68.64ms
+[2025-07-07 05:38:50] [Rank 0] step:7881/10000 train_time:540947ms step_avg:68.64ms
+[2025-07-07 05:38:52] [Rank 0] step:7901/10000 train_time:542318ms step_avg:68.64ms
+[2025-07-07 05:38:53] [Rank 0] step:7921/10000 train_time:543941ms step_avg:68.67ms
+[2025-07-07 05:38:54] [Rank 0] step:7941/10000 train_time:545101ms step_avg:68.64ms
+[2025-07-07 05:38:56] [Rank 0] step:7961/10000 train_time:546474ms step_avg:68.64ms
+[2025-07-07 05:38:57] [Rank 0] step:7981/10000 train_time:547847ms step_avg:68.64ms
+[2025-07-07 05:38:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:38:59] [Rank 0] PRINT: step:8000/10000 train_loss:0.9419 val_loss:1.1165 train_time:549844ms step_avg:68.73ms
+[2025-07-07 05:38:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:39:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:39:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:44:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:44:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:44:23] [Rank 0] Total Loss: 5.8535
+[2025-07-07 05:44:23] [Rank 0] Total FTA: 0.2611
+[2025-07-07 05:44:23] [Rank 0] Group 0 Loss: 6.1555
+[2025-07-07 05:44:23] [Rank 0] Group 1 Loss: 5.3571
+[2025-07-07 05:44:23] [Rank 0] Group 2 Loss: 5.8131
+[2025-07-07 05:44:23] [Rank 0] Group 3 Loss: 6.1057
+[2025-07-07 05:44:23] [Rank 0] Group 4 Loss: 5.6099
+[2025-07-07 05:44:23] [Rank 0] Group 5 Loss: 5.8426
+[2025-07-07 05:44:23] [Rank 0] Group 6 Loss: 5.8016
+[2025-07-07 05:44:23] [Rank 0] Group 7 Loss: 5.8766
+[2025-07-07 05:44:23] [Rank 0] Group 8 Loss: 5.8741
+[2025-07-07 05:44:23] [Rank 0] Group 9 Loss: 5.8386
+[2025-07-07 05:44:23] [Rank 0] Group 10 Loss: 5.8821
+[2025-07-07 05:44:23] [Rank 0] Group 11 Loss: 5.8211
+[2025-07-07 05:44:23] [Rank 0] Group 0 FTA: 0.3342
+[2025-07-07 05:44:23] [Rank 0] Group 1 FTA: 0.4948
+[2025-07-07 05:44:23] [Rank 0] Group 2 FTA: 0.2604
+[2025-07-07 05:44:23] [Rank 0] Group 3 FTA: 0.1458
+[2025-07-07 05:44:23] [Rank 0] Group 4 FTA: 0.1745
+[2025-07-07 05:44:23] [Rank 0] Group 5 FTA: 0.2161
+[2025-07-07 05:44:23] [Rank 0] Group 6 FTA: 0.2292
+[2025-07-07 05:44:23] [Rank 0] Group 7 FTA: 0.2396
+[2025-07-07 05:44:23] [Rank 0] Group 8 FTA: 0.2786
+[2025-07-07 05:44:23] [Rank 0] Group 9 FTA: 0.2188
+[2025-07-07 05:44:23] [Rank 0] Group 10 FTA: 0.2539
+[2025-07-07 05:44:23] [Rank 0] Group 11 FTA: 0.2393
+[2025-07-07 05:44:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:44:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:44:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:44:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 05:44:25] [Rank 0] step:8001/10000 train_time:549854ms step_avg:68.72ms
+[2025-07-07 05:44:26] [Rank 0] step:8021/10000 train_time:550793ms step_avg:68.67ms
+[2025-07-07 05:44:28] [Rank 0] step:8041/10000 train_time:552157ms step_avg:68.67ms
+[2025-07-07 05:44:29] [Rank 0] step:8061/10000 train_time:553522ms step_avg:68.67ms
+[2025-07-07 05:44:31] [Rank 0] step:8081/10000 train_time:554890ms step_avg:68.67ms
+[2025-07-07 05:44:32] [Rank 0] step:8101/10000 train_time:556304ms step_avg:68.67ms
+[2025-07-07 05:44:33] [Rank 0] step:8121/10000 train_time:557656ms step_avg:68.67ms
+[2025-07-07 05:44:35] [Rank 0] step:8141/10000 train_time:559026ms step_avg:68.67ms
+[2025-07-07 05:44:36] [Rank 0] step:8161/10000 train_time:560397ms step_avg:68.67ms
+[2025-07-07 05:44:37] [Rank 0] step:8181/10000 train_time:561767ms step_avg:68.67ms
+[2025-07-07 05:44:39] [Rank 0] step:8201/10000 train_time:563137ms step_avg:68.67ms
+[2025-07-07 05:44:40] [Rank 0] step:8221/10000 train_time:564509ms step_avg:68.67ms
+[2025-07-07 05:44:42] [Rank 0] step:8241/10000 train_time:565880ms step_avg:68.67ms
+[2025-07-07 05:44:43] [Rank 0] step:8261/10000 train_time:567252ms step_avg:68.67ms
+[2025-07-07 05:44:44] [Rank 0] step:8281/10000 train_time:568876ms step_avg:68.70ms
+[2025-07-07 05:44:46] [Rank 0] step:8301/10000 train_time:570042ms step_avg:68.67ms
+[2025-07-07 05:44:47] [Rank 0] step:8321/10000 train_time:571414ms step_avg:68.67ms
+[2025-07-07 05:44:48] [Rank 0] step:8341/10000 train_time:572786ms step_avg:68.67ms
+[2025-07-07 05:44:50] [Rank 0] step:8361/10000 train_time:574159ms step_avg:68.67ms
+[2025-07-07 05:44:51] [Rank 0] step:8381/10000 train_time:575533ms step_avg:68.67ms
+[2025-07-07 05:44:53] [Rank 0] step:8401/10000 train_time:576907ms step_avg:68.67ms
+[2025-07-07 05:44:54] [Rank 0] step:8421/10000 train_time:578281ms step_avg:68.67ms
+[2025-07-07 05:44:55] [Rank 0] step:8441/10000 train_time:579654ms step_avg:68.67ms
+[2025-07-07 05:44:57] [Rank 0] step:8461/10000 train_time:581280ms step_avg:68.70ms
+[2025-07-07 05:44:58] [Rank 0] step:8481/10000 train_time:582401ms step_avg:68.67ms
+[2025-07-07 05:44:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:45:00] [Rank 0] PRINT: step:8500/10000 train_loss:0.9090 val_loss:1.0700 train_time:584399ms step_avg:68.75ms
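The divisibility warning repeated before every validation pass is plain integer arithmetic: 1966080 / 262144 = 7.5, so a loop that consumes whole validation batches runs 7 of them and never sees the final 131072 tokens. A quick check (val_batch_size is taken from the warning text; the validation loop itself is not part of this excerpt):

val_tokens, val_batch_size = 1966080, 262144
full_batches = val_tokens // val_batch_size          # 7
missed = val_tokens - full_batches * val_batch_size  # 131072 tokens skipped per eval
assert val_tokens % val_batch_size != 0              # hence the warning above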
+[2025-07-07 05:45:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:45:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:45:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 05:50:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:50:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:50:23] [Rank 0] Total Loss: 5.8768
+[2025-07-07 05:50:23] [Rank 0] Total FTA: 0.2945
+[2025-07-07 05:50:23] [Rank 0] Group 0 Loss: 6.4680
+[2025-07-07 05:50:23] [Rank 0] Group 1 Loss: 5.2747
+[2025-07-07 05:50:23] [Rank 0] Group 2 Loss: 5.7836
+[2025-07-07 05:50:23] [Rank 0] Group 3 Loss: 6.2163
+[2025-07-07 05:50:23] [Rank 0] Group 4 Loss: 5.6167
+[2025-07-07 05:50:23] [Rank 0] Group 5 Loss: 5.8630
+[2025-07-07 05:50:23] [Rank 0] Group 6 Loss: 5.8987
+[2025-07-07 05:50:23] [Rank 0] Group 7 Loss: 5.7539
+[2025-07-07 05:50:23] [Rank 0] Group 8 Loss: 5.8634
+[2025-07-07 05:50:23] [Rank 0] Group 9 Loss: 5.8023
+[2025-07-07 05:50:23] [Rank 0] Group 10 Loss: 5.7482
+[2025-07-07 05:50:24] [Rank 0] Group 11 Loss: 5.7946
+[2025-07-07 05:50:24] [Rank 0] Group 0 FTA: 0.3472
+[2025-07-07 05:50:24] [Rank 0] Group 1 FTA: 0.6536
+[2025-07-07 05:50:24] [Rank 0] Group 2 FTA: 0.2083
+[2025-07-07 05:50:24] [Rank 0] Group 3 FTA: 0.2422
+[2025-07-07 05:50:24] [Rank 0] Group 4 FTA: 0.2344
+[2025-07-07 05:50:24] [Rank 0] Group 5 FTA: 0.2708
+[2025-07-07 05:50:24] [Rank 0] Group 6 FTA: 0.2578
+[2025-07-07 05:50:24] [Rank 0] Group 7 FTA: 0.2344
+[2025-07-07 05:50:24] [Rank 0] Group 8 FTA: 0.2552
+[2025-07-07 05:50:24] [Rank 0] Group 9 FTA: 0.2852
+[2025-07-07 05:50:24] [Rank 0] Group 10 FTA: 0.2402
+[2025-07-07 05:50:24] [Rank 0] Group 11 FTA: 0.2842
+[2025-07-07 05:50:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:50:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:50:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:50:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 05:50:25] [Rank 0] step:8501/10000 train_time:584411ms step_avg:68.75ms
+[2025-07-07 05:50:26] [Rank 0] step:8521/10000 train_time:585166ms step_avg:68.67ms
+[2025-07-07 05:50:28] [Rank 0] step:8541/10000 train_time:586633ms step_avg:68.68ms
+[2025-07-07 05:50:29] [Rank 0] step:8561/10000 train_time:588000ms step_avg:68.68ms
+[2025-07-07 05:50:31] [Rank 0] step:8581/10000 train_time:589469ms step_avg:68.69ms
+[2025-07-07 05:50:32] [Rank 0] step:8601/10000 train_time:590837ms step_avg:68.69ms
+[2025-07-07 05:50:34] [Rank 0] step:8621/10000 train_time:592206ms step_avg:68.69ms
+[2025-07-07 05:50:35] [Rank 0] step:8641/10000 train_time:593832ms step_avg:68.72ms
+[2025-07-07 05:50:36] [Rank 0] step:8661/10000 train_time:594978ms step_avg:68.70ms
+[2025-07-07 05:50:38] [Rank 0] step:8681/10000 train_time:596352ms step_avg:68.70ms
+[2025-07-07 05:50:39] [Rank 0] step:8701/10000 train_time:597721ms step_avg:68.70ms
+[2025-07-07 05:50:40] [Rank 0] step:8721/10000 train_time:599093ms step_avg:68.70ms
+[2025-07-07 05:50:42] [Rank 0] step:8741/10000 train_time:600463ms step_avg:68.69ms
+[2025-07-07 05:50:43] [Rank 0] step:8761/10000 train_time:601935ms step_avg:68.71ms
+[2025-07-07 05:50:45] [Rank 0] step:8781/10000 train_time:603309ms step_avg:68.71ms
+[2025-07-07 05:50:46] [Rank 0] step:8801/10000 train_time:604681ms step_avg:68.71ms
+[2025-07-07 05:50:47] [Rank 0] step:8821/10000 train_time:606100ms step_avg:68.71ms
+[2025-07-07 05:50:49] [Rank 0] step:8841/10000 train_time:607464ms step_avg:68.71ms
+[2025-07-07 05:50:50] [Rank 0] step:8861/10000 train_time:608936ms step_avg:68.72ms
+[2025-07-07 05:50:52] [Rank 0] step:8881/10000 train_time:610309ms step_avg:68.72ms
+[2025-07-07 05:50:53] [Rank 0] step:8901/10000 train_time:611682ms step_avg:68.72ms
+[2025-07-07 05:50:54] [Rank 0] step:8921/10000 train_time:613057ms step_avg:68.72ms
+[2025-07-07 05:50:56] [Rank 0] step:8941/10000 train_time:614430ms step_avg:68.72ms
+[2025-07-07 05:50:57] [Rank 0] step:8961/10000 train_time:615804ms step_avg:68.72ms
+[2025-07-07 05:50:58] [Rank 0] step:8981/10000 train_time:617177ms step_avg:68.72ms
+[2025-07-07 05:51:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:51:01] [Rank 0] PRINT: step:9000/10000 train_loss:0.8839 val_loss:1.0466 train_time:619176ms step_avg:68.80ms
+[2025-07-07 05:51:01] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:51:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:51:01] [Rank 0] Evaluation set size after sampling: 5633
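The sampler asks for ~5000 examples but lands on 5633 every time; that is the usual behavior of stratified sampling with per-group quotas, where rounding and small-group handling can overshoot the target. The selection code is outside this excerpt, so the following is only a hedged sketch of that kind of draw (the helper and its interface are hypothetical):

import math, random
from collections import defaultdict

def stratified_sample(items, group_of, target=5000, seed=42):
    # Hypothetical helper: balance the draw across groups (12 in this run).
    rng = random.Random(seed)
    by_group = defaultdict(list)
    for it in items:
        by_group[group_of(it)].append(it)
    quota = math.ceil(target / len(by_group))
    picked = []
    for members in by_group.values():
        rng.shuffle(members)
        picked.extend(members[:quota])  # can overshoot target when quotas round up
    return picked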
+[2025-07-07 05:56:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 05:56:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 05:56:23] [Rank 0] Total Loss: 5.8478
+[2025-07-07 05:56:23] [Rank 0] Total FTA: 0.3611
+[2025-07-07 05:56:23] [Rank 0] Group 0 Loss: 6.1089
+[2025-07-07 05:56:23] [Rank 0] Group 1 Loss: 5.5185
+[2025-07-07 05:56:23] [Rank 0] Group 2 Loss: 5.8049
+[2025-07-07 05:56:23] [Rank 0] Group 3 Loss: 6.2569
+[2025-07-07 05:56:23] [Rank 0] Group 4 Loss: 5.6360
+[2025-07-07 05:56:23] [Rank 0] Group 5 Loss: 5.7755
+[2025-07-07 05:56:23] [Rank 0] Group 6 Loss: 5.8281
+[2025-07-07 05:56:23] [Rank 0] Group 7 Loss: 5.6972
+[2025-07-07 05:56:23] [Rank 0] Group 8 Loss: 5.8369
+[2025-07-07 05:56:23] [Rank 0] Group 9 Loss: 5.7096
+[2025-07-07 05:56:23] [Rank 0] Group 10 Loss: 5.8523
+[2025-07-07 05:56:23] [Rank 0] Group 11 Loss: 5.8444
+[2025-07-07 05:56:23] [Rank 0] Group 0 FTA: 0.6359
+[2025-07-07 05:56:23] [Rank 0] Group 1 FTA: 0.8047
+[2025-07-07 05:56:23] [Rank 0] Group 2 FTA: 0.1615
+[2025-07-07 05:56:23] [Rank 0] Group 3 FTA: 0.3307
+[2025-07-07 05:56:23] [Rank 0] Group 4 FTA: 0.1901
+[2025-07-07 05:56:23] [Rank 0] Group 5 FTA: 0.3047
+[2025-07-07 05:56:23] [Rank 0] Group 6 FTA: 0.2943
+[2025-07-07 05:56:23] [Rank 0] Group 7 FTA: 0.2891
+[2025-07-07 05:56:23] [Rank 0] Group 8 FTA: 0.2786
+[2025-07-07 05:56:23] [Rank 0] Group 9 FTA: 0.3047
+[2025-07-07 05:56:23] [Rank 0] Group 10 FTA: 0.2949
+[2025-07-07 05:56:23] [Rank 0] Group 11 FTA: 0.2900
+[2025-07-07 05:56:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 05:56:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 05:56:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 05:56:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 05:56:27] [Rank 0] step:9001/10000 train_time:619194ms step_avg:68.79ms
+[2025-07-07 05:56:29] [Rank 0] step:9021/10000 train_time:620647ms step_avg:68.80ms
+[2025-07-07 05:56:30] [Rank 0] step:9041/10000 train_time:622013ms step_avg:68.80ms
+[2025-07-07 05:56:31] [Rank 0] step:9061/10000 train_time:623381ms step_avg:68.80ms
+[2025-07-07 05:56:33] [Rank 0] step:9081/10000 train_time:624749ms step_avg:68.80ms
+[2025-07-07 05:56:34] [Rank 0] step:9101/10000 train_time:626117ms step_avg:68.80ms
+[2025-07-07 05:56:36] [Rank 0] step:9121/10000 train_time:627486ms step_avg:68.80ms
+[2025-07-07 05:56:37] [Rank 0] step:9141/10000 train_time:628854ms step_avg:68.79ms
+[2025-07-07 05:56:38] [Rank 0] step:9161/10000 train_time:630224ms step_avg:68.79ms
+[2025-07-07 05:56:40] [Rank 0] step:9181/10000 train_time:631594ms step_avg:68.79ms
+[2025-07-07 05:56:41] [Rank 0] step:9201/10000 train_time:633001ms step_avg:68.80ms
+[2025-07-07 05:56:42] [Rank 0] step:9221/10000 train_time:634371ms step_avg:68.80ms
+[2025-07-07 05:56:44] [Rank 0] step:9241/10000 train_time:635843ms step_avg:68.81ms
+[2025-07-07 05:56:45] [Rank 0] step:9261/10000 train_time:637213ms step_avg:68.81ms
+[2025-07-07 05:56:47] [Rank 0] step:9281/10000 train_time:638584ms step_avg:68.81ms
+[2025-07-07 05:56:48] [Rank 0] step:9301/10000 train_time:639957ms step_avg:68.81ms
+[2025-07-07 05:56:49] [Rank 0] step:9321/10000 train_time:641330ms step_avg:68.80ms
+[2025-07-07 05:56:51] [Rank 0] step:9341/10000 train_time:642703ms step_avg:68.80ms
+[2025-07-07 05:56:52] [Rank 0] step:9361/10000 train_time:644124ms step_avg:68.81ms
+[2025-07-07 05:56:54] [Rank 0] step:9381/10000 train_time:645481ms step_avg:68.81ms
+[2025-07-07 05:56:55] [Rank 0] step:9401/10000 train_time:646853ms step_avg:68.81ms
+[2025-07-07 05:56:56] [Rank 0] step:9421/10000 train_time:648328ms step_avg:68.82ms
+[2025-07-07 05:56:58] [Rank 0] step:9441/10000 train_time:649699ms step_avg:68.82ms
+[2025-07-07 05:56:59] [Rank 0] step:9461/10000 train_time:651073ms step_avg:68.82ms
+[2025-07-07 05:57:00] [Rank 0] step:9481/10000 train_time:652447ms step_avg:68.82ms
+[2025-07-07 05:57:02] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 05:57:03] [Rank 0] PRINT: step:9500/10000 train_loss:0.8692 val_loss:1.0109 train_time:654543ms step_avg:68.90ms
+[2025-07-07 05:57:03] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 05:57:03] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 05:57:03] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:02:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:02:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:02:28] [Rank 0] Total Loss: 5.9633
+[2025-07-07 06:02:28] [Rank 0] Total FTA: 0.3343
+[2025-07-07 06:02:28] [Rank 0] Group 0 Loss: 6.4430
+[2025-07-07 06:02:28] [Rank 0] Group 1 Loss: 5.4111
+[2025-07-07 06:02:28] [Rank 0] Group 2 Loss: 6.0674
+[2025-07-07 06:02:28] [Rank 0] Group 3 Loss: 6.1087
+[2025-07-07 06:02:28] [Rank 0] Group 4 Loss: 5.8000
+[2025-07-07 06:02:29] [Rank 0] Group 5 Loss: 5.8805
+[2025-07-07 06:02:29] [Rank 0] Group 6 Loss: 5.9481
+[2025-07-07 06:02:29] [Rank 0] Group 7 Loss: 5.8529
+[2025-07-07 06:02:29] [Rank 0] Group 8 Loss: 5.9386
+[2025-07-07 06:02:29] [Rank 0] Group 9 Loss: 5.9592
+[2025-07-07 06:02:29] [Rank 0] Group 10 Loss: 5.9171
+[2025-07-07 06:02:29] [Rank 0] Group 11 Loss: 5.8895
+[2025-07-07 06:02:29] [Rank 0] Group 0 FTA: 0.3225
+[2025-07-07 06:02:29] [Rank 0] Group 1 FTA: 0.6901
+[2025-07-07 06:02:29] [Rank 0] Group 2 FTA: 0.2552
+[2025-07-07 06:02:29] [Rank 0] Group 3 FTA: 0.2708
+[2025-07-07 06:02:29] [Rank 0] Group 4 FTA: 0.4115
+[2025-07-07 06:02:29] [Rank 0] Group 5 FTA: 0.3229
+[2025-07-07 06:02:29] [Rank 0] Group 6 FTA: 0.2786
+[2025-07-07 06:02:29] [Rank 0] Group 7 FTA: 0.2786
+[2025-07-07 06:02:29] [Rank 0] Group 8 FTA: 0.2786
+[2025-07-07 06:02:29] [Rank 0] Group 9 FTA: 0.2812
+[2025-07-07 06:02:29] [Rank 0] Group 10 FTA: 0.3418
+[2025-07-07 06:02:29] [Rank 0] Group 11 FTA: 0.3105
+[2025-07-07 06:02:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 06:02:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 06:02:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 06:02:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 06:02:30] [Rank 0] step:9501/10000 train_time:654553ms step_avg:68.89ms
+[2025-07-07 06:02:32] [Rank 0] step:9521/10000 train_time:655309ms step_avg:68.83ms
+[2025-07-07 06:02:33] [Rank 0] step:9541/10000 train_time:656773ms step_avg:68.84ms
+[2025-07-07 06:02:34] [Rank 0] step:9561/10000 train_time:658138ms step_avg:68.84ms
+[2025-07-07 06:02:36] [Rank 0] step:9581/10000 train_time:659505ms step_avg:68.83ms
+[2025-07-07 06:02:37] [Rank 0] step:9601/10000 train_time:660873ms step_avg:68.83ms
+[2025-07-07 06:02:39] [Rank 0] step:9621/10000 train_time:662240ms step_avg:68.83ms
+[2025-07-07 06:02:40] [Rank 0] step:9641/10000 train_time:663607ms step_avg:68.83ms
+[2025-07-07 06:02:41] [Rank 0] step:9661/10000 train_time:664975ms step_avg:68.83ms
+[2025-07-07 06:02:43] [Rank 0] step:9681/10000 train_time:666345ms step_avg:68.83ms
+[2025-07-07 06:02:44] [Rank 0] step:9701/10000 train_time:667715ms step_avg:68.83ms
+[2025-07-07 06:02:45] [Rank 0] step:9721/10000 train_time:669130ms step_avg:68.83ms
+[2025-07-07 06:02:47] [Rank 0] step:9741/10000 train_time:670501ms step_avg:68.83ms
+[2025-07-07 06:02:48] [Rank 0] step:9761/10000 train_time:671871ms step_avg:68.83ms
+[2025-07-07 06:02:50] [Rank 0] step:9781/10000 train_time:673241ms step_avg:68.83ms
+[2025-07-07 06:02:51] [Rank 0] step:9801/10000 train_time:674610ms step_avg:68.83ms
+[2025-07-07 06:02:52] [Rank 0] step:9821/10000 train_time:675980ms step_avg:68.83ms
+[2025-07-07 06:02:54] [Rank 0] step:9841/10000 train_time:677351ms step_avg:68.83ms
+[2025-07-07 06:02:55] [Rank 0] step:9861/10000 train_time:678721ms step_avg:68.83ms
+[2025-07-07 06:02:56] [Rank 0] step:9881/10000 train_time:680091ms step_avg:68.83ms
+[2025-07-07 06:02:58] [Rank 0] step:9901/10000 train_time:681461ms step_avg:68.83ms
+[2025-07-07 06:02:59] [Rank 0] step:9921/10000 train_time:682854ms step_avg:68.83ms
+[2025-07-07 06:03:01] [Rank 0] step:9941/10000 train_time:684228ms step_avg:68.83ms
+[2025-07-07 06:03:02] [Rank 0] step:9961/10000 train_time:685600ms step_avg:68.83ms
+[2025-07-07 06:03:03] [Rank 0] step:9981/10000 train_time:686972ms step_avg:68.83ms
+[2025-07-07 06:03:05] [Rank 0] step:10000/10000 train_time:688277ms step_avg:68.83ms
+[2025-07-07 06:03:05] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:03:06] [Rank 0] PRINT: step:10000/10000 train_loss:0.8595 val_loss:0.9805 train_time:688976ms step_avg:68.90ms
+[2025-07-07 06:03:06] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:03:06] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:03:06] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:08:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:08:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:08:28] [Rank 0] Total Loss: 5.8309
+[2025-07-07 06:08:28] [Rank 0] Total FTA: 0.3783
+[2025-07-07 06:08:28] [Rank 0] Group 0 Loss: 6.0304
+[2025-07-07 06:08:28] [Rank 0] Group 1 Loss: 5.3754
+[2025-07-07 06:08:28] [Rank 0] Group 2 Loss: 5.8685
+[2025-07-07 06:08:28] [Rank 0] Group 3 Loss: 6.1782
+[2025-07-07 06:08:28] [Rank 0] Group 4 Loss: 5.6949
+[2025-07-07 06:08:28] [Rank 0] Group 5 Loss: 5.8258
+[2025-07-07 06:08:28] [Rank 0] Group 6 Loss: 5.8249
+[2025-07-07 06:08:28] [Rank 0] Group 7 Loss: 5.8001
+[2025-07-07 06:08:28] [Rank 0] Group 8 Loss: 5.8822
+[2025-07-07 06:08:28] [Rank 0] Group 9 Loss: 5.7552
+[2025-07-07 06:08:28] [Rank 0] Group 10 Loss: 5.8168
+[2025-07-07 06:08:28] [Rank 0] Group 11 Loss: 5.7810
+[2025-07-07 06:08:28] [Rank 0] Group 0 FTA: 0.6580
+[2025-07-07 06:08:28] [Rank 0] Group 1 FTA: 0.4792
+[2025-07-07 06:08:28] [Rank 0] Group 2 FTA: 0.3359
+[2025-07-07 06:08:28] [Rank 0] Group 3 FTA: 0.2578
+[2025-07-07 06:08:28] [Rank 0] Group 4 FTA: 0.3776
+[2025-07-07 06:08:28] [Rank 0] Group 5 FTA: 0.3490
+[2025-07-07 06:08:28] [Rank 0] Group 6 FTA: 0.2839
+[2025-07-07 06:08:28] [Rank 0] Group 7 FTA: 0.3125
+[2025-07-07 06:08:28] [Rank 0] Group 8 FTA: 0.3073
+[2025-07-07 06:08:28] [Rank 0] Group 9 FTA: 0.3359
+[2025-07-07 06:08:28] [Rank 0] Group 10 FTA: 0.3359
+[2025-07-07 06:08:28] [Rank 0] Group 11 FTA: 0.3213
+[2025-07-07 06:08:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_loss_curves.png
+[2025-07-07 06:08:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/per_class_acc_curves.png
+[2025-07-07 06:08:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_loss_curve.png
+[2025-07-07 06:08:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_42/total_acc_curve.png
+[2025-07-07 06:08:30] [Rank 0] step:10001/10000 train_time:688989ms step_avg:68.89ms
+[2025-07-07 06:08:30] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 06:08:30 2025 ---
+[2025-07-07 06:08:30] [Rank 0] PRINT: Peak memory allocated: 9138 MiB reserved: 10596 MiB
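End of the seed-42 run at lr 0.005: over the last 2000 steps val_loss falls from 1.1165 to 0.9805 and total FTA rises from 0.2611 to 0.3783, while the detailed-evaluation loss stays near 5.8. With six detailed-evaluation blocks per log and several runs in this commit, comparisons are easiest after parsing the numbers back out of the .txt files. A hedged sketch against the line formats shown above (the committed PNG curves are produced by the training script itself; the parser is not part of this commit):

import re
from pathlib import Path
from collections import defaultdict

VAL = re.compile(r"step:(\d+)/\d+ train_loss:([\d.]+) val_loss:([\d.]+)")
GRP = re.compile(r"Group (\d+) (Loss|FTA): ([\d.]+)")

def parse_log(path):
    """Extract (step, train_loss, val_loss) points and per-group metric series.
    If a log still contains doubled lines, de-duplicate consecutive repeats first."""
    val_points, group_series = [], defaultdict(list)
    for line in Path(path).read_text().splitlines():
        if (m := VAL.search(line)):
            val_points.append((int(m[1]), float(m[2]), float(m[3])))
        elif (m := GRP.search(line)):
            group_series[(int(m[1]), m[2])].append(float(m[3]))
    return val_points, group_series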
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/config.json b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..10ff8d08b4c8f5886bfd23b4bd32895ebd042b18
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 43,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "a6789957-0711-4163-8bd8-00265e47c7ba",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..51d62bbff0b5eb570def2ad6a36707b9080e4953
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad93e36c5b165ac931423f7aa00ecc33c4eca396a693d59ff6bc85c8ed773ca2
+size 173018
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..10607438d29bea0d6db58936ededf5b92bc11aa4
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b53a3b482165998d29942cd7572cc4cd825c3011d1f4484fb60e6e627f105f3
+size 325971
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..70e030d1a235bb5f5af0f410c3d3e0c96fa69583
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9d398192f9eb5548882f31da4f2c72e76a3b5cb965035f2328a31a31a0eca064
+size 74383
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..92a268246c70b1491ed0d0c66c492b76b414b3a0
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b915d09a761dcf7ac110f104e0708e7b036f28bf601288c6c2d4ac19e65883dd
+size 103242
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/training_log_a6789957-0711-4163-8bd8-00265e47c7ba.txt b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/training_log_a6789957-0711-4163-8bd8-00265e47c7ba.txt
new file mode 100644
index 0000000000000000000000000000000000000000..91344108c883da2335b400d27dfc399c53335480
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/training_log_a6789957-0711-4163-8bd8-00265e47c7ba.txt
@@ -0,0 +1,3056 @@
+[2025-07-08 07:18:07] [Rank 0] PRINT: --- Script Start: Tue Jul 8 07:18:07 2025 ---
+[2025-07-08 07:18:07] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=43, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-08 07:18:07] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-08 07:18:07] [Rank 0] PRINT: Using fixed seed: 43
+[2025-07-08 07:18:07] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43
+[2025-07-08 07:18:07] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycling through the shards allows multi-epoch training
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
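A brief usage sketch of the loader above (single-process values; the file pattern matches the Hyperparameters further down, while the batch size is illustrative and taken equal to train_seq_len):

# Illustrative only: iterate the QA training shards on one GPU.
gen = distributed_data_generator(
    "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
    batch_size=12288, rank=0, world_size=1)
inputs, targets = next(gen)  # int32/int64 CUDA tensors; targets are inputs shifted by one token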
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn, MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn, MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (no Muon; all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(Attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
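The mode table above is help text only; the actual routing of matrices to Muon vs. Adam happens further down in the script, outside this excerpt. As a hedged illustration of what such a split can look like (the name patterns below are invented for the example; the qkvo parameterization exposes q_w/k_w/v_w so QK and VO can be separated):

def split_params_sketch(model, mode):
    # Illustrative only -- not the script's real grouping logic.
    muon, adam = [], []
    for name, p in model.named_parameters():
        if p.ndim < 2:
            adam.append(p)   # vectors (biases, norms) go to Adam in every mode
        elif mode == 0:
            muon.append(p)   # all hidden attn+MLP matrices to Muon
        elif mode == 5:
            adam.append(p)   # the all-Adam setting used in this run
        elif mode == 1:
            (muon if ("q_w" in name or "k_w" in name) else adam).append(p)
        else:
            adam.append(p)   # remaining modes omitted in this sketch
    return muon, adam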
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import the correct GPT model based on --unet / --model_parameterization ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500 # Original: 125
+    save_checkpoint = False # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank) # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized(): # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
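The Hyperparameters block above sets num_iterations = 10000 with cooldown_frac = 0.8, i.e. a constant learning rate for the first 2000 steps and a cooldown over the remaining 8000. The scheduler itself is not in this excerpt; modded-nanogpt derivatives typically implement it as a linear decay, so treat the following as an assumption-labeled sketch rather than the script's actual schedule:

def lr_multiplier(step, num_iterations=10000, cooldown_frac=0.8):
    # Assumed constant-then-linear shape; the real schedule may differ.
    x = step / num_iterations
    if x < 1 - cooldown_frac:
        return 1.0
    return max((1 - x) / cooldown_frac, 0.0)  # 1 -> 0 over the last 80% of steps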
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = base_log_dir / run_folder_name
+run_flag = not os.path.exists(run_dir_path) # only train if this run directory does not already exist
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    if run_flag:
+        run_uuid = uuid.uuid4()
+        logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+        print(f"Logging to: {logfile}")
+
+        # Save configuration
+        config_to_save = {
+            "cli_args": vars(exp_args),
+            "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+            "run_uuid_for_log": str(run_uuid),
+            "script_code_logged_at_start": True
+        }
+        config_file_path = run_dir_path / "config.json"
+        with open(config_file_path, "w") as f:
+            json.dump(config_to_save, f, indent=4)
+        print(f"Saved configuration to: {config_file_path}")
+
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Write each message to the logfile exactly once, and only once it has been set
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+if run_flag:
+    print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+    print0(f"PRINT: Hyperparameters: {args}", console=True)
+    print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+    if master_process:
+        print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+    print0(code) # Log the code
+    # ... (other initial logs)
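+# --- Editor's note: illustrative usage of print0 (not part of the original run). On the
+# --- master rank, a call such as
+# ---     print0("PRINT: hello", console=True)
+# --- prints "hello" to stdout (the "PRINT:" prefix is stripped for console output) and,
+# --- once logfile has been set, appends a timestamped line of the form
+# ---     [YYYY-MM-DD HH:MM:SS] [Rank 0] PRINT: hello
+# --- to training_log_<uuid>.txt; non-master ranks return without printing or writing.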
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples] # take the first num_samples cached samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f"  Original: {sample['original']}")
+            # print(f"  Cleaned : {sample['cleaned']}")
+            print(f"  Prompt : '{sample['prompt']}'")
+            print(f"  Answer : '{sample['answer']}'")
+            print(
+                f"  Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + 
[tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. 
Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close(fig)
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14) # history is keyed by training step
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close(fig)
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True)
+        plt.close(fig)
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close(fig)
+
+
+
+
+
+if run_flag:
+    ########################################
+    #    Construct model and optimizer     #
+    ########################################
+    if master_process:
+        try:
+            ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+            print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+        except Exception:
+            ft_tokenizer = None
+            print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+    else:
+        ft_tokenizer = None
+
+    print0("PRINT: Constructing model...", console=True)
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+    for m in model.modules():
+        if isinstance(m, nn.Embedding):
+            m.bfloat16()
+    print0("PRINT: Broadcasting model parameters...", console=True)
+    for param in model.parameters():
+        dist.broadcast(param.detach(), 0)
+    print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model forward function:", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)
+            model.train()
+
+            print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+            if isinstance(result, tuple):
+                print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+                if len(result) >= 2:
+                    print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                    print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+            else:
+                print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+    model_for_inference = model
+    print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+    if master_process:
+        print0("PRINT: Testing model with target_seq=None...", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks) # target_seq=None
+            model.train()
+
if isinstance(result, tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + + # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- + elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + + for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + + # learning rate schedule: stable then decay (KEEP AS IS, but check assert) + def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + + # attention window size schedule (KEEP AS IS) + def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) + @lru_cache(1) + def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) + def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + + print0("PRINT: Compiling model with TorchInductor...", console=True) + # Use 'model' for compilation, not 'model_compiled' before it's defined + + model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") + print0("PRINT: Model compilation complete.", console=True) + + ######################################## + # Warmup kernels + ######################################## + print0("PRINT: Starting warmup...", console=True) + warmup_steps = 10 + initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] + ) + + for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + + del initial_state + print0("PRINT: Warmup complete.", console=True) + torch.cuda.synchronize() + + ######################################## + # Training and validation + ######################################## + print0("PRINT: Starting training...", console=True) + train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) + train_loss_sum = 
torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + training_time_ms = 0 + torch.cuda.synchronize() + t0 = time.perf_counter() + train_steps = args.num_iterations + + + + if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + + for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
val_loss is NaN.", console=True) + + del val_loader + dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG) + + if train_step_count > 0: + avg_train_loss = train_loss_sum / train_step_count + dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG) + avg_train_loss = avg_train_loss.item() + else: + avg_train_loss = float('nan') + + avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0 + + #first_token_acc = 0.0 + #ft_correct = 0 + #ft_total = 0 + #if master_process and ft_tokenizer is not None: + # try: + # first_token_acc, ft_correct, ft_total = compute_first_token_accuracy( + # model_for_inference, ft_tokenizer, device, num_samples=1000 + # ) + # except Exception as e: + # print0(f"PRINT: First-token accuracy computation failed: {e}", console=True) + + #if world_size > 1: + #ft_acc_tensor = torch.tensor(first_token_acc, device=device) + #ft_correct_tensor = torch.tensor(ft_correct, device=device) + #ft_total_tensor = torch.tensor(ft_total, device=device) + #dist.broadcast(ft_acc_tensor, 0) + #dist.broadcast(ft_correct_tensor, 0) + #dist.broadcast(ft_total_tensor, 0) + #first_token_acc = ft_acc_tensor.item() + #ft_correct = int(ft_correct_tensor.item()) + #ft_total = int(ft_total_tensor.item()) + + avg_train_loss = float(avg_train_loss) + if step == 0: + print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True) + else: + print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True) + + if master_process and step > 0: + selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW) + class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)} + + model_for_inference.load_state_dict(model.state_dict()) + + + eval_results = run_detailed_evaluation( + model=model_for_inference, + tokenizer=tokenizer_for_eval, + qa_data_path=QA_JSONL_PATH, + device=device, + m_val=M_FOR_POWERLAW, + class_to_group_map=class_to_group_map, + num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL + ) + + + print0("--- Detailed Evaluation Results (This Step) ---", console=True) + print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True) + print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True) + for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} Loss: {loss:.4f}", console=True) + for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])): + print0(f" Group {group_id} FTA: {acc:.4f}", console=True) + + + current_step_str = str(step) + history['total_loss'][current_step_str] = eval_results['total_loss'] + history['total_acc'][current_step_str] = eval_results['total_acc'] + for group_id, loss in eval_results['per_class_loss'].items(): + history['per_class_loss'][group_id][current_step_str] = loss + for group_id, acc in eval_results['per_class_acc'].items(): + history['per_class_acc'][group_id][current_step_str] = acc + + + plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss") + plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1]) + plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss") + plot_curves(history['total_acc'], run_dir_path / 
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
Ending training early at step {step}.", console=True)
+            break
+
+        loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+        loss_train.backward()
+        train_loss_sum += loss_train.detach() / args.train_seq_len
+        train_step_count += 1
+
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+        current_lr_val = get_lr(step)
+        for opt in optimizers:
+            for group in opt.param_groups:
+                group["lr"] = group["initial_lr"] * current_lr_val
+
+        # Muon momentum warmup: ramp from 0.85 to 0.95 over the first 300 steps
+        if optimizer2 is not None:
+            for group in optimizer2.param_groups:
+                frac = min(step / 300, 1)
+                group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+        for opt in optimizers:
+            opt.step()
+
+        model_compiled.zero_grad(set_to_none=True)
+
+        if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+            current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+            approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+            total_tokens_in_batch = args.train_seq_len * world_size
+            train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() # computed for reference; not currently printed
+            print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+    print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+    print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+           f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+    if dist.is_initialized():
+        dist.destroy_process_group()
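+
+# --- Editor's note (illustrative, not part of the original run): optimizers/MUON.py is not
+# --- reproduced in this log. As a sketch of what the Muon update above relies on, the
+# --- modded-nanogpt reference orthogonalizes each 2D gradient with a quintic Newton-Schulz
+# --- iteration like the one below; steps=5 matches the ns_steps=5 passed to Muon above, and
+# --- the function name here is hypothetical. The repo's actual implementation may differ.
+def zeropower_via_newtonschulz5_sketch(G, steps: int = 5):
+    # Coefficients of the iteration X <- a*X + b*(X X^T)X + c*(X X^T)^2 X, chosen to drive
+    # the singular values of X toward 1 without computing an exact SVD.
+    a, b, c = (3.4445, -4.7750, 2.0315)
+    X = G.bfloat16()
+    if G.size(-2) > G.size(-1): # work on the wide orientation
+        X = X.mT
+    X = X / (X.norm(dim=(-2, -1), keepdim=True) + 1e-7) # scale so the top singular value is <= 1
+    for _ in range(steps):
+        A = X @ X.mT
+        B = b * A + c * A @ A
+        X = a * X + B @ X
+    if G.size(-2) > G.size(-1):
+        X = X.mT
+    return X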
flush=True) # Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = base_log_dir / run_folder_name +run_flag = not os.path.exists(run_dir_path) + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + if run_flag: + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + +if run_flag: + print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) + print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) + print0(f"PRINT: Hyperparameters: {args}", console=True) + print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) + if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) + print0(code) # Log the code + # ... 
(other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' + prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + 
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    # Kept for reference; the cached file below is what is actually read.
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        import random
+        import os
+        import json
+        import torch
+
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # cap at num_samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'")
+            print(f" Expected : {result['expected_token']} -> '{result['exp_text']}'")
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        num_classes = 1 if group_id == 0 else 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
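+
+# Worked example of the construction above (illustrative only, not executed by
+# the training script): for m = 3 there are m+1 = 4 groups, and group g holds
+# 2**(g-1) classes (one class for g = 0) with 2**(m-g) samples each, so
+#   generate_powerlaw_selection_counts(3)
+#   == ({0: 8, 1: 4, 2: 2, 3: 2, 4: 1, 5: 1, 6: 1, 7: 1},
+#       [0, 1, 2, 2, 3, 3, 3, 3])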
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']: continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match: continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer: continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple): logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values():
+                all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
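+
+# Shape of the `history` argument plot_curves expects (a minimal sketch with
+# made-up numbers): per-class histories are nested dicts keyed by group id and
+# then by step, while total curves are flat dicts keyed by step; plot_curves
+# switches between the two layouts via the isinstance check above.
+#   per_class_history = {"0": {"500": 5.31, "1000": 6.01},
+#                        "1": {"500": 5.24, "1000": 5.74}}
+#   total_history     = {"500": 5.24, "1000": 5.79}
+#   plot_curves(per_class_history, "per_class_loss.png", "Per-Class Loss", "Loss")
+#   plot_curves(total_history, "total_loss.png", "Total Loss", "Loss")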
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on the original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    # =================================================================
+    # <<<<< Restored Missing Code >>>>>
+    # =================================================================
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # Create the class_id -> group_id mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Per-Class Loss Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curves from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+
+if run_flag:
+    ########################################
+    #    Construct model and optimizer     #
+    ########################################
+    if master_process:
+        try:
+            ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+            print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+        except Exception:
+            ft_tokenizer = None
+            print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+    else:
+        ft_tokenizer = None
+
+    print0("PRINT: Constructing model...", console=True)
+    model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                           max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+    for m in model.modules():
+        if isinstance(m, nn.Embedding):
+            m.bfloat16()
+    print0("PRINT: Broadcasting model parameters...", console=True)
+    for param in model.parameters():
+        dist.broadcast(param.detach(), 0)
+    print0("PRINT: Model constructed and broadcasted.", console=True)
+
+    if master_process:
+        print0("PRINT: Testing model forward function:", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)
+            model.train()
+
+            print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+            if isinstance(result, tuple):
+                print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+                if len(result) >= 2:
+                    print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                    print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+            else:
+                print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test failed: {e}", console=True)
+
+    model_for_inference = model
+    print0("PRINT: Saved original model reference for inference.", console=True)
+
+    if master_process:
+        print0("PRINT: Testing model with target_seq=None...", console=True)
+        try:
+            test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+            test_blocks = torch.tensor(1, device=device)
+            model.eval()
+            with torch.no_grad():
+                result = model(test_input, None, test_blocks)  # target_seq=None
+            model.train()
+
+            if isinstance(result, tuple) and len(result) == 2:
+                loss, logits = result
+                print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+            else:
+                print0(f"PRINT: Model returns: {type(result)}", console=True)
+        except Exception as e:
+            print0(f"PRINT: Model test still fails: {e}", console=True)
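+
+    # The optimizer_mode CLI flag selects which weight matrices Muon updates
+    # and which fall back to Adam (Adam always also owns the embeddings, the
+    # LM head, and all scalar parameters). Summary of the branches below:
+    #   0: Muon on all attention (QKVO) + all MLP matrices
+    #   1: Muon on QK attention;        Adam on VO attention + MLP
+    #   2: Muon on VO attention;        Adam on QK attention + MLP
+    #   3: Muon on all attention;       Adam on MLP
+    #   4: Muon on MLP;                 Adam on all attention
+    #   5: no Muon;                     Adam on every matrix
+    #   6: Muon on MLP W_2;             Adam on attention + MLP W_1
+    #   7: Muon on VO attention + MLP;  Adam on QK attention
+    #   8: Muon on VO attention + W_2;  Adam on QK attention + W_1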
+
+    # --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    if exp_args.model_parameterization == "qkvo":
+        print0("PRINT: Collecting parameters for optimizers...", console=True)
+        head_params = [model.lm_head.weight]
+        embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+        # Granular collection for attention and MLP parts
+        attn_q_params = []
+        attn_k_params = []
+        attn_v_params = []
+        attn_o_params = []  # W_O from c_proj
+        mlp_fc_params = []
+        mlp_proj_params = []
+
+        for block_module in model.blocks:
+            if block_module.attn is not None:
+                # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+                if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+                else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+                if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+                else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+                if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+                else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+                attn_o_params.append(block_module.attn.c_proj.weight)
+            if block_module.mlp is not None:
+                mlp_fc_params.append(block_module.mlp.c_fc.weight)
+                mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+        # Combine into logical groups for experiments
+        attn_qk_group = attn_q_params + attn_k_params
+        attn_vo_group = attn_v_params + attn_o_params
+        all_attn_matrices = attn_qk_group + attn_vo_group
+        mlp_w1_group = mlp_fc_params
+        mlp_w2_group = mlp_proj_params
+        all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+        # Scalar parameters (all others not explicitly grouped as matrices)
+        matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+        scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+        for p_scalar in scalar_params:  # Sanity check
+            if p_scalar.ndim >= 2:
+                print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+        # Determine parameter distribution based on optimizer_mode
+        muon_params_target_list = []
+        adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+        adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+        current_optimizer_mode = exp_args.optimizer_mode
+        print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+        if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+            print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+            muon_params_target_list = all_attn_matrices + all_mlp_matrices
+            # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+        elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+            print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_qk_group
+            adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+        elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+            print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group
+            adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+        elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+            print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_attn_matrices
+            adam_matrix_target_list = all_mlp_matrices
+        elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+            print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = all_mlp_matrices
+            adam_matrix_target_list = all_attn_matrices
+        elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+            print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = []
+            adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+        elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+            print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = mlp_w2_group
+            adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+        elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+            print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + all_mlp_matrices
+            adam_matrix_target_list = attn_qk_group
+        elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+            print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+            muon_params_target_list = attn_vo_group + mlp_w2_group
+            adam_matrix_target_list = attn_qk_group + mlp_w1_group
+        else:
+            raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+        # Adam optimizer setup
+        adam_param_groups_config = [
+            # dict(params=head_params, lr=0.22),
+            # dict(params=embed_params, lr=0.6),
+            # dict(params=scalar_params, lr=0.04),
+            dict(params=head_params, lr=exp_args.adam_lr),
+            dict(params=embed_params, lr=exp_args.adam_lr),
+            dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+        ]
+        # Add matrices specifically assigned to Adam for this experiment mode
+        if adam_matrix_target_list:
+            # Ensure adam_matrix_target_list is flat and contains Parameters
+            flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+            if flat_adam_matrices:  # Only add the group if there are params
+                adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+        # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+        adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+        optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam if desired
+        optimizers = [optimizer1]  # Start with Adam
+
+        # Muon optimizer setup
+        if muon_params_target_list:
+            # Ensure muon_params_target_list is flat, unique, and contains Parameters
+            flat_unique_muon_params = []
+            seen_muon_ids = set()
+            for sublist_or_p in muon_params_target_list:
+                for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                    if p is not None and id(p) not in seen_muon_ids:
+                        flat_unique_muon_params.append(p)
+                        seen_muon_ids.add(id(p))
+
+            if flat_unique_muon_params:  # Only create Muon if it has parameters
+                optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+                optimizers.append(optimizer2)
+            else:
+                print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+                optimizer2 = None  # Explicitly set to None if not created
+        else:
+            print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+            optimizer2 = None  # Explicitly set to None
+
+        print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+        if optimizer2:
+            print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+    elif exp_args.model_parameterization == "whole":
+        hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+        embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+        scalar_params = [p for p in model.parameters() if p.ndim < 2]
+        head_params = [model.lm_head.weight]
+
+        # init the optimizer(s)
+        adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+        # small adam epsilon by @YouJiacheng; this is an alternate method of fixing the world_size dependence
+        # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+        optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+        optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+        optimizers = [optimizer1, optimizer2]
+
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["initial_lr"] = group["lr"]
+
+    # learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+    def get_lr(step: int):
+        x = step / args.num_iterations  # progress in training
+        # assert 0 <= x < 1  # Original assert; it fails on the last step when step == num_iterations
+        # --- MODIFICATION: Adjust assert for LR schedule ---
+        if not (0 <= x <= 1):  # Allow x = 1 for the last step
+            x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+            # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+        if x < 1 - args.cooldown_frac:
+            return 1.0
+        else:
+            # Ensure cooldown_frac is not zero to avoid division by zero
+            w = (1 - x) / max(args.cooldown_frac, 1e-9)
+            return w * 1.0 + (1 - w) * 0.1
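+
+    # Shape of get_lr under the logged settings (num_iterations=10000,
+    # cooldown_frac=0.8): the multiplier stays at 1.0 for the first 20% of
+    # training, then decays linearly toward 0.1, e.g.
+    #   get_lr(0)     == 1.0
+    #   get_lr(2000)  == 1.0   # x = 0.2, w = 1.0
+    #   get_lr(6000)  == 0.55  # x = 0.6, w = 0.5 -> 0.5*1.0 + 0.5*0.1
+    #   get_lr(10000) == 0.1   # x clamped to 1.0, w = 0.0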
+
+    # attention window size schedule (KEEP AS IS)
+    def next_multiple_of_n(v: float | int, *, n: int):
+        return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+    @lru_cache(1)
+    def get_window_size_blocks_helper(window_size: int):
+        return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+    def get_window_size_blocks(step: int):
+        x = step / args.num_iterations  # progress in training
+        # --- MODIFICATION: Adjust assert for window size schedule ---
+        if not (0 <= x <= 1):
+            x = min(max(x, 0.0), 1.0)  # Clamp x
+
+        # Ensure window_size is at least 128
+        window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+        return get_window_size_blocks_helper(window_size)
+
+    print0("PRINT: Compiling model with TorchInductor...", console=True)
+    # Use 'model' for compilation, not 'model_compiled' before it's defined
+    model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+    print0("PRINT: Model compilation complete.", console=True)
+
+    ########################################
+    #            Warmup kernels            #
+    ########################################
+    print0("PRINT: Starting warmup...", console=True)
+    warmup_steps = 10
+    initial_state = dict(
+        model=copy.deepcopy(model_compiled.state_dict()),
+        optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+    )
+
+    for i in range(warmup_steps):
+        inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+        loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+        loss.backward()
+        for param in model_compiled.parameters():
+            if param.grad is not None:
+                dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+        for opt in optimizers:
+            opt.step()
+        model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+    del initial_state
+    print0("PRINT: Warmup complete.", console=True)
+    torch.cuda.synchronize()
+
+    ########################################
+    #      Training and validation         #
+    ########################################
+    print0("PRINT: Starting training...", console=True)
+    train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+    train_loss_sum = torch.zeros(1, device=device)
+    train_step_count = torch.zeros(1, device=device)
+    training_time_ms = 0
+    torch.cuda.synchronize()
+    t0 = time.perf_counter()
+    train_steps = args.num_iterations
+
+    if master_process:
+        tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+        history = {
+            'per_class_loss': defaultdict(dict),
+            'per_class_acc': defaultdict(dict),
+            'total_loss': {},
+            'total_acc': {}
+        }
+        # ------------------------------------
+        QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+        M_FOR_POWERLAW = 11
+        NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+    for step in range(train_steps + 1):
+        last_step = (step == train_steps)
+
+        # --------- VALIDATION SECTION ---------
+        if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+            torch.cuda.synchronize()
+            if step > 0:
+                current_run_time = 1000 * (time.perf_counter() - t0)
+                training_time_ms += current_run_time
+
+            model_compiled.eval()
+            val_batch_size = world_size * args.val_seq_len
+            if args.val_tokens % val_batch_size != 0:
+                print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+            val_num_steps = args.val_tokens // val_batch_size
+            val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+            val_loss_sum = torch.zeros(1, device=device)
+            actual_val_steps = 0
+
+            with torch.no_grad():
+                for val_i in range(val_num_steps):
+                    try:
+                        inputs, targets = next(val_loader)
+                        loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                        val_loss_sum += loss_val
+                        actual_val_steps += 1
+                    except StopIteration:
+                        print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                        break
+
+            if actual_val_steps > 0:
+                val_loss_avg = val_loss_sum / actual_val_steps
+            else:
+                val_loss_avg = torch.tensor(float('nan'), device=device)
+                print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+            del val_loader
+            dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+            if train_step_count > 0:
+                avg_train_loss = train_loss_sum / train_step_count
+                dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+                avg_train_loss = avg_train_loss.item()
+            else:
+                avg_train_loss = float('nan')
+
+            avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+            # first_token_acc = 0.0
+            # ft_correct = 0
+            # ft_total = 0
+            # if master_process and ft_tokenizer is not None:
+            #     try:
+            #         first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+            #             model_for_inference, ft_tokenizer, device, num_samples=1000
+            #         )
+            #     except Exception as e:
+            #         print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+            #
+            # if world_size > 1:
+            #     ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+            #     ft_correct_tensor = torch.tensor(ft_correct, device=device)
+            #     ft_total_tensor = torch.tensor(ft_total, device=device)
+            #     dist.broadcast(ft_acc_tensor, 0)
+            #     dist.broadcast(ft_correct_tensor, 0)
+            #     dist.broadcast(ft_total_tensor, 0)
+            #     first_token_acc = ft_acc_tensor.item()
+            #     ft_correct = int(ft_correct_tensor.item())
+            #     ft_total = int(ft_total_tensor.item())
+
+            avg_train_loss = float(avg_train_loss)
+            if step == 0:
+                print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+            else:
+                print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+            if master_process and step > 0:
+                selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+                class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+                model_for_inference.load_state_dict(model.state_dict())
+
+                eval_results = run_detailed_evaluation(
+                    model=model_for_inference,
+                    tokenizer=tokenizer_for_eval,
+                    qa_data_path=QA_JSONL_PATH,
+                    device=device,
+                    m_val=M_FOR_POWERLAW,
+                    class_to_group_map=class_to_group_map,
+                    num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+                )
+
+                print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+                print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+                print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True)
+                for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                    print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+                for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                    print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+                current_step_str = str(step)
+                history['total_loss'][current_step_str] = eval_results['total_loss']
+                history['total_acc'][current_step_str] = eval_results['total_acc']
+                for group_id, loss in eval_results['per_class_loss'].items():
+                    history['per_class_loss'][group_id][current_step_str] = loss
+                for group_id, acc in eval_results['per_class_acc'].items():
+                    history['per_class_acc'][group_id][current_step_str] = acc
+
+                plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+                plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+                plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+                plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + + print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) + print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + + if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-08 07:18:07] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-08 07:18:07] [Rank 0] PRINT: Constructing model...
+[2025-07-08 07:18:09] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-08 07:18:09] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-08 07:18:09] [Rank 0] PRINT: Testing model forward function:
+[2025-07-08 07:18:10] [Rank 0] PRINT: Model test - Result type: <class 'torch.Tensor'>
+[2025-07-08 07:18:10] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-08 07:18:10] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-08 07:18:10] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-08 07:18:10] [Rank 0] PRINT: Model returns: <class 'torch.Tensor'>
+[2025-07-08 07:18:10] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-08 07:18:10] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-08 07:18:10] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005).
+[2025-07-08 07:18:10] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-08 07:18:10] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-08 07:18:10] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-08 07:18:10] [Rank 0] PRINT: Model compilation complete.
+[2025-07-08 07:18:10] [Rank 0] PRINT: Starting warmup...
+[2025-07-08 07:19:16] [Rank 0] PRINT: Warmup complete.
+[2025-07-08 07:19:16] [Rank 0] PRINT: Starting training...
+[2025-07-08 07:19:16] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 07:19:23] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-08 07:19:25] [Rank 0] step:21/10000 train_time:1015ms step_avg:48.31ms
+[2025-07-08 07:19:26] [Rank 0] step:41/10000 train_time:2329ms step_avg:56.80ms
+[2025-07-08 07:19:27] [Rank 0] step:61/10000 train_time:3644ms step_avg:59.73ms
+[2025-07-08 07:19:29] [Rank 0] step:81/10000 train_time:4967ms step_avg:61.32ms
+[2025-07-08 07:19:30] [Rank 0] step:101/10000 train_time:6291ms step_avg:62.28ms
+[2025-07-08 07:19:31] [Rank 0] step:121/10000 train_time:7616ms step_avg:62.94ms
+[2025-07-08 07:19:33] [Rank 0] step:141/10000 train_time:8944ms step_avg:63.43ms
+[2025-07-08 07:19:34] [Rank 0] step:161/10000 train_time:10273ms step_avg:63.81ms
+[2025-07-08 07:19:35] [Rank 0] step:181/10000 train_time:11857ms step_avg:65.51ms
+[2025-07-08 07:19:37] [Rank 0] step:201/10000 train_time:12987ms step_avg:64.61ms
+[2025-07-08 07:19:38] [Rank 0] step:221/10000 train_time:14322ms step_avg:64.81ms
+[2025-07-08 07:19:39] [Rank 0] step:241/10000 train_time:15654ms step_avg:64.95ms
+[2025-07-08 07:19:41] [Rank 0] step:261/10000 train_time:16988ms step_avg:65.09ms
+[2025-07-08 07:19:42] [Rank 0] step:281/10000 train_time:18322ms step_avg:65.20ms
+[2025-07-08 07:19:43] [Rank 0] step:301/10000 train_time:19656ms step_avg:65.30ms
+[2025-07-08 07:19:45] [Rank 0] step:321/10000 train_time:20992ms step_avg:65.39ms
+[2025-07-08 07:19:46] [Rank 0] step:341/10000 train_time:22327ms step_avg:65.47ms
+[2025-07-08 07:19:47] [Rank 0] step:361/10000 train_time:24333ms step_avg:67.40ms
+[2025-07-08 07:19:49] [Rank 0] step:381/10000 train_time:25053ms step_avg:65.76ms
+[2025-07-08 07:19:50] [Rank 0] step:401/10000 train_time:26387ms step_avg:65.80ms
+[2025-07-08 07:19:51] [Rank 0] step:421/10000 train_time:27720ms step_avg:65.84ms
+[2025-07-08 07:19:53] [Rank 0] step:441/10000 train_time:29054ms step_avg:65.88ms
+[2025-07-08 07:19:54] [Rank 0] step:461/10000 train_time:30388ms step_avg:65.92ms
+[2025-07-08 07:19:55] [Rank 0] step:481/10000 train_time:31723ms step_avg:65.95ms
+[2025-07-08 07:19:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 07:19:58] [Rank 0] PRINT: step:500/10000 train_loss:3.6045 val_loss:1.9714 train_time:33665ms step_avg:67.33ms
+[2025-07-08 07:19:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 07:19:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 07:19:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 07:25:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 07:25:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 07:25:25] [Rank 0] Total Loss: 5.2363
+[2025-07-08 07:25:25] [Rank 0] Total FTA: 0.0872
+[2025-07-08 07:25:25] [Rank 0] Group 0 Loss: 5.3086
+[2025-07-08 07:25:25] [Rank 0] Group 1 Loss: 5.2418
+[2025-07-08 07:25:25] [Rank 0] Group 2 Loss: 5.1753
+[2025-07-08 07:25:25] [Rank 0] Group 3 Loss: 5.1943
+[2025-07-08 07:25:25] [Rank 0] Group 4 Loss: 5.1459
+[2025-07-08 07:25:25] [Rank 0] Group 5 Loss: 5.1943
+[2025-07-08 07:25:25] [Rank 0] Group 6 Loss: 5.1868
+[2025-07-08 07:25:25] [Rank 0] Group 7 Loss: 5.3072
+[2025-07-08 07:25:25] [Rank 0] Group 8 Loss: 5.2249
+[2025-07-08 07:25:25] [Rank 0] Group 9 Loss: 5.2177
+[2025-07-08 07:25:25] [Rank 0] Group 10 Loss: 5.2651
+[2025-07-08 07:25:25] [Rank 0] Group 11 Loss: 5.2546
+[2025-07-08 07:25:25] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-08 07:25:25] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 07:25:25] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-08 07:25:25] [Rank 0] Group 3 FTA: 0.0781
+[2025-07-08 07:25:25] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-08 07:25:25] [Rank 0] Group 5 FTA: 0.1016
+[2025-07-08 07:25:25] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-08 07:25:25] [Rank 0] Group 7 FTA: 0.1120
+[2025-07-08 07:25:25] [Rank 0] Group 8 FTA: 0.0807
+[2025-07-08 07:25:25] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-08 07:25:25] [Rank 0] Group 10 FTA: 0.0703
+[2025-07-08 07:25:25] [Rank 0] Group 11 FTA: 0.0928
+[2025-07-08 07:25:25] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png
+[2025-07-08 07:25:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png
+[2025-07-08 07:25:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_loss_curve.png
+[2025-07-08 07:25:26] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_acc_curve.png
+[2025-07-08 07:25:26] [Rank 0] step:501/10000 train_time:33675ms step_avg:67.21ms
+[2025-07-08 07:25:28] [Rank 0] step:521/10000 train_time:34438ms step_avg:66.10ms
+[2025-07-08 07:25:29] [Rank 0] step:541/10000 train_time:36025ms step_avg:66.59ms
+[2025-07-08 07:25:30] [Rank 0] step:561/10000 train_time:37167ms step_avg:66.25ms
+[2025-07-08 07:25:32] [Rank 0] step:581/10000 train_time:38497ms step_avg:66.26ms
+[2025-07-08 07:25:33] [Rank 0] step:601/10000 train_time:39828ms step_avg:66.27ms
+[2025-07-08 07:25:34] [Rank 0] step:621/10000 train_time:41160ms step_avg:66.28ms
+[2025-07-08 07:25:36] [Rank 0] step:641/10000 train_time:42490ms step_avg:66.29ms
+[2025-07-08 07:25:37] [Rank 0] step:661/10000 train_time:43822ms step_avg:66.30ms
+[2025-07-08 07:25:38] [Rank 0] step:681/10000 train_time:45155ms step_avg:66.31ms
+[2025-07-08 07:25:40] [Rank 0] step:701/10000 train_time:46489ms step_avg:66.32ms
+[2025-07-08 07:25:41] [Rank 0] step:721/10000 train_time:48077ms step_avg:66.68ms
+[2025-07-08 07:25:42] [Rank 0] step:741/10000 train_time:49200ms step_avg:66.40ms
+[2025-07-08 07:25:44] [Rank 0] step:761/10000 train_time:50540ms step_avg:66.41ms
+[2025-07-08 07:25:45] [Rank 0] step:781/10000 train_time:51885ms step_avg:66.43ms
+[2025-07-08 07:25:46] [Rank 0] step:801/10000 train_time:53230ms step_avg:66.45ms
+[2025-07-08 07:25:48] [Rank 0] step:821/10000 train_time:54577ms step_avg:66.48ms
+[2025-07-08 07:25:49] [Rank 0] step:841/10000 train_time:55925ms step_avg:66.50ms
+[2025-07-08 07:25:51] [Rank 0] step:861/10000 train_time:57273ms step_avg:66.52ms
+[2025-07-08 07:25:52] [Rank 0] step:881/10000 train_time:58623ms step_avg:66.54ms
+[2025-07-08 07:25:53] [Rank 0] step:901/10000 train_time:60644ms step_avg:67.31ms
+[2025-07-08 07:25:55] [Rank 0] step:921/10000 train_time:61369ms step_avg:66.63ms
+[2025-07-08 07:25:56] [Rank 0] step:941/10000 train_time:62718ms step_avg:66.65ms
+[2025-07-08 07:25:57] [Rank 0] step:961/10000 train_time:64064ms step_avg:66.66ms
+[2025-07-08 07:25:59] [Rank 0] step:981/10000 train_time:65413ms step_avg:66.68ms
+[2025-07-08 07:26:00] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-08 07:26:01] [Rank 0] PRINT: step:1000/10000 train_loss:1.8220 val_loss:1.7389 train_time:67370ms step_avg:67.37ms
+[2025-07-08 07:26:01] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-08 07:26:01] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-08 07:26:01] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-08 07:31:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-08 07:31:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-08 07:31:28] [Rank 0] Total Loss: 5.7855
+[2025-07-08 07:31:28] [Rank 0] Total FTA: 0.0840
+[2025-07-08 07:31:28] [Rank 0] Group 0 Loss: 6.0063
+[2025-07-08 07:31:28] [Rank 0] Group 1 Loss: 5.7403
+[2025-07-08 07:31:28] [Rank 0] Group 2 Loss: 5.4495
+[2025-07-08 07:31:28] [Rank 0] Group 3 Loss: 5.7209
+[2025-07-08 07:31:28] [Rank 0] Group 4 Loss: 5.8464
+[2025-07-08 07:31:28] [Rank 0] Group 5 Loss: 5.7331
+[2025-07-08 07:31:28] [Rank 0] Group 6 Loss: 5.7092
+[2025-07-08 07:31:28] [Rank 0] Group 7 Loss: 5.8208
+[2025-07-08 07:31:28] [Rank 0] Group 8 Loss: 5.8047
+[2025-07-08 07:31:28] [Rank 0] Group 9 Loss: 5.7869
+[2025-07-08 07:31:28] [Rank 0] Group 10 Loss: 5.7896
+[2025-07-08 07:31:28] [Rank 0] Group 11 Loss: 5.7895
+[2025-07-08 07:31:28] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-08 07:31:28] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-08 07:31:28] [Rank 0] Group 2 FTA: 0.0677
+[2025-07-08 07:31:28] [Rank 0] Group 3 FTA: 0.1068
+[2025-07-08 07:31:28] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-08 07:31:28] [Rank 0] Group 5 FTA: 0.0417
+[2025-07-08 07:31:28] [Rank 0] Group 6 FTA: 0.0911
+[2025-07-08 07:31:28] [Rank 0] Group 7 FTA: 0.0859
+[2025-07-08 07:31:28] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-08 07:31:28] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-08 07:31:28] [Rank 0] Group 9 FTA: 0.1016 +[2025-07-08 07:31:28] [Rank 0] Group 10 FTA: 0.0918 +[2025-07-08 07:31:28] [Rank 0] Group 10 FTA: 0.0918 +[2025-07-08 07:31:28] [Rank 0] Group 11 FTA: 0.0742 +[2025-07-08 07:31:28] [Rank 0] Group 11 FTA: 0.0742 +[2025-07-08 07:31:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png +[2025-07-08 07:31:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png +[2025-07-08 07:31:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png +[2025-07-08 07:31:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png +[2025-07-08 07:31:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_loss_curve.png +[2025-07-08 07:31:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_loss_curve.png +[2025-07-08 07:31:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_acc_curve.png +[2025-07-08 07:31:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_acc_curve.png +[2025-07-08 07:31:30] [Rank 0] step:1001/10000 train_time:67380ms step_avg:67.31ms +[2025-07-08 07:31:30] [Rank 0] step:1001/10000 train_time:67380ms step_avg:67.31ms +[2025-07-08 07:31:31] [Rank 0] step:1021/10000 train_time:68132ms step_avg:66.73ms +[2025-07-08 07:31:31] [Rank 0] step:1021/10000 train_time:68132ms step_avg:66.73ms +[2025-07-08 07:31:32] [Rank 0] step:1041/10000 train_time:69471ms step_avg:66.73ms +[2025-07-08 07:31:32] [Rank 0] step:1041/10000 train_time:69471ms step_avg:66.73ms +[2025-07-08 07:31:34] [Rank 0] step:1061/10000 train_time:70811ms step_avg:66.74ms +[2025-07-08 07:31:34] [Rank 0] step:1061/10000 train_time:70811ms step_avg:66.74ms +[2025-07-08 07:31:35] [Rank 0] step:1081/10000 train_time:72152ms step_avg:66.75ms +[2025-07-08 07:31:35] [Rank 0] step:1081/10000 train_time:72152ms step_avg:66.75ms +[2025-07-08 07:31:37] [Rank 0] step:1101/10000 train_time:73548ms step_avg:66.80ms +[2025-07-08 07:31:37] [Rank 0] step:1101/10000 train_time:73548ms step_avg:66.80ms +[2025-07-08 07:31:38] [Rank 0] step:1121/10000 train_time:74889ms step_avg:66.81ms +[2025-07-08 07:31:38] [Rank 0] step:1121/10000 train_time:74889ms step_avg:66.81ms +[2025-07-08 07:31:39] [Rank 0] step:1141/10000 train_time:76237ms step_avg:66.82ms +[2025-07-08 07:31:39] [Rank 0] step:1141/10000 train_time:76237ms step_avg:66.82ms +[2025-07-08 07:31:41] [Rank 0] step:1161/10000 train_time:77579ms step_avg:66.82ms +[2025-07-08 07:31:41] [Rank 0] step:1161/10000 train_time:77579ms step_avg:66.82ms +[2025-07-08 07:31:42] [Rank 0] step:1181/10000 train_time:78921ms step_avg:66.83ms +[2025-07-08 07:31:42] [Rank 0] step:1181/10000 train_time:78921ms step_avg:66.83ms +[2025-07-08 07:31:43] [Rank 0] step:1201/10000 train_time:80264ms step_avg:66.83ms +[2025-07-08 07:31:43] [Rank 0] step:1201/10000 train_time:80264ms step_avg:66.83ms +[2025-07-08 07:31:45] [Rank 0] step:1221/10000 train_time:81608ms step_avg:66.84ms +[2025-07-08 07:31:45] [Rank 0] step:1221/10000 train_time:81608ms step_avg:66.84ms +[2025-07-08 
07:31:46] [Rank 0] step:1241/10000 train_time:82951ms step_avg:66.84ms +[2025-07-08 07:31:46] [Rank 0] step:1241/10000 train_time:82951ms step_avg:66.84ms +[2025-07-08 07:31:47] [Rank 0] step:1261/10000 train_time:84344ms step_avg:66.89ms +[2025-07-08 07:31:47] [Rank 0] step:1261/10000 train_time:84344ms step_avg:66.89ms +[2025-07-08 07:31:49] [Rank 0] step:1281/10000 train_time:85677ms step_avg:66.88ms +[2025-07-08 07:31:49] [Rank 0] step:1281/10000 train_time:85677ms step_avg:66.88ms +[2025-07-08 07:31:50] [Rank 0] step:1301/10000 train_time:87023ms step_avg:66.89ms +[2025-07-08 07:31:50] [Rank 0] step:1301/10000 train_time:87023ms step_avg:66.89ms +[2025-07-08 07:31:51] [Rank 0] step:1321/10000 train_time:88369ms step_avg:66.90ms +[2025-07-08 07:31:51] [Rank 0] step:1321/10000 train_time:88369ms step_avg:66.90ms +[2025-07-08 07:31:53] [Rank 0] step:1341/10000 train_time:89714ms step_avg:66.90ms +[2025-07-08 07:31:53] [Rank 0] step:1341/10000 train_time:89714ms step_avg:66.90ms +[2025-07-08 07:31:54] [Rank 0] step:1361/10000 train_time:91060ms step_avg:66.91ms +[2025-07-08 07:31:54] [Rank 0] step:1361/10000 train_time:91060ms step_avg:66.91ms +[2025-07-08 07:31:55] [Rank 0] step:1381/10000 train_time:92406ms step_avg:66.91ms +[2025-07-08 07:31:55] [Rank 0] step:1381/10000 train_time:92406ms step_avg:66.91ms +[2025-07-08 07:31:57] [Rank 0] step:1401/10000 train_time:93754ms step_avg:66.92ms +[2025-07-08 07:31:57] [Rank 0] step:1401/10000 train_time:93754ms step_avg:66.92ms +[2025-07-08 07:31:58] [Rank 0] step:1421/10000 train_time:95101ms step_avg:66.93ms +[2025-07-08 07:31:58] [Rank 0] step:1421/10000 train_time:95101ms step_avg:66.93ms +[2025-07-08 07:31:59] [Rank 0] step:1441/10000 train_time:97112ms step_avg:67.39ms +[2025-07-08 07:31:59] [Rank 0] step:1441/10000 train_time:97112ms step_avg:67.39ms +[2025-07-08 07:32:01] [Rank 0] step:1461/10000 train_time:97838ms step_avg:66.97ms +[2025-07-08 07:32:01] [Rank 0] step:1461/10000 train_time:97838ms step_avg:66.97ms +[2025-07-08 07:32:02] [Rank 0] step:1481/10000 train_time:99185ms step_avg:66.97ms +[2025-07-08 07:32:02] [Rank 0] step:1481/10000 train_time:99185ms step_avg:66.97ms +[2025-07-08 07:32:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:32:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:32:04] [Rank 0] PRINT: step:1500/10000 train_loss:1.6619 val_loss:1.5590 train_time:101145ms step_avg:67.43ms +[2025-07-08 07:32:04] [Rank 0] PRINT: step:1500/10000 train_loss:1.6619 val_loss:1.5590 train_time:101145ms step_avg:67.43ms +[2025-07-08 07:32:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:32:04] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:32:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 07:32:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 07:32:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 07:32:04] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-08 07:37:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 07:37:32] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-08 07:37:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 07:37:32] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-08 07:37:32] [Rank 0] Total Loss: 6.0521 +[2025-07-08 07:37:32] [Rank 0] Total Loss: 6.0521 +[2025-07-08 07:37:32] [Rank 0] Total FTA: 0.0934 +[2025-07-08 07:37:32] [Rank 0] Total FTA: 0.0934 +[2025-07-08 07:37:32] [Rank 0] Group 0 Loss: 6.2913 +[2025-07-08 07:37:32] [Rank 0] Group 0 Loss: 6.2913 +[2025-07-08 07:37:32] [Rank 0] Group 1 Loss: 5.8288 +[2025-07-08 07:37:32] [Rank 0] Group 1 Loss: 5.8288 +[2025-07-08 07:37:32] [Rank 0] Group 2 Loss: 5.6382 +[2025-07-08 07:37:32] [Rank 0] Group 2 Loss: 5.6382 +[2025-07-08 07:37:32] [Rank 0] Group 3 Loss: 5.9609 +[2025-07-08 07:37:32] [Rank 0] Group 3 Loss: 5.9609 +[2025-07-08 07:37:32] [Rank 0] Group 4 Loss: 6.0860 +[2025-07-08 07:37:32] [Rank 0] Group 4 Loss: 6.0860 +[2025-07-08 07:37:32] [Rank 0] Group 5 Loss: 5.9917 +[2025-07-08 07:37:32] [Rank 0] Group 5 Loss: 5.9917 +[2025-07-08 07:37:32] [Rank 0] Group 6 Loss: 5.9221 +[2025-07-08 07:37:32] [Rank 0] Group 6 Loss: 5.9221 +[2025-07-08 07:37:32] [Rank 0] Group 7 Loss: 6.1387 +[2025-07-08 07:37:32] [Rank 0] Group 7 Loss: 6.1387 +[2025-07-08 07:37:32] [Rank 0] Group 8 Loss: 6.0533 +[2025-07-08 07:37:32] [Rank 0] Group 8 Loss: 6.0533 +[2025-07-08 07:37:32] [Rank 0] Group 9 Loss: 6.1648 +[2025-07-08 07:37:32] [Rank 0] Group 9 Loss: 6.1648 +[2025-07-08 07:37:32] [Rank 0] Group 10 Loss: 6.0818 +[2025-07-08 07:37:32] [Rank 0] Group 10 Loss: 6.0818 +[2025-07-08 07:37:32] [Rank 0] Group 11 Loss: 6.1281 +[2025-07-08 07:37:32] [Rank 0] Group 11 Loss: 6.1281 +[2025-07-08 07:37:32] [Rank 0] Group 0 FTA: 0.1599 +[2025-07-08 07:37:32] [Rank 0] Group 0 FTA: 0.1599 +[2025-07-08 07:37:32] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-08 07:37:32] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-08 07:37:32] [Rank 0] Group 2 FTA: 0.1849 +[2025-07-08 07:37:32] [Rank 0] Group 2 FTA: 0.1849 +[2025-07-08 07:37:32] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-08 07:37:32] [Rank 0] Group 3 FTA: 0.0339 +[2025-07-08 07:37:32] [Rank 0] Group 4 FTA: 0.0521 +[2025-07-08 07:37:32] [Rank 0] Group 4 FTA: 0.0521 +[2025-07-08 07:37:32] [Rank 0] Group 5 FTA: 0.0391 +[2025-07-08 07:37:32] [Rank 0] Group 5 FTA: 0.0391 +[2025-07-08 07:37:32] [Rank 0] Group 6 FTA: 0.0911 +[2025-07-08 07:37:32] [Rank 0] Group 6 FTA: 0.0911 +[2025-07-08 07:37:32] [Rank 0] Group 7 FTA: 0.1198 +[2025-07-08 07:37:32] [Rank 0] Group 7 FTA: 0.1198 +[2025-07-08 07:37:32] [Rank 0] Group 8 FTA: 0.1068 +[2025-07-08 07:37:32] [Rank 0] Group 8 FTA: 0.1068 +[2025-07-08 07:37:32] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-08 07:37:32] [Rank 0] Group 9 FTA: 0.0898 +[2025-07-08 07:37:32] [Rank 0] Group 10 FTA: 0.1016 +[2025-07-08 07:37:32] [Rank 0] Group 10 FTA: 0.1016 +[2025-07-08 07:37:32] [Rank 0] Group 11 FTA: 0.0850 +[2025-07-08 07:37:32] [Rank 0] Group 11 FTA: 0.0850 +[2025-07-08 07:37:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png +[2025-07-08 07:37:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_loss_curves.png +[2025-07-08 07:37:33] [Rank 0] [✓] Per-Class FTA curve updated 
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png +[2025-07-08 07:37:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/per_class_acc_curves.png +[2025-07-08 07:37:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_loss_curve.png +[2025-07-08 07:37:33] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_loss_curve.png +[2025-07-08 07:37:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_acc_curve.png +[2025-07-08 07:37:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_43/total_acc_curve.png +[2025-07-08 07:37:34] [Rank 0] step:1501/10000 train_time:101155ms step_avg:67.39ms +[2025-07-08 07:37:34] [Rank 0] step:1501/10000 train_time:101155ms step_avg:67.39ms +[2025-07-08 07:37:35] [Rank 0] step:1521/10000 train_time:101916ms step_avg:67.01ms +[2025-07-08 07:37:35] [Rank 0] step:1521/10000 train_time:101916ms step_avg:67.01ms +[2025-07-08 07:37:36] [Rank 0] step:1541/10000 train_time:103255ms step_avg:67.01ms +[2025-07-08 07:37:36] [Rank 0] step:1541/10000 train_time:103255ms step_avg:67.01ms +[2025-07-08 07:37:38] [Rank 0] step:1561/10000 train_time:104596ms step_avg:67.01ms +[2025-07-08 07:37:38] [Rank 0] step:1561/10000 train_time:104596ms step_avg:67.01ms +[2025-07-08 07:37:39] [Rank 0] step:1581/10000 train_time:105938ms step_avg:67.01ms +[2025-07-08 07:37:39] [Rank 0] step:1581/10000 train_time:105938ms step_avg:67.01ms +[2025-07-08 07:37:40] [Rank 0] step:1601/10000 train_time:107280ms step_avg:67.01ms +[2025-07-08 07:37:40] [Rank 0] step:1601/10000 train_time:107280ms step_avg:67.01ms +[2025-07-08 07:37:42] [Rank 0] step:1621/10000 train_time:108873ms step_avg:67.16ms +[2025-07-08 07:37:42] [Rank 0] step:1621/10000 train_time:108873ms step_avg:67.16ms +[2025-07-08 07:37:43] [Rank 0] step:1641/10000 train_time:110009ms step_avg:67.04ms +[2025-07-08 07:37:43] [Rank 0] step:1641/10000 train_time:110009ms step_avg:67.04ms +[2025-07-08 07:37:45] [Rank 0] step:1661/10000 train_time:111351ms step_avg:67.04ms +[2025-07-08 07:37:45] [Rank 0] step:1661/10000 train_time:111351ms step_avg:67.04ms +[2025-07-08 07:37:46] [Rank 0] step:1681/10000 train_time:112695ms step_avg:67.04ms +[2025-07-08 07:37:46] [Rank 0] step:1681/10000 train_time:112695ms step_avg:67.04ms +[2025-07-08 07:37:47] [Rank 0] step:1701/10000 train_time:114041ms step_avg:67.04ms +[2025-07-08 07:37:47] [Rank 0] step:1701/10000 train_time:114041ms step_avg:67.04ms +[2025-07-08 07:37:49] [Rank 0] step:1721/10000 train_time:115385ms step_avg:67.05ms +[2025-07-08 07:37:49] [Rank 0] step:1721/10000 train_time:115385ms step_avg:67.05ms +[2025-07-08 07:37:50] [Rank 0] step:1741/10000 train_time:116730ms step_avg:67.05ms +[2025-07-08 07:37:50] [Rank 0] step:1741/10000 train_time:116730ms step_avg:67.05ms +[2025-07-08 07:37:51] [Rank 0] step:1761/10000 train_time:118075ms step_avg:67.05ms +[2025-07-08 07:37:51] [Rank 0] step:1761/10000 train_time:118075ms step_avg:67.05ms +[2025-07-08 07:37:53] [Rank 0] step:1781/10000 train_time:119422ms step_avg:67.05ms +[2025-07-08 07:37:53] [Rank 0] step:1781/10000 train_time:119422ms step_avg:67.05ms +[2025-07-08 07:37:54] [Rank 0] step:1801/10000 train_time:120766ms step_avg:67.06ms +[2025-07-08 07:37:54] [Rank 0] 
step:1801/10000 train_time:120766ms step_avg:67.06ms +[2025-07-08 07:37:55] [Rank 0] step:1821/10000 train_time:122171ms step_avg:67.09ms +[2025-07-08 07:37:55] [Rank 0] step:1821/10000 train_time:122171ms step_avg:67.09ms +[2025-07-08 07:37:57] [Rank 0] step:1841/10000 train_time:123516ms step_avg:67.09ms +[2025-07-08 07:37:57] [Rank 0] step:1841/10000 train_time:123516ms step_avg:67.09ms +[2025-07-08 07:37:58] [Rank 0] step:1861/10000 train_time:124862ms step_avg:67.09ms +[2025-07-08 07:37:58] [Rank 0] step:1861/10000 train_time:124862ms step_avg:67.09ms +[2025-07-08 07:37:59] [Rank 0] step:1881/10000 train_time:126207ms step_avg:67.10ms +[2025-07-08 07:37:59] [Rank 0] step:1881/10000 train_time:126207ms step_avg:67.10ms +[2025-07-08 07:38:01] [Rank 0] step:1901/10000 train_time:127553ms step_avg:67.10ms +[2025-07-08 07:38:01] [Rank 0] step:1901/10000 train_time:127553ms step_avg:67.10ms +[2025-07-08 07:38:02] [Rank 0] step:1921/10000 train_time:128902ms step_avg:67.10ms +[2025-07-08 07:38:02] [Rank 0] step:1921/10000 train_time:128902ms step_avg:67.10ms +[2025-07-08 07:38:03] [Rank 0] step:1941/10000 train_time:130250ms step_avg:67.10ms +[2025-07-08 07:38:03] [Rank 0] step:1941/10000 train_time:130250ms step_avg:67.10ms +[2025-07-08 07:38:05] [Rank 0] step:1961/10000 train_time:131596ms step_avg:67.11ms +[2025-07-08 07:38:05] [Rank 0] step:1961/10000 train_time:131596ms step_avg:67.11ms +[2025-07-08 07:38:06] [Rank 0] step:1981/10000 train_time:132942ms step_avg:67.11ms +[2025-07-08 07:38:06] [Rank 0] step:1981/10000 train_time:132942ms step_avg:67.11ms +[2025-07-08 07:38:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:38:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-08 07:38:08] [Rank 0] PRINT: step:2000/10000 train_loss:1.4388 val_loss:1.3452 train_time:134942ms step_avg:67.47ms +[2025-07-08 07:38:08] [Rank 0] PRINT: step:2000/10000 train_loss:1.4388 val_loss:1.3452 train_time:134942ms step_avg:67.47ms +[2025-07-08 07:38:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:38:08] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-08 07:38:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-08 07:38:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-08 07:38:09] [Rank 0] Evaluation set size after sampling: 5633
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/config.json b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..52a75102d02a77920904195bc807fb73c972d1b1
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 45,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "80cdfa7b-ac35-4ac9-8103-2b9d277e69e0",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..7099ea3db97fbce62a04c630609de0a29059a362
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32f1f55d478674537024c200eef1363acc9c9e69d32e692ae8fdf509e6502942
+size 434047
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..418db9cc251e3e0dd443bb40b0876974f2e2871a
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:746be3cf481e6e82ce2260f1f799bfb5d2fb09e2f9f9ab34c758b86abc9e7649
+size 469980
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..c77dd3ebc807e832fc1a76bef724ed10b4e3a888
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ea5a3edbe291530899e3bfc2fcac6dd33df652ee78377ea487ba33e3f1d65089
+size 106482
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..d3300e3af13a77b2caa1c76825f943d78dcada31
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b99efad4bd2329e12e608fccebf1504c92eacdeb8e15b8eee4aedbd1b0838cdb
+size 115699
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/training_log_80cdfa7b-ac35-4ac9-8103-2b9d277e69e0.txt b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/training_log_80cdfa7b-ac35-4ac9-8103-2b9d277e69e0.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aac9792dbcf0a86487f3e0a70ef0a4c28af54065
--- /dev/null
+++ 
b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/training_log_80cdfa7b-ac35-4ac9-8103-2b9d277e69e0.txt
@@ -0,0 +1,5132 @@
+[2025-07-07 06:07:22] [Rank 0] PRINT: --- Script Start: Mon Jul 7 06:07:22 2025 ---
+[2025-07-07 06:07:22] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=45, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-07 06:07:22] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 06:07:22] [Rank 0] PRINT: Using fixed seed: 45
+[2025-07-07 06:07:22] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45
+[2025-07-07 06:07:22] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read() # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward() # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt") # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom) # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True) # Print immediately for all ranks
+
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
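+        # Shard layout: a 256-int32 (1024-byte) header holding the magic number,
+        # format version, and token count, followed by the raw uint16 token
+        # stream; the header is skipped below before reading 2 bytes per token
+        # into pinned host memory so the later H2D copies can be asynchronous.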
f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files) # cycle through the shards indefinitely, so training can run for multiple epochs
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
+
+
+
+
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
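+                         # QK = the query/key projections (q_w, k_w); VO = the value and
+                         # output projections (v_w, attn.c_proj); W_1/W_2 = the MLP's c_fc
+                         # and c_proj weights, matching the parameter groups assembled below.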
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
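+# Run directories are keyed by the full configuration:
+# mode_{optimizer_mode}_param_{model_parameterization}_lr_{adam_lr}_seed_{seed},
+# e.g. logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45 for the CLI args above,
+# so every (optimizer mode, learning rate, seed) combination logs to its own folder.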
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s) # Print to stdout for master process
+
+        # Append the message exactly once to the run's logfile
+        if logfile:
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code) # Log the code
+# ... (other initial logs)
+
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
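+        # First-token accuracy (FTA) counts a sample as correct when the model's
+        # argmax prediction at the final prompt position equals the first BPE token
+        # of the reference answer; the answer is encoded with a leading space below
+        # because that is how it follows the question mark in the training text.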
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations # progress in training
+    # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1): # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
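+# Worked example of the schedule above, using the defaults from Hyperparameters
+# (num_iterations=10000, cooldown_frac=0.8): the multiplier stays at 1.0 through
+# step 2000, then decays linearly toward 0.1, e.g.
+#   get_lr(0) == 1.0, get_lr(2000) == 1.0, get_lr(6000) == 0.55, get_lr(10000) == 0.1
+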
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0) # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
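+# Example of the window schedule above: the sliding window grows linearly with
+# training progress, rounded up to a multiple of 128 tokens, so step 0 uses a
+# 128-token window (1 block), the halfway point uses 896 tokens (7 blocks), and
+# the final step uses 1792 tokens (14 blocks).
+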
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile the eager 'model'; 'model_compiled' is the handle used for all train/val steps below.
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+
+model_compiled.load_state_dict(initial_state["model"])
+for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+    opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
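+# Note on the warmup above: the ten throwaway steps exist only to trigger
+# TorchInductor compilation and CUDA allocator warmup; snapshotting and then
+# restoring the model/optimizer state dicts keeps those steps from affecting
+# the real run.
+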
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
tuple) and len(result) == 2: + loss, logits = result + print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True) + else: + print0(f"PRINT: Model returns: {type(result)}", console=True) + except Exception as e: + print0(f"PRINT: Model test still fails: {e}", console=True) + + + +# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +if exp_args.model_parameterization == "qkvo": + print0("PRINT: Collecting parameters for optimizers...", console=True) + head_params = [model.lm_head.weight] + embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds] + + # Granular collection for attention and MLP parts + attn_q_params = [] + attn_k_params = [] + attn_v_params = [] + attn_o_params = [] # W_O from c_proj + mlp_fc_params = [] + mlp_proj_params = [] + + for block_module in model.blocks: + if block_module.attn is not None: + # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class + if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w) + else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w) + else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True) + if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w) + else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True) + attn_o_params.append(block_module.attn.c_proj.weight) + if block_module.mlp is not None: + mlp_fc_params.append(block_module.mlp.c_fc.weight) + mlp_proj_params.append(block_module.mlp.c_proj.weight) + + # Combine into logical groups for experiments + attn_qk_group = attn_q_params + attn_k_params + attn_vo_group = attn_v_params + attn_o_params + all_attn_matrices = attn_qk_group + attn_vo_group + mlp_w1_group = mlp_fc_params + mlp_w2_group = mlp_proj_params + all_mlp_matrices = mlp_fc_params + mlp_proj_params + + # Scalar parameters (all others not explicitly grouped as matrices) + matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices) + scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check] + for p_scalar in scalar_params: # Sanity check + if p_scalar.ndim >=2: + print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True) + + + # Determine parameter distribution based on optimizer_mode + muon_params_target_list = [] + adam_matrix_target_list = [] # Matrices that Adam will handle specifically + adam_matrix_lr = exp_args.adam_lr # LR for matrices if Adam handles them (can be tuned) + + current_optimizer_mode = exp_args.optimizer_mode + print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True) + + if current_optimizer_mode == 0: # Original behavior: Muon on all "hidden_matrix_params" + print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True) + muon_params_target_list = all_attn_matrices + all_mlp_matrices + # Adam handles embeds, head, scalars by default. No extra matrices for Adam here. + elif current_optimizer_mode == 1: # Muon on QK, Adam on VO and MLP + print0(f"PRINT: Mode 1: Muon on QK Attn. 
Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_qk_group + adam_matrix_target_list = attn_vo_group + all_mlp_matrices + elif current_optimizer_mode == 2: # Muon on VO, Adam on QK and MLP + print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + adam_matrix_target_list = attn_qk_group + all_mlp_matrices + elif current_optimizer_mode == 3: # Muon on All Attn (QKVO), Adam on MLP + print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_attn_matrices + adam_matrix_target_list = all_mlp_matrices + elif current_optimizer_mode == 4: # Muon on MLP, Adam on All Attn (QKVO) + print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = all_mlp_matrices + adam_matrix_target_list = all_attn_matrices + elif current_optimizer_mode == 5: # NEW MODE 5 - All Adam + print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = [] + adam_matrix_target_list = all_attn_matrices + all_mlp_matrices # All matrices to Adam + elif current_optimizer_mode == 6: # Muon on W_2 MLP, Adam on attn, W_1 MLP + print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = mlp_w2_group + adam_matrix_target_list = all_attn_matrices + mlp_w1_group + elif current_optimizer_mode == 7: # Muon on VO Attn, MLP, Adam on QK Attn + print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + all_mlp_matrices + adam_matrix_target_list = attn_qk_group + elif current_optimizer_mode == 8: # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP + print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. 
Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True) + muon_params_target_list = attn_vo_group + mlp_w2_group + adam_matrix_target_list = attn_qk_group + mlp_w1_group + else: + raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}") + + # Adam optimizer setup + adam_param_groups_config = [ + #dict(params=head_params, lr=0.22), + #dict(params=embed_params, lr=0.6), + #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam + dict(params=head_params, lr=exp_args.adam_lr ), + dict(params=embed_params, lr=exp_args.adam_lr ), + dict(params=scalar_params, lr=exp_args.adam_lr ) # Scalar params always go to Adam + ] + # Add matrices specifically assigned to Adam for this experiment mode + if adam_matrix_target_list: + # Ensure adam_matrix_target_list is flat and contains Parameters + flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None] + if flat_adam_matrices: # Only add group if there are params + adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr)) + + # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty) + adam_param_groups_config = [g for g in adam_param_groups_config if g['params']] + optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)#add weight_decay=0.01 to Adam + optimizers = [optimizer1] # Start with Adam + + # Muon optimizer setup + if muon_params_target_list: + # Ensure muon_params_target_list is flat, unique, and contains Parameters + flat_unique_muon_params = [] + seen_muon_ids = set() + for sublist_or_p in muon_params_target_list: + for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]): + if p is not None and id(p) not in seen_muon_ids: + flat_unique_muon_params.append(p) + seen_muon_ids.add(id(p)) + + if flat_unique_muon_params: # Only create Muon if it has parameters + optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size) # Pass nesterov, ns_steps + optimizers.append(optimizer2) + else: + print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True) + optimizer2 = None # Explicitly set to None if not created + else: + print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True) + optimizer2 = None # Explicitly set to None + + print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True) + if optimizer2: + print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True) + # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP --- +elif exp_args.model_parameterization == "whole": + hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n] + embed_params = [p for n, p in model.named_parameters() if "embed" in n] + scalar_params = [p for p in model.parameters() if p.ndim < 2] + head_params = [model.lm_head.weight] + + # init the optimizer(s) + adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)] + # small adam epsilon by @YouJiacheng. 
this is an alternate method of fixing the world_size dependence + # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094 + optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True) + optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size) + optimizers = [optimizer1, optimizer2] + +for opt in optimizers: + for group in opt.param_groups: + group["initial_lr"] = group["lr"] + +# learning rate schedule: stable then decay (KEEP AS IS, but check assert) +def get_lr(step: int): + x = step / args.num_iterations # progress in training + # assert 0 <= x < 1 # Original assert, might fail on last step if step == num_iterations + # --- MODIFICATION: Adjust assert for LR schedule --- + if not (0 <= x <= 1): # Allow x=1 for the last step + x = min(max(x, 0.0), 1.0) # Clamp x if step goes beyond num_iterations + # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False) # Optional log + + if x < 1 - args.cooldown_frac: + return 1.0 + else: + # Ensure cooldown_frac is not zero to avoid division by zero + w = (1 - x) / max(args.cooldown_frac, 1e-9) + return w * 1.0 + (1 - w) * 0.1 + + +# attention window size schedule (KEEP AS IS) +def next_multiple_of_n(v: float | int, *, n: int): + return next(x for x in range(n, int(v) + 1 + n, n) if x >= v) +@lru_cache(1) +def get_window_size_blocks_helper(window_size: int): + return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True) +def get_window_size_blocks(step: int): + x = step / args.num_iterations # progress in training + # --- MODIFICATION: Adjust assert for window size schedule --- + if not (0 <= x <= 1): + x = min(max(x, 0.0), 1.0) # Clamp x + + # Ensure window_size is at least 128 + window_size = max(128, next_multiple_of_n(1728 * x, n=128)) + return get_window_size_blocks_helper(window_size) + +print0("PRINT: Compiling model with TorchInductor...", console=True) +# Use 'model' for compilation, not 'model_compiled' before it's defined + +model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune") +print0("PRINT: Model compilation complete.", console=True) + +######################################## +# Warmup kernels +######################################## +print0("PRINT: Starting warmup...", console=True) +warmup_steps = 10 +initial_state = dict( + model=copy.deepcopy(model_compiled.state_dict()), + optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers] +) + +for i in range(warmup_steps): + inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda") + loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0)) + loss.backward() + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + for opt in optimizers: + opt.step() + model_compiled.zero_grad(set_to_none=True) + model_compiled.load_state_dict(initial_state["model"]) + for opt, opt_state in zip(optimizers, initial_state["optimizers"]): + opt.load_state_dict(opt_state) + +del initial_state +print0("PRINT: Warmup complete.", console=True) +torch.cuda.synchronize() + +######################################## +# Training and validation +######################################## +print0("PRINT: Starting training...", console=True) +train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size) +train_loss_sum = torch.zeros(1, device=device) 
+train_step_count = torch.zeros(1, device=device) +training_time_ms = 0 +torch.cuda.synchronize() +t0 = time.perf_counter() +train_steps = args.num_iterations + + + +if master_process: + tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2') + + history = { + 'per_class_loss': defaultdict(dict), + 'per_class_acc': defaultdict(dict), + 'total_loss': {}, + 'total_acc': {} + } + # ------------------------------------ + QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl" + M_FOR_POWERLAW = 11 + NUM_SAMPLES_FOR_DETAIL_EVAL = 5000 + + +for step in range(train_steps + 1): + last_step = (step == train_steps) + + # --------- VALIDATION SECTION --------- + if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0): + torch.cuda.synchronize() + if step > 0: + current_run_time = 1000 * (time.perf_counter() - t0) + training_time_ms += current_run_time + + model_compiled.eval() + val_batch_size = world_size * args.val_seq_len + if args.val_tokens % val_batch_size != 0: + print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True) + + val_num_steps = args.val_tokens // val_batch_size + val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size) + val_loss_sum = torch.zeros(1, device=device) + actual_val_steps = 0 + + with torch.no_grad(): + for val_i in range(val_num_steps): + try: + inputs, targets = next(val_loader) + loss_val = model_compiled(inputs, targets, get_window_size_blocks(step)) + val_loss_sum += loss_val + actual_val_steps += 1 + except StopIteration: + print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True) + break + + if actual_val_steps > 0: + val_loss_avg = val_loss_sum / actual_val_steps + else: + val_loss_avg = torch.tensor(float('nan'), device=device) + print0(f"PRINT: Warning: No validation steps were completed. 
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        # average the training loss accumulated since the last validation
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        #if world_size > 1:
+        #    ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+        #    ft_correct_tensor = torch.tensor(ft_correct, device=device)
+        #    ft_total_tensor = torch.tensor(ft_total, device=device)
+        #    dist.broadcast(ft_acc_tensor, 0)
+        #    dist.broadcast(ft_correct_tensor, 0)
+        #    dist.broadcast(ft_total_tensor, 0)
+        #    first_token_acc = ft_acc_tensor.item()
+        #    ft_correct = int(ft_correct_tensor.item())
+        #    ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            # copy the current weights into the uncompiled model kept for inference
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f" Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f" Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f" Group {group_id} FTA: {acc:.4f}", console=True)
+
+            # record this step's metrics, keyed by the step number as a string
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
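+            # For reference, after the first two evals recorded in the run log later in
+            # this file (steps 500 and 1000), `history` holds values like the following
+            # (numbers taken from that log; only group '0' shown):
+            # history = {
+            #     'total_loss':     {'500': 5.1270, '1000': 5.6660},
+            #     'total_acc':      {'500': 0.0921, '1000': 0.0889},
+            #     'per_class_loss': {'0': {'500': 5.2441, '1000': 5.9687}, ...},
+            #     'per_class_acc':  {'0': {'500': 0.1795, '1000': 0.1599}, ...},
+            # }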
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 06:07:22] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 06:07:22] [Rank 0] PRINT: Constructing model...
+[2025-07-07 06:07:24] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 06:07:24] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 06:07:24] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 06:07:25] [Rank 0] PRINT: Model test - Result type:
+[2025-07-07 06:07:25] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 06:07:25] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 06:07:25] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 06:07:25] [Rank 0] PRINT: Model returns:
+[2025-07-07 06:07:25] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 06:07:25] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-07 06:07:25] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005).
+[2025-07-07 06:07:25] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-07 06:07:25] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-07 06:07:25] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 06:07:25] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 06:07:25] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 06:08:31] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 06:08:31] [Rank 0] PRINT: Starting training...
+[2025-07-07 06:08:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:08:39] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 06:08:40] [Rank 0] step:21/10000 train_time:814ms step_avg:38.75ms
+[2025-07-07 06:08:41] [Rank 0] step:41/10000 train_time:2124ms step_avg:51.80ms
+[2025-07-07 06:08:43] [Rank 0] step:61/10000 train_time:3439ms step_avg:56.38ms
+[2025-07-07 06:08:44] [Rank 0] step:81/10000 train_time:4756ms step_avg:58.71ms
+[2025-07-07 06:08:45] [Rank 0] step:101/10000 train_time:6074ms step_avg:60.14ms
+[2025-07-07 06:08:47] [Rank 0] step:121/10000 train_time:7394ms step_avg:61.11ms
+[2025-07-07 06:08:48] [Rank 0] step:141/10000 train_time:8718ms step_avg:61.83ms
+[2025-07-07 06:08:49] [Rank 0] step:161/10000 train_time:10046ms step_avg:62.40ms
+[2025-07-07 06:08:51] [Rank 0] step:181/10000 train_time:11374ms step_avg:62.84ms
+[2025-07-07 06:08:52] [Rank 0] step:201/10000 train_time:12778ms step_avg:63.57ms
+[2025-07-07 06:08:53] [Rank 0] step:221/10000 train_time:14109ms step_avg:63.84ms
+[2025-07-07 06:08:55] [Rank 0] step:241/10000 train_time:15443ms step_avg:64.08ms
+[2025-07-07 06:08:56] [Rank 0] step:261/10000 train_time:16777ms step_avg:64.28ms
+[2025-07-07 06:08:57] [Rank 0] step:281/10000 train_time:18112ms step_avg:64.45ms
+[2025-07-07 06:08:59] [Rank 0] step:301/10000 train_time:19446ms step_avg:64.60ms
+[2025-07-07 06:09:00] [Rank 0] step:321/10000 train_time:20780ms step_avg:64.73ms
+[2025-07-07 06:09:01] [Rank 0] step:341/10000 train_time:22115ms step_avg:64.85ms
+[2025-07-07 06:09:03] [Rank 0] step:361/10000 train_time:23453ms step_avg:64.97ms
+[2025-07-07 06:09:04] [Rank 0] step:381/10000 train_time:24845ms step_avg:65.21ms
+[2025-07-07 06:09:05] [Rank 0] step:401/10000 train_time:26181ms step_avg:65.29ms
+[2025-07-07 06:09:07] [Rank 0] step:421/10000 train_time:27517ms step_avg:65.36ms
+[2025-07-07 06:09:08] [Rank 0] step:441/10000 train_time:28854ms step_avg:65.43ms
+[2025-07-07 06:09:09] [Rank 0] step:461/10000 train_time:30190ms step_avg:65.49ms
+[2025-07-07 06:09:11] [Rank 0] step:481/10000 train_time:31526ms step_avg:65.54ms
+[2025-07-07 06:09:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:09:13] [Rank 0] PRINT: step:500/10000 train_loss:3.7724 val_loss:1.9472 train_time:33472ms step_avg:66.94ms
+[2025-07-07 06:09:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:09:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:09:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:14:32] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:14:32] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:14:32] [Rank 0] Total Loss: 5.1270
+[2025-07-07 06:14:32] [Rank 0] Total FTA: 0.0921
+[2025-07-07 06:14:32] [Rank 0] Group 0 Loss: 5.2441
+[2025-07-07 06:14:32] [Rank 0] Group 1 Loss: 5.0096
+[2025-07-07 06:14:32] [Rank 0] Group 2 Loss: 5.0722
+[2025-07-07 06:14:32] [Rank 0] Group 3 Loss: 5.0609
+[2025-07-07 06:14:32] [Rank 0] Group 4 Loss: 5.1242
+[2025-07-07 06:14:32] [Rank 0] Group 5 Loss: 5.1275
+[2025-07-07 06:14:32] [Rank 0] Group 6 Loss: 5.0830
+[2025-07-07 06:14:32] [Rank 0] Group 7 Loss: 5.1289
+[2025-07-07 06:14:32] [Rank 0] Group 8 Loss: 5.0988
+[2025-07-07 06:14:32] [Rank 0] Group 9 Loss: 5.1383
+[2025-07-07 06:14:32] [Rank 0] Group 10 Loss: 5.1508
+[2025-07-07 06:14:32] [Rank 0] Group 11 Loss: 5.1407
+[2025-07-07 06:14:32] [Rank 0] Group 0 FTA: 0.1795
+[2025-07-07 06:14:32] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:14:32] [Rank 0] Group 2 FTA: 0.0729
+[2025-07-07 06:14:32] [Rank 0] Group 3 FTA: 0.0964
+[2025-07-07 06:14:32] [Rank 0] Group 4 FTA: 0.0130
+[2025-07-07 06:14:32] [Rank 0] Group 5 FTA: 0.1146
+[2025-07-07 06:14:32] [Rank 0] Group 6 FTA: 0.0990
+[2025-07-07 06:14:32] [Rank 0] Group 7 FTA: 0.1042
+[2025-07-07 06:14:32] [Rank 0] Group 8 FTA: 0.0703
+[2025-07-07 06:14:32] [Rank 0] Group 9 FTA: 0.0898
+[2025-07-07 06:14:32] [Rank 0] Group 10 FTA: 0.1016
+[2025-07-07 06:14:33] [Rank 0] Group 11 FTA: 0.0850
+[2025-07-07 06:14:33] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 06:14:33] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 06:14:34] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 06:14:34] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 06:14:34] [Rank 0] step:501/10000 train_time:33481ms step_avg:66.83ms
+[2025-07-07 06:14:35] [Rank 0] step:521/10000 train_time:34219ms step_avg:65.68ms
+[2025-07-07 06:14:37] [Rank 0] step:541/10000 train_time:36226ms step_avg:66.96ms
+[2025-07-07 06:14:38] [Rank 0] step:561/10000 train_time:36941ms step_avg:65.85ms
+[2025-07-07 06:14:39] [Rank 0] step:581/10000 train_time:38268ms step_avg:65.87ms
+[2025-07-07 06:14:41] [Rank 0] step:601/10000 train_time:39598ms step_avg:65.89ms
+[2025-07-07 06:14:42] [Rank 0] step:621/10000 train_time:40928ms step_avg:65.91ms
+[2025-07-07 06:14:43] [Rank 0] step:641/10000 train_time:42258ms step_avg:65.93ms
+[2025-07-07 06:14:45] [Rank 0] step:661/10000 train_time:43588ms step_avg:65.94ms
+[2025-07-07 06:14:46] [Rank 0] step:681/10000 train_time:44919ms step_avg:65.96ms
+[2025-07-07 06:14:47] [Rank 0] step:701/10000 train_time:46251ms step_avg:65.98ms
+[2025-07-07 06:14:49] [Rank 0] step:721/10000 train_time:47585ms step_avg:66.00ms
+[2025-07-07 06:14:50] [Rank 0] step:741/10000 train_time:48977ms step_avg:66.10ms
+[2025-07-07 06:14:51] [Rank 0] step:761/10000 train_time:50323ms step_avg:66.13ms
+[2025-07-07 06:14:53] [Rank 0] step:781/10000 train_time:51666ms step_avg:66.15ms
+[2025-07-07 06:14:54] [Rank 0] step:801/10000 train_time:53009ms step_avg:66.18ms
+[2025-07-07 06:14:55] [Rank 0] step:821/10000 train_time:54353ms step_avg:66.20ms
+[2025-07-07 06:14:57] [Rank 0] step:841/10000 train_time:55696ms step_avg:66.23ms
+[2025-07-07 06:14:58] [Rank 0] step:861/10000 train_time:57040ms step_avg:66.25ms
+[2025-07-07 06:14:59] [Rank 0] step:881/10000 train_time:58385ms step_avg:66.27ms
+[2025-07-07 06:15:01] [Rank 0] step:901/10000 train_time:59730ms step_avg:66.29ms
+[2025-07-07 06:15:02] [Rank 0] step:921/10000 train_time:61113ms step_avg:66.35ms
+[2025-07-07 06:15:03] [Rank 0] step:941/10000 train_time:62461ms step_avg:66.38ms
+[2025-07-07 06:15:05] [Rank 0] step:961/10000 train_time:63807ms step_avg:66.40ms
+[2025-07-07 06:15:06] [Rank 0] step:981/10000 train_time:65154ms step_avg:66.42ms
+[2025-07-07 06:15:07] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:15:08] [Rank 0] PRINT: step:1000/10000 train_loss:1.8357 val_loss:1.7593 train_time:67112ms step_avg:67.11ms
+[2025-07-07 06:15:08] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:15:09] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:15:09] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:20:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:20:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:20:29] [Rank 0] Total Loss: 5.6660
+[2025-07-07 06:20:29] [Rank 0] Total FTA: 0.0889
+[2025-07-07 06:20:29] [Rank 0] Group 0 Loss: 5.9687
+[2025-07-07 06:20:29] [Rank 0] Group 1 Loss: 5.5624
+[2025-07-07 06:20:29] [Rank 0] Group 2 Loss: 5.4118
+[2025-07-07 06:20:29] [Rank 0] Group 3 Loss: 5.7251
+[2025-07-07 06:20:29] [Rank 0] Group 4 Loss: 5.6802
+[2025-07-07 06:20:29] [Rank 0] Group 5 Loss: 5.6601
+[2025-07-07 06:20:29] [Rank 0] Group 6 Loss: 5.5433
+[2025-07-07 06:20:29] [Rank 0] Group 7 Loss: 5.6500
+[2025-07-07 06:20:29] [Rank 0] Group 8 Loss: 5.6048
+[2025-07-07 06:20:29] [Rank 0] Group 9 Loss: 5.5988
+[2025-07-07 06:20:29] [Rank 0] Group 10 Loss: 5.6202
+[2025-07-07 06:20:29] [Rank 0] Group 11 Loss: 5.6624
+[2025-07-07 06:20:29] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-07 06:20:29] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:20:29] [Rank 0] Group 2 FTA: 0.0938
+[2025-07-07 06:20:29] [Rank 0] Group 3 FTA: 0.0964
+[2025-07-07 06:20:29] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 06:20:29] [Rank 0] Group 5 FTA: 0.0495
+[2025-07-07 06:20:29] [Rank 0] Group 6 FTA: 0.0703
+[2025-07-07 06:20:29] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-07 06:20:29] [Rank 0] Group 8 FTA: 0.0885
+[2025-07-07 06:20:29] [Rank 0] Group 9 FTA: 0.0977
+[2025-07-07 06:20:29] [Rank 0] Group 10 FTA: 0.0957
+[2025-07-07 06:20:29] [Rank 0] Group 11 FTA: 0.1064
+[2025-07-07 06:20:30] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 06:20:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 06:20:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 06:20:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 06:20:31] [Rank 0] step:1001/10000 train_time:67121ms step_avg:67.05ms
+[2025-07-07 06:20:32] [Rank 0] step:1021/10000 train_time:67868ms step_avg:66.47ms
+[2025-07-07 06:20:33] [Rank 0] step:1041/10000 train_time:69204ms step_avg:66.48ms
+[2025-07-07 06:20:35] [Rank 0] step:1061/10000 train_time:70542ms step_avg:66.49ms
+[2025-07-07 06:20:36] [Rank 0] step:1081/10000 train_time:71931ms step_avg:66.54ms
+[2025-07-07 06:20:37] [Rank 0] step:1101/10000 train_time:73264ms step_avg:66.54ms
+[2025-07-07 06:20:39] [Rank 0] step:1121/10000 train_time:74606ms step_avg:66.55ms
+[2025-07-07 06:20:40] [Rank 0] step:1141/10000 train_time:75947ms step_avg:66.56ms
+[2025-07-07 06:20:41] [Rank 0] step:1161/10000 train_time:77288ms step_avg:66.57ms
+[2025-07-07 06:20:43] [Rank 0] step:1181/10000 train_time:78630ms step_avg:66.58ms
+[2025-07-07 06:20:44] [Rank 0] step:1201/10000 train_time:79972ms step_avg:66.59ms
+[2025-07-07 06:20:45] [Rank 0] step:1221/10000 train_time:81313ms step_avg:66.60ms
+[2025-07-07 06:20:47] [Rank 0] step:1241/10000 train_time:82656ms step_avg:66.60ms
+[2025-07-07 06:20:48] [Rank 0] step:1261/10000 train_time:84668ms step_avg:67.14ms
+[2025-07-07 06:20:49] [Rank 0] step:1281/10000 train_time:85391ms step_avg:66.66ms
+[2025-07-07 06:20:51] [Rank 0] step:1301/10000 train_time:86735ms step_avg:66.67ms
+[2025-07-07 06:20:52] [Rank 0] step:1321/10000 train_time:88079ms step_avg:66.68ms
+[2025-07-07 06:20:53] [Rank 0] step:1341/10000 train_time:89424ms step_avg:66.68ms
+[2025-07-07 06:20:55] [Rank 0] step:1361/10000 train_time:90768ms step_avg:66.69ms
+[2025-07-07 06:20:56] [Rank 0] step:1381/10000 train_time:92113ms step_avg:66.70ms
+[2025-07-07 06:20:58] [Rank 0] step:1401/10000 train_time:93459ms step_avg:66.71ms
+[2025-07-07 06:20:59] [Rank 0] step:1421/10000 train_time:94807ms step_avg:66.72ms
+[2025-07-07 06:21:00] [Rank 0] step:1441/10000 train_time:96203ms step_avg:66.76ms
+[2025-07-07 06:21:02] [Rank 0] step:1461/10000 train_time:97555ms step_avg:66.77ms
+[2025-07-07 06:21:03] [Rank 0] step:1481/10000 train_time:98904ms step_avg:66.78ms
+[2025-07-07 06:21:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:21:05] [Rank 0] PRINT: step:1500/10000 train_loss:1.6684 val_loss:1.5427 train_time:100864ms step_avg:67.24ms
+[2025-07-07 06:21:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:21:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:21:05] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:26:28] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:26:28] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:26:28] [Rank 0] Total Loss: 5.7740
+[2025-07-07 06:26:28] [Rank 0] Total FTA: 0.0932
+[2025-07-07 06:26:28] [Rank 0] Group 0 Loss: 6.0063
+[2025-07-07 06:26:28] [Rank 0] Group 1 Loss: 5.5218
+[2025-07-07 06:26:28] [Rank 0] Group 2 Loss: 5.4542
+[2025-07-07 06:26:28] [Rank 0] Group 3 Loss: 5.8858
+[2025-07-07 06:26:28] [Rank 0] Group 4 Loss: 5.7300
+[2025-07-07 06:26:28] [Rank 0] Group 5 Loss: 5.8411
+[2025-07-07 06:26:28] [Rank 0] Group 6 Loss: 5.6796
+[2025-07-07 06:26:28] [Rank 0] Group 7 Loss: 5.7994
+[2025-07-07 06:26:28] [Rank 0] Group 8 Loss: 5.7686
+[2025-07-07 06:26:28] [Rank 0] Group 9 Loss: 5.7348
+[2025-07-07 06:26:28] [Rank 0] Group 10 Loss: 5.7720
+[2025-07-07 06:26:28] [Rank 0] Group 11 Loss: 5.8021
+[2025-07-07 06:26:28] [Rank 0] Group 0 FTA: 0.1743
+[2025-07-07 06:26:28] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:26:28] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-07 06:26:28] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 06:26:28] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 06:26:28] [Rank 0] Group 5 FTA: 0.0755
+[2025-07-07 06:26:28] [Rank 0] Group 6 FTA: 0.0781
+[2025-07-07 06:26:28] [Rank 0] Group 7 FTA: 0.1094
+[2025-07-07 06:26:28] [Rank 0] Group 8 FTA: 0.0938
+[2025-07-07 06:26:28] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 06:26:28] [Rank 0] Group 10 FTA: 0.0859
+[2025-07-07 06:26:28] [Rank 0] Group 11 FTA: 0.0840
+[2025-07-07 06:26:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 06:26:29] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 06:26:29] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 06:26:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 06:26:30] [Rank 0] step:1501/10000 train_time:100873ms step_avg:67.20ms
+[2025-07-07 06:26:31] [Rank 0] step:1521/10000 train_time:101629ms step_avg:66.82ms
+[2025-07-07 06:26:32] [Rank 0] step:1541/10000 train_time:102965ms step_avg:66.82ms
+[2025-07-07 06:26:34] [Rank 0] step:1561/10000 train_time:104302ms step_avg:66.82ms
+[2025-07-07 06:26:35] [Rank 0] step:1581/10000 train_time:105640ms step_avg:66.82ms
+[2025-07-07 06:26:36] [Rank 0] step:1601/10000 train_time:106984ms step_avg:66.82ms
+[2025-07-07 06:26:38] [Rank 0] step:1621/10000 train_time:108324ms step_avg:66.83ms
+[2025-07-07 06:26:39] [Rank 0] step:1641/10000 train_time:109709ms step_avg:66.85ms
+[2025-07-07 06:26:40] [Rank 0] step:1661/10000 train_time:111051ms step_avg:66.86ms
+[2025-07-07 06:26:42] [Rank 0] step:1681/10000 train_time:112394ms step_avg:66.86ms
+[2025-07-07 06:26:43] [Rank 0] step:1701/10000 train_time:113738ms step_avg:66.87ms
+[2025-07-07 06:26:44] [Rank 0] step:1721/10000 train_time:115081ms step_avg:66.87ms
+[2025-07-07 06:26:46] [Rank 0] step:1741/10000 train_time:116424ms step_avg:66.87ms
+[2025-07-07 06:26:47] [Rank 0] step:1761/10000 train_time:117767ms step_avg:66.88ms
+[2025-07-07 06:26:49] [Rank 0] step:1781/10000 train_time:119113ms step_avg:66.88ms
+[2025-07-07 06:26:50] [Rank 0] step:1801/10000 train_time:120457ms step_avg:66.88ms
+[2025-07-07 06:26:51] [Rank 0] step:1821/10000 train_time:121839ms step_avg:66.91ms
+[2025-07-07 06:26:53] [Rank 0] step:1841/10000 train_time:123185ms step_avg:66.91ms
+[2025-07-07 06:26:54] [Rank 0] step:1861/10000 train_time:124531ms step_avg:66.92ms
+[2025-07-07 06:26:55] [Rank 0] step:1881/10000 train_time:125877ms step_avg:66.92ms
+[2025-07-07 06:26:57] [Rank 0] step:1901/10000 train_time:127223ms step_avg:66.92ms
+[2025-07-07 06:26:58] [Rank 0] step:1921/10000 train_time:128570ms step_avg:66.93ms
+[2025-07-07 06:26:59] [Rank 0] step:1941/10000 train_time:129919ms step_avg:66.93ms
+[2025-07-07 06:27:01] [Rank 0] step:1961/10000 train_time:131267ms step_avg:66.94ms
+[2025-07-07 06:27:02] [Rank 0] step:1981/10000 train_time:132615ms step_avg:66.94ms
+[2025-07-07 06:27:03] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:27:04] [Rank 0] PRINT: step:2000/10000 train_loss:1.4227 val_loss:1.3220 train_time:134616ms step_avg:67.31ms
+[2025-07-07 06:27:04] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:27:04] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:27:04] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:32:25] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:32:25] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:32:25] [Rank 0] Total Loss: 5.8439
+[2025-07-07 06:32:25] [Rank 0] Total FTA: 0.0893
+[2025-07-07 06:32:25] [Rank 0] Group 0 Loss: 6.0540
+[2025-07-07 06:32:25] [Rank 0] Group 1 Loss: 5.5233
+[2025-07-07 06:32:25] [Rank 0] Group 2 Loss: 5.5270
+[2025-07-07 06:32:25] [Rank 0] Group 3 Loss: 5.9093
+[2025-07-07 06:32:25] [Rank 0] Group 4 Loss: 5.7901
+[2025-07-07 06:32:25] [Rank 0] Group 5 Loss: 5.9697
+[2025-07-07 06:32:25] [Rank 0] Group 6 Loss: 5.8121
+[2025-07-07 06:32:25] [Rank 0] Group 7 Loss: 5.8894
+[2025-07-07 06:32:25] [Rank 0] Group 8 Loss: 5.8404
+[2025-07-07 06:32:25] [Rank 0] Group 9 Loss: 5.8817
+[2025-07-07 06:32:25] [Rank 0] Group 10 Loss: 5.8684
+[2025-07-07 06:32:25] [Rank 0] Group 11 Loss: 5.8482
+[2025-07-07 06:32:25] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 06:32:25] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:32:25] [Rank 0] Group 2 FTA: 0.0885
+[2025-07-07 06:32:25] [Rank 0] Group 3 FTA: 0.0391
+[2025-07-07 06:32:25] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 06:32:25] [Rank 0] Group 5 FTA: 0.0651
+[2025-07-07 06:32:25] [Rank 0] Group 6 FTA: 0.1120
+[2025-07-07 06:32:25] [Rank 0] Group 7 FTA: 0.0964
+[2025-07-07 06:32:25] [Rank 0] Group 8 FTA: 0.0833
+[2025-07-07 06:32:25] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-07 06:32:25] [Rank 0] Group 10 FTA: 0.0879
+[2025-07-07 06:32:25] [Rank 0] Group 11 FTA: 0.1006
+[2025-07-07 06:32:26] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 06:32:26] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 06:32:26] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 06:32:27] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 06:32:27] [Rank 0] step:2001/10000 train_time:134627ms step_avg:67.28ms
+[2025-07-07 06:32:28] [Rank 0] step:2021/10000 train_time:135369ms step_avg:66.98ms
+[2025-07-07 06:32:29] [Rank 0] step:2041/10000 train_time:136706ms step_avg:66.98ms
+[2025-07-07 06:32:31] [Rank 0] step:2061/10000 train_time:138044ms step_avg:66.98ms
+[2025-07-07 06:32:32] [Rank 0] step:2081/10000 train_time:139382ms step_avg:66.98ms
+[2025-07-07 06:32:33] [Rank 0] step:2101/10000 train_time:140722ms step_avg:66.98ms
+[2025-07-07 06:32:35] [Rank 0] step:2121/10000 train_time:142063ms step_avg:66.98ms
+[2025-07-07 06:32:36] [Rank 0] step:2141/10000 train_time:143405ms step_avg:66.98ms
+[2025-07-07 06:32:38] [Rank 0] step:2161/10000 train_time:144748ms step_avg:66.98ms
+[2025-07-07 06:32:39] [Rank 0] step:2181/10000 train_time:146131ms step_avg:67.00ms
+[2025-07-07 06:32:40] [Rank 0] step:2201/10000 train_time:147475ms step_avg:67.00ms
+[2025-07-07 06:32:42] [Rank 0] step:2221/10000 train_time:148818ms step_avg:67.01ms
+[2025-07-07 06:32:43] [Rank 0] step:2241/10000 train_time:150171ms step_avg:67.01ms
+[2025-07-07 06:32:44] [Rank 0] step:2261/10000 train_time:151539ms step_avg:67.02ms
+[2025-07-07 06:32:46] [Rank 0] step:2281/10000 train_time:152907ms step_avg:67.04ms
+[2025-07-07 06:32:47] [Rank 0] step:2301/10000 train_time:154276ms step_avg:67.05ms
+[2025-07-07 06:32:48] [Rank 0] step:2321/10000 train_time:155645ms step_avg:67.06ms
+[2025-07-07 06:32:50] [Rank 0] step:2341/10000 train_time:157015ms step_avg:67.07ms
+[2025-07-07 06:32:51] [Rank 0] step:2361/10000 train_time:158434ms step_avg:67.10ms
+[2025-07-07 06:32:53] [Rank 0] step:2381/10000 train_time:159804ms step_avg:67.12ms
+[2025-07-07 06:32:54] [Rank 0] step:2401/10000 train_time:161174ms step_avg:67.13ms
+[2025-07-07 06:32:55] [Rank 0] step:2421/10000 train_time:162544ms step_avg:67.14ms
+[2025-07-07 06:32:57] [Rank 0] step:2441/10000 train_time:163913ms step_avg:67.15ms
+[2025-07-07 06:32:58] [Rank 0] step:2461/10000 train_time:165283ms step_avg:67.16ms
+[2025-07-07 06:32:59] [Rank 0] step:2481/10000 train_time:166654ms step_avg:67.17ms
+[2025-07-07 06:33:01] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:33:02] [Rank 0] PRINT: step:2500/10000 train_loss:1.3084 val_loss:1.2916 train_time:168646ms step_avg:67.46ms
+[2025-07-07 06:33:02] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:33:02] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:33:02] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:38:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:38:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:38:23] [Rank 0] Total Loss: 5.8146
+[2025-07-07 06:38:23] [Rank 0] Total FTA: 0.0836
+[2025-07-07 06:38:23] [Rank 0] Group 0 Loss: 5.9922
+[2025-07-07 06:38:23] [Rank 0] Group 1 Loss: 5.3712
+[2025-07-07 06:38:23] [Rank 0] Group 2 Loss: 5.3902
+[2025-07-07 06:38:23] [Rank 0] Group 3 Loss: 5.9363
+[2025-07-07 06:38:23] [Rank 0] Group 4 Loss: 5.8561
+[2025-07-07 06:38:23] [Rank 0] Group 5 Loss: 5.9686
+[2025-07-07 06:38:23] [Rank 0] Group 6 Loss: 5.7371
+[2025-07-07 06:38:23] [Rank 0] Group 7 Loss: 5.8742
+[2025-07-07 06:38:23] [Rank 0] Group 8 Loss: 5.8236
+[2025-07-07 06:38:23] [Rank 0] Group 9 Loss: 5.8352
+[2025-07-07 06:38:23] [Rank 0] Group 10 Loss: 5.8721
+[2025-07-07 06:38:23] [Rank 0] Group 11 Loss: 5.8569
+[2025-07-07 06:38:23] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-07 06:38:23] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:38:23] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-07 06:38:23] [Rank 0] Group 3 FTA: 0.0286
+[2025-07-07 06:38:23] [Rank 0] Group 4 FTA: 0.0234
+[2025-07-07 06:38:23] [Rank 0] Group 5 FTA: 0.0703
+[2025-07-07 06:38:23] [Rank 0] Group 6 FTA: 0.0807
+[2025-07-07 06:38:23] [Rank 0] Group 7 FTA: 0.1068
+[2025-07-07 06:38:23] [Rank 0] Group 8 FTA: 0.0755
+[2025-07-07 06:38:23] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 06:38:23] [Rank 0] Group 10 FTA: 0.0977
+[2025-07-07 06:38:23] [Rank 0] Group 11 FTA: 0.0830
+[2025-07-07 06:38:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 06:38:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 06:38:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 06:38:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 06:38:24] [Rank 0] step:2501/10000 train_time:168656ms step_avg:67.44ms
+[2025-07-07 06:38:26] [Rank 0] step:2521/10000 train_time:169591ms step_avg:67.27ms
+[2025-07-07 06:38:27] [Rank 0] step:2541/10000 train_time:170802ms step_avg:67.22ms
+[2025-07-07 06:38:29] [Rank 0] step:2561/10000 train_time:172164ms step_avg:67.23ms
+[2025-07-07 06:38:30] [Rank 0] step:2581/10000 train_time:173528ms step_avg:67.23ms
+[2025-07-07 06:38:31] [Rank 0] step:2601/10000 train_time:174892ms step_avg:67.24ms
+[2025-07-07 06:38:33] [Rank 0] step:2621/10000 train_time:176255ms step_avg:67.25ms
+[2025-07-07 06:38:34] [Rank 0] step:2641/10000 train_time:177621ms step_avg:67.26ms
+[2025-07-07 06:38:35] [Rank 0] step:2661/10000 train_time:178986ms step_avg:67.26ms
+[2025-07-07 06:38:37] [Rank 0] step:2681/10000 train_time:180354ms step_avg:67.27ms
+[2025-07-07 06:38:38] [Rank 0] step:2701/10000 train_time:181720ms step_avg:67.28ms
+[2025-07-07 06:38:39] [Rank 0] step:2721/10000 train_time:183131ms step_avg:67.30ms
+[2025-07-07 06:38:41] [Rank 0] step:2741/10000 train_time:184499ms step_avg:67.31ms
+[2025-07-07 06:38:42] [Rank 0] step:2761/10000 train_time:185868ms step_avg:67.32ms
+[2025-07-07 06:38:44] [Rank 0] step:2781/10000 train_time:187236ms step_avg:67.33ms
+[2025-07-07 06:38:45] [Rank 0] step:2801/10000 train_time:188605ms step_avg:67.33ms
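step_avg in these lines is consistent with cumulative wall-clock train_time divided by the current step number, e.g. 168656 ms / 2501 ≈ 67.44 ms for step 2501 above; a quick check:

train_time_ms, step = 168656, 2501
print(f"step:{step}/10000 train_time:{train_time_ms}ms step_avg:{train_time_ms / step:.2f}ms")
# -> step:2501/10000 train_time:168656ms step_avg:67.44ms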
+[2025-07-07 06:38:46] [Rank 0] step:2821/10000 train_time:189973ms step_avg:67.34ms
+[2025-07-07 06:38:48] [Rank 0] step:2841/10000 train_time:191344ms step_avg:67.35ms
+[2025-07-07 06:38:49] [Rank 0] step:2861/10000 train_time:192713ms step_avg:67.36ms
+[2025-07-07 06:38:50] [Rank 0] step:2881/10000 train_time:194744ms step_avg:67.60ms
+[2025-07-07 06:38:52] [Rank 0] step:2901/10000 train_time:195484ms step_avg:67.39ms
+[2025-07-07 06:38:53] [Rank 0] step:2921/10000 train_time:196857ms step_avg:67.39ms
+[2025-07-07 06:38:55] [Rank 0] step:2941/10000 train_time:198228ms step_avg:67.40ms
+[2025-07-07 06:38:56] [Rank 0] step:2961/10000 train_time:199611ms step_avg:67.41ms
+[2025-07-07 06:38:57] [Rank 0] step:2981/10000 train_time:200982ms step_avg:67.42ms
+[2025-07-07 06:38:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:39:00] [Rank 0] PRINT: step:3000/10000 train_loss:1.2818 val_loss:1.2742 train_time:202974ms step_avg:67.66ms
+[2025-07-07 06:39:00] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:39:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
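The warning before each 500-step validation is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches fit and the trailing 131072 tokens are never scored. A hedged reconstruction of the check (the real script may phrase it differently):

val_tokens, val_batch_size = 1966080, 262144
full_batches, remainder = divmod(val_tokens, val_batch_size)  # 7 batches, 131072 tokens left over
if remainder:
    print(f"Warning: val_tokens ({val_tokens}) not perfectly divisible "
          f"by val_batch_size ({val_batch_size}). Some tokens might be missed.")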
+[2025-07-07 06:39:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:44:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:44:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:44:23] [Rank 0] Total Loss: 5.8651
+[2025-07-07 06:44:23] [Rank 0] Total FTA: 0.1244
+[2025-07-07 06:44:23] [Rank 0] Group 0 Loss: 6.1392
+[2025-07-07 06:44:23] [Rank 0] Group 1 Loss: 5.4896
+[2025-07-07 06:44:23] [Rank 0] Group 2 Loss: 5.4839
+[2025-07-07 06:44:23] [Rank 0] Group 3 Loss: 5.9558
+[2025-07-07 06:44:23] [Rank 0] Group 4 Loss: 5.8157
+[2025-07-07 06:44:23] [Rank 0] Group 5 Loss: 6.0079
+[2025-07-07 06:44:23] [Rank 0] Group 6 Loss: 5.8388
+[2025-07-07 06:44:23] [Rank 0] Group 7 Loss: 5.9411
+[2025-07-07 06:44:23] [Rank 0] Group 8 Loss: 5.8520
+[2025-07-07 06:44:23] [Rank 0] Group 9 Loss: 5.8720
+[2025-07-07 06:44:23] [Rank 0] Group 10 Loss: 5.8902
+[2025-07-07 06:44:23] [Rank 0] Group 11 Loss: 5.8458
+[2025-07-07 06:44:23] [Rank 0] Group 0 FTA: 0.1638
+[2025-07-07 06:44:23] [Rank 0] Group 1 FTA: 0.1667
+[2025-07-07 06:44:23] [Rank 0] Group 2 FTA: 0.2448
+[2025-07-07 06:44:23] [Rank 0] Group 3 FTA: 0.0859
+[2025-07-07 06:44:23] [Rank 0] Group 4 FTA: 0.0443
+[2025-07-07 06:44:23] [Rank 0] Group 5 FTA: 0.1484
+[2025-07-07 06:44:23] [Rank 0] Group 6 FTA: 0.0885
+[2025-07-07 06:44:23] [Rank 0] Group 7 FTA: 0.1250
+[2025-07-07 06:44:23] [Rank 0] Group 8 FTA: 0.1198
+[2025-07-07 06:44:23] [Rank 0] Group 9 FTA: 0.0781
+[2025-07-07 06:44:23] [Rank 0] Group 10 FTA: 0.1094
+[2025-07-07 06:44:23] [Rank 0] Group 11 FTA: 0.1035
+[2025-07-07 06:44:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 06:44:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 06:44:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 06:44:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 06:44:25] [Rank 0] step:3001/10000 train_time:202984ms step_avg:67.64ms
+[2025-07-07 06:44:26] [Rank 0] step:3021/10000 train_time:203758ms step_avg:67.45ms
+[2025-07-07 06:44:27] [Rank 0] step:3041/10000 train_time:205117ms step_avg:67.45ms
+[2025-07-07 06:44:29] [Rank 0] step:3061/10000 train_time:206479ms step_avg:67.45ms
+[2025-07-07 06:44:30] [Rank 0] step:3081/10000 train_time:207890ms step_avg:67.47ms
+[2025-07-07 06:44:32] [Rank 0] step:3101/10000 train_time:209252ms step_avg:67.48ms
+[2025-07-07 06:44:33] [Rank 0] step:3121/10000 train_time:210624ms step_avg:67.49ms
+[2025-07-07 06:44:34] [Rank 0] step:3141/10000 train_time:211989ms step_avg:67.49ms
+[2025-07-07 06:44:36] [Rank 0] step:3161/10000 train_time:213355ms step_avg:67.50ms
+[2025-07-07 06:44:37] [Rank 0] step:3181/10000 train_time:214721ms step_avg:67.50ms
+[2025-07-07 06:44:38] [Rank 0] step:3201/10000 train_time:216088ms step_avg:67.51ms
+[2025-07-07 06:44:40] [Rank 0] step:3221/10000 train_time:217454ms step_avg:67.51ms
+[2025-07-07 06:44:41] [Rank 0] step:3241/10000 train_time:218820ms step_avg:67.52ms
+[2025-07-07 06:44:42] [Rank 0] step:3261/10000 train_time:220212ms step_avg:67.53ms
+[2025-07-07 06:44:44] [Rank 0] step:3281/10000 train_time:221580ms step_avg:67.53ms
+[2025-07-07 06:44:45] [Rank 0] step:3301/10000 train_time:222950ms step_avg:67.54ms
+[2025-07-07 06:44:47] [Rank 0] step:3321/10000 train_time:224319ms step_avg:67.55ms
+[2025-07-07 06:44:48] [Rank 0] step:3341/10000 train_time:225689ms step_avg:67.55ms
+[2025-07-07 06:44:49] [Rank 0] step:3361/10000 train_time:227061ms step_avg:67.56ms
+[2025-07-07 06:44:51] [Rank 0] step:3381/10000 train_time:228431ms step_avg:67.56ms
+[2025-07-07 06:44:52] [Rank 0] step:3401/10000 train_time:229801ms step_avg:67.57ms
+[2025-07-07 06:44:53] [Rank 0] step:3421/10000 train_time:231171ms step_avg:67.57ms
+[2025-07-07 06:44:55] [Rank 0] step:3441/10000 train_time:232567ms step_avg:67.59ms
+[2025-07-07 06:44:56] [Rank 0] step:3461/10000 train_time:233938ms step_avg:67.59ms
+[2025-07-07 06:44:58] [Rank 0] step:3481/10000 train_time:235310ms step_avg:67.60ms
+[2025-07-07 06:44:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:45:00] [Rank 0] PRINT: step:3500/10000 train_loss:1.2622 val_loss:1.2612 train_time:237304ms step_avg:67.80ms
+[2025-07-07 06:45:00] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:45:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
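Every detailed evaluation requests ~5000 stratified samples yet reports the same fixed size of 5633, which is the kind of result per-group quotas with rounding (or taking small groups whole) produce. A sketch of the general pattern, with invented names; it illustrates why the realized count lands near, but not exactly at, the target, and does not claim to reproduce 5633:

import random

def stratified_sample(samples_by_group, target_total=5000, seed=0):
    rng = random.Random(seed)
    quota = -(-target_total // len(samples_by_group))  # ceil division per group
    picked = []
    for pool in samples_by_group.values():
        picked += rng.sample(pool, min(quota, len(pool)))
    return picked  # deterministic for a fixed seed, hence the constant set size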
+[2025-07-07 06:45:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:50:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:50:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:50:21] [Rank 0] Total Loss: 5.7368
+[2025-07-07 06:50:21] [Rank 0] Total FTA: 0.1209
+[2025-07-07 06:50:21] [Rank 0] Group 0 Loss: 5.7325
+[2025-07-07 06:50:21] [Rank 0] Group 1 Loss: 5.4745
+[2025-07-07 06:50:21] [Rank 0] Group 2 Loss: 5.3665
+[2025-07-07 06:50:21] [Rank 0] Group 3 Loss: 5.9347
+[2025-07-07 06:50:21] [Rank 0] Group 4 Loss: 5.8643
+[2025-07-07 06:50:22] [Rank 0] Group 5 Loss: 5.8601
+[2025-07-07 06:50:22] [Rank 0] Group 6 Loss: 5.6881
+[2025-07-07 06:50:22] [Rank 0] Group 7 Loss: 5.7736
+[2025-07-07 06:50:22] [Rank 0] Group 8 Loss: 5.7487
+[2025-07-07 06:50:22] [Rank 0] Group 9 Loss: 5.7416
+[2025-07-07 06:50:22] [Rank 0] Group 10 Loss: 5.7783
+[2025-07-07 06:50:22] [Rank 0] Group 11 Loss: 5.7871
+[2025-07-07 06:50:22] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 06:50:22] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-07 06:50:22] [Rank 0] Group 2 FTA: 0.1432
+[2025-07-07 06:50:22] [Rank 0] Group 3 FTA: 0.1458
+[2025-07-07 06:50:22] [Rank 0] Group 4 FTA: 0.0312
+[2025-07-07 06:50:22] [Rank 0] Group 5 FTA: 0.0911
+[2025-07-07 06:50:22] [Rank 0] Group 6 FTA: 0.0755
+[2025-07-07 06:50:22] [Rank 0] Group 7 FTA: 0.0859
+[2025-07-07 06:50:22] [Rank 0] Group 8 FTA: 0.1536
+[2025-07-07 06:50:22] [Rank 0] Group 9 FTA: 0.1172
+[2025-07-07 06:50:22] [Rank 0] Group 10 FTA: 0.1172
+[2025-07-07 06:50:22] [Rank 0] Group 11 FTA: 0.1152
+[2025-07-07 06:50:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 06:50:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 06:50:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 06:50:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 06:50:23] [Rank 0] step:3501/10000 train_time:237315ms step_avg:67.78ms
+[2025-07-07 06:50:24] [Rank 0] step:3521/10000 train_time:238090ms step_avg:67.62ms
+[2025-07-07 06:50:26] [Rank 0] step:3541/10000 train_time:239451ms step_avg:67.62ms
+[2025-07-07 06:50:27] [Rank 0] step:3561/10000 train_time:240812ms step_avg:67.62ms
+[2025-07-07 06:50:28] [Rank 0] step:3581/10000 train_time:242175ms step_avg:67.63ms
+[2025-07-07 06:50:30] [Rank 0] step:3601/10000 train_time:243790ms step_avg:67.70ms
+[2025-07-07 06:50:31] [Rank 0] step:3621/10000 train_time:244933ms step_avg:67.64ms
+[2025-07-07 06:50:33] [Rank 0] step:3641/10000 train_time:246298ms step_avg:67.65ms
+[2025-07-07 06:50:34] [Rank 0] step:3661/10000 train_time:247662ms step_avg:67.65ms
+[2025-07-07 06:50:35] [Rank 0] step:3681/10000 train_time:249029ms step_avg:67.65ms
+[2025-07-07 06:50:37] [Rank 0] step:3701/10000 train_time:250396ms step_avg:67.66ms
+[2025-07-07 06:50:38] [Rank 0] step:3721/10000 train_time:251763ms step_avg:67.66ms
+[2025-07-07 06:50:39] [Rank 0] step:3741/10000 train_time:253129ms step_avg:67.66ms
+[2025-07-07 06:50:41] [Rank 0] step:3761/10000 train_time:254497ms step_avg:67.67ms
+[2025-07-07 06:50:42] [Rank 0] step:3781/10000 train_time:256114ms step_avg:67.74ms
+[2025-07-07 06:50:44] [Rank 0] step:3801/10000 train_time:257286ms step_avg:67.69ms
+[2025-07-07 06:50:45] [Rank 0] step:3821/10000 train_time:258656ms step_avg:67.69ms
+[2025-07-07 06:50:46] [Rank 0] step:3841/10000 train_time:260027ms step_avg:67.70ms
+[2025-07-07 06:50:48] [Rank 0] step:3861/10000 train_time:261429ms step_avg:67.71ms
+[2025-07-07 06:50:49] [Rank 0] step:3881/10000 train_time:262798ms step_avg:67.71ms
+[2025-07-07 06:50:50] [Rank 0] step:3901/10000 train_time:264170ms step_avg:67.72ms
+[2025-07-07 06:50:52] [Rank 0] step:3921/10000 train_time:265539ms step_avg:67.72ms
+[2025-07-07 06:50:53] [Rank 0] step:3941/10000 train_time:266911ms step_avg:67.73ms
+[2025-07-07 06:50:55] [Rank 0] step:3961/10000 train_time:268531ms step_avg:67.79ms
+[2025-07-07 06:50:56] [Rank 0] step:3981/10000 train_time:269655ms step_avg:67.74ms
+[2025-07-07 06:50:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:50:58] [Rank 0] PRINT: step:4000/10000 train_loss:1.2407 val_loss:1.2506 train_time:271652ms step_avg:67.91ms
+[2025-07-07 06:50:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:50:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:50:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:56:20] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:56:20] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:56:21] [Rank 0] Total Loss: 5.7412
+[2025-07-07 06:56:21] [Rank 0] Total FTA: 0.1434
+[2025-07-07 06:56:21] [Rank 0] Group 0 Loss: 5.8655
+[2025-07-07 06:56:21] [Rank 0] Group 1 Loss: 5.5069
+[2025-07-07 06:56:21] [Rank 0] Group 2 Loss: 5.3978
+[2025-07-07 06:56:21] [Rank 0] Group 3 Loss: 5.9146
+[2025-07-07 06:56:21] [Rank 0] Group 4 Loss: 5.6923
+[2025-07-07 06:56:21] [Rank 0] Group 5 Loss: 5.7884
+[2025-07-07 06:56:21] [Rank 0] Group 6 Loss: 5.6952
+[2025-07-07 06:56:21] [Rank 0] Group 7 Loss: 5.7768
+[2025-07-07 06:56:21] [Rank 0] Group 8 Loss: 5.7619
+[2025-07-07 06:56:21] [Rank 0] Group 9 Loss: 5.7529
+[2025-07-07 06:56:21] [Rank 0] Group 10 Loss: 5.7558
+[2025-07-07 06:56:21] [Rank 0] Group 11 Loss: 5.7858
+[2025-07-07 06:56:21] [Rank 0] Group 0 FTA: 0.1899
+[2025-07-07 06:56:21] [Rank 0] Group 1 FTA: 0.1484
+[2025-07-07 06:56:21] [Rank 0] Group 2 FTA: 0.1693
+[2025-07-07 06:56:21] [Rank 0] Group 3 FTA: 0.1250
+[2025-07-07 06:56:21] [Rank 0] Group 4 FTA: 0.1354
+[2025-07-07 06:56:21] [Rank 0] Group 5 FTA: 0.1120
+[2025-07-07 06:56:21] [Rank 0] Group 6 FTA: 0.1536
+[2025-07-07 06:56:21] [Rank 0] Group 7 FTA: 0.1094
+[2025-07-07 06:56:21] [Rank 0] Group 8 FTA: 0.1589
+[2025-07-07 06:56:21] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 06:56:21] [Rank 0] Group 10 FTA: 0.1406
+[2025-07-07 06:56:21] [Rank 0] Group 11 FTA: 0.1338
+[2025-07-07 06:56:21] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 06:56:21] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
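Each evaluation cycle overwrites the same four PNGs in the run directory, so the files always hold the latest snapshot of the full history. A stand-in for that kind of running-curve update (matplotlib assumed; the helper name is illustrative, not the script's actual plotting code), fed with the total-loss values logged at steps 2500-4000 above:

import matplotlib
matplotlib.use("Agg")  # headless, as on a training node
import matplotlib.pyplot as plt

def update_total_loss_curve(steps, losses, out_path):
    plt.figure()
    plt.plot(steps, losses, marker="o")
    plt.xlabel("step")
    plt.ylabel("detailed eval total loss")
    plt.savefig(out_path)  # overwrites the previous snapshot
    plt.close()

update_total_loss_curve([2500, 3000, 3500, 4000],
                        [5.8146, 5.8651, 5.7368, 5.7412],
                        "total_loss_curve.png")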
+[2025-07-07 06:56:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 06:56:22] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 06:56:22] [Rank 0] step:4001/10000 train_time:271662ms step_avg:67.90ms
+[2025-07-07 06:56:23] [Rank 0] step:4021/10000 train_time:272415ms step_avg:67.75ms
+[2025-07-07 06:56:25] [Rank 0] step:4041/10000 train_time:273776ms step_avg:67.75ms
+[2025-07-07 06:56:26] [Rank 0] step:4061/10000 train_time:275138ms step_avg:67.75ms
+[2025-07-07 06:56:28] [Rank 0] step:4081/10000 train_time:276501ms step_avg:67.75ms
+[2025-07-07 06:56:29] [Rank 0] step:4101/10000 train_time:277863ms step_avg:67.75ms
+[2025-07-07 06:56:30] [Rank 0] step:4121/10000 train_time:279227ms step_avg:67.76ms
+[2025-07-07 06:56:32] [Rank 0] step:4141/10000 train_time:280595ms step_avg:67.76ms
+[2025-07-07 06:56:33] [Rank 0] step:4161/10000 train_time:282003ms step_avg:67.77ms
+[2025-07-07 06:56:34] [Rank 0] step:4181/10000 train_time:283370ms step_avg:67.78ms
+[2025-07-07 06:56:36] [Rank 0] step:4201/10000 train_time:284737ms step_avg:67.78ms
+[2025-07-07 06:56:37] [Rank 0] step:4221/10000 train_time:286102ms step_avg:67.78ms
+[2025-07-07 06:56:39] [Rank 0] step:4241/10000 train_time:287469ms step_avg:67.78ms
+[2025-07-07 06:56:40] [Rank 0] step:4261/10000 train_time:288836ms step_avg:67.79ms
+[2025-07-07 06:56:41] [Rank 0] step:4281/10000 train_time:290203ms step_avg:67.79ms
+[2025-07-07 06:56:43] [Rank 0] step:4301/10000 train_time:291572ms step_avg:67.79ms
+[2025-07-07 06:56:44] [Rank 0] step:4321/10000 train_time:292941ms step_avg:67.79ms
+[2025-07-07 06:56:45] [Rank 0] step:4341/10000 train_time:294355ms step_avg:67.81ms
+[2025-07-07 06:56:47] [Rank 0] step:4361/10000 train_time:295724ms step_avg:67.81ms
+[2025-07-07 06:56:48] [Rank 0] step:4381/10000 train_time:297093ms step_avg:67.81ms
+[2025-07-07 06:56:50] [Rank 0] step:4401/10000 train_time:298462ms step_avg:67.82ms
+[2025-07-07 06:56:51] [Rank 0] step:4421/10000 train_time:299831ms step_avg:67.82ms
+[2025-07-07 06:56:52] [Rank 0] step:4441/10000 train_time:301200ms step_avg:67.82ms
+[2025-07-07 06:56:54] [Rank 0] step:4461/10000 train_time:302569ms step_avg:67.83ms
+[2025-07-07 06:56:55] [Rank 0] step:4481/10000 train_time:303939ms step_avg:67.83ms
+[2025-07-07 06:56:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:56:57] [Rank 0] PRINT: step:4500/10000 train_loss:1.2130 val_loss:1.2458 train_time:305932ms step_avg:67.98ms
+[2025-07-07 06:56:57] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:56:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:56:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:02:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:02:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:02:19] [Rank 0] Total Loss: 5.6616
+[2025-07-07 07:02:19] [Rank 0] Total FTA: 0.1065
+[2025-07-07 07:02:19] [Rank 0] Group 0 Loss: 5.8485
+[2025-07-07 07:02:19] [Rank 0] Group 1 Loss: 5.5749
+[2025-07-07 07:02:19] [Rank 0] Group 2 Loss: 5.2692
+[2025-07-07 07:02:19] [Rank 0] Group 3 Loss: 5.7553
+[2025-07-07 07:02:19] [Rank 0] Group 4 Loss: 5.6678
+[2025-07-07 07:02:19] [Rank 0] Group 5 Loss: 5.7441
+[2025-07-07 07:02:19] [Rank 0] Group 6 Loss: 5.5669
+[2025-07-07 07:02:19] [Rank 0] Group 7 Loss: 5.6621
+[2025-07-07 07:02:19] [Rank 0] Group 8 Loss: 5.6392
+[2025-07-07 07:02:19] [Rank 0] Group 9 Loss: 5.6780
+[2025-07-07 07:02:19] [Rank 0] Group 10 Loss: 5.6545
+[2025-07-07 07:02:19] [Rank 0] Group 11 Loss: 5.6756
+[2025-07-07 07:02:19] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 07:02:19] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 07:02:19] [Rank 0] Group 2 FTA: 0.0833
+[2025-07-07 07:02:19] [Rank 0] Group 3 FTA: 0.1094
+[2025-07-07 07:02:20] [Rank 0] Group 4 FTA: 0.1250
+[2025-07-07 07:02:20] [Rank 0] Group 5 FTA: 0.1823
+[2025-07-07 07:02:20] [Rank 0] Group 6 FTA: 0.1302
+[2025-07-07 07:02:20] [Rank 0] Group 7 FTA: 0.1328
+[2025-07-07 07:02:20] [Rank 0] Group 8 FTA: 0.1406
+[2025-07-07 07:02:20] [Rank 0] Group 9 FTA: 0.1289
+[2025-07-07 07:02:20] [Rank 0] Group 10 FTA: 0.1484
+[2025-07-07 07:02:20] [Rank 0] Group 11 FTA: 0.1406
+[2025-07-07 07:02:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:02:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:02:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:02:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:02:21] [Rank 0] step:4501/10000 train_time:306253ms step_avg:68.04ms
+[2025-07-07 07:02:23] [Rank 0] step:4521/10000 train_time:307035ms step_avg:67.91ms
+[2025-07-07 07:02:24] [Rank 0] step:4541/10000 train_time:308396ms step_avg:67.91ms
+[2025-07-07 07:02:25] [Rank 0] step:4561/10000 train_time:309758ms step_avg:67.91ms
+[2025-07-07 07:02:27] [Rank 0] step:4581/10000 train_time:311119ms step_avg:67.92ms
+[2025-07-07 07:02:28] [Rank 0] step:4601/10000 train_time:312483ms step_avg:67.92ms
+[2025-07-07 07:02:29] [Rank 0] step:4621/10000 train_time:313847ms step_avg:67.92ms
+[2025-07-07 07:02:31] [Rank 0] step:4641/10000 train_time:315212ms step_avg:67.92ms
+[2025-07-07 07:02:32] [Rank 0] step:4661/10000 train_time:316577ms step_avg:67.92ms
+[2025-07-07 07:02:34] [Rank 0] step:4681/10000 train_time:318617ms step_avg:68.07ms
+[2025-07-07 07:02:35] [Rank 0] step:4701/10000 train_time:319353ms step_avg:67.93ms
+[2025-07-07 07:02:36] [Rank 0] step:4721/10000 train_time:320720ms step_avg:67.93ms
+[2025-07-07 07:02:38] [Rank 0] step:4741/10000 train_time:322087ms step_avg:67.94ms
+[2025-07-07 07:02:39] [Rank 0] step:4761/10000 train_time:323455ms step_avg:67.94ms
+[2025-07-07 07:02:40] [Rank 0] step:4781/10000 train_time:324823ms step_avg:67.94ms
+[2025-07-07 07:02:42] [Rank 0] step:4801/10000 train_time:326192ms step_avg:67.94ms
+[2025-07-07 07:02:43] [Rank 0] step:4821/10000 train_time:327560ms step_avg:67.94ms
+[2025-07-07 07:02:45] [Rank 0] step:4841/10000 train_time:328929ms step_avg:67.95ms
+[2025-07-07 07:02:46] [Rank 0] step:4861/10000 train_time:330348ms step_avg:67.96ms
+[2025-07-07 07:02:47] [Rank 0] step:4881/10000 train_time:331707ms step_avg:67.96ms
+[2025-07-07 07:02:49] [Rank 0] step:4901/10000 train_time:333077ms step_avg:67.96ms
+[2025-07-07 07:02:50] [Rank 0] step:4921/10000 train_time:334446ms step_avg:67.96ms
+[2025-07-07 07:02:51] [Rank 0] step:4941/10000 train_time:335815ms step_avg:67.97ms
+[2025-07-07 07:02:53] [Rank 0] step:4961/10000 train_time:337186ms step_avg:67.97ms
+[2025-07-07 07:02:54] [Rank 0] step:4981/10000 train_time:338557ms step_avg:67.97ms
+[2025-07-07 07:02:56] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:02:56] [Rank 0] PRINT: step:5000/10000 train_loss:1.1772 val_loss:1.2344 train_time:340549ms step_avg:68.11ms
+[2025-07-07 07:02:56] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:02:57] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:02:57] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:08:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:08:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:08:19] [Rank 0] Total Loss: 5.5832
+[2025-07-07 07:08:19] [Rank 0] Total FTA: 0.1472
+[2025-07-07 07:08:19] [Rank 0] Group 0 Loss: 5.8343
+[2025-07-07 07:08:19] [Rank 0] Group 1 Loss: 5.3452
+[2025-07-07 07:08:19] [Rank 0] Group 2 Loss: 5.2035
+[2025-07-07 07:08:19] [Rank 0] Group 3 Loss: 5.6245
+[2025-07-07 07:08:19] [Rank 0] Group 4 Loss: 5.4859
+[2025-07-07 07:08:19] [Rank 0] Group 5 Loss: 5.7515
+[2025-07-07 07:08:19] [Rank 0] Group 6 Loss: 5.5667
+[2025-07-07 07:08:19] [Rank 0] Group 7 Loss: 5.5808
+[2025-07-07 07:08:19] [Rank 0] Group 8 Loss: 5.5834
+[2025-07-07 07:08:19] [Rank 0] Group 9 Loss: 5.6350
+[2025-07-07 07:08:19] [Rank 0] Group 10 Loss: 5.5700
+[2025-07-07 07:08:19] [Rank 0] Group 11 Loss: 5.5849
+[2025-07-07 07:08:19] [Rank 0] Group 0 FTA: 0.1534
+[2025-07-07 07:08:19] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 07:08:19] [Rank 0] Group 2 FTA: 0.2396
+[2025-07-07 07:08:19] [Rank 0] Group 3 FTA: 0.1458
+[2025-07-07 07:08:19] [Rank 0] Group 4 FTA: 0.1042
+[2025-07-07 07:08:19] [Rank 0] Group 5 FTA: 0.1953
+[2025-07-07 07:08:19] [Rank 0] Group 6 FTA: 0.1745
+[2025-07-07 07:08:19] [Rank 0] Group 7 FTA: 0.1484
+[2025-07-07 07:08:19] [Rank 0] Group 8 FTA: 0.1458
+[2025-07-07 07:08:19] [Rank 0] Group 9 FTA: 0.1328
+[2025-07-07 07:08:19] [Rank 0] Group 10 FTA: 0.1562
+[2025-07-07 07:08:19] [Rank 0] Group 11 FTA: 0.1504
+[2025-07-07 07:08:19] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:08:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:08:20] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:08:20] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:08:20] [Rank 0] step:5001/10000 train_time:340560ms step_avg:68.10ms
+[2025-07-07 07:08:21] [Rank 0] step:5021/10000 train_time:341329ms step_avg:67.98ms
+[2025-07-07 07:08:23] [Rank 0] step:5041/10000 train_time:342692ms step_avg:67.98ms
+[2025-07-07 07:08:24] [Rank 0] step:5061/10000 train_time:344111ms step_avg:67.99ms
+[2025-07-07 07:08:26] [Rank 0] step:5081/10000 train_time:345472ms step_avg:67.99ms
+[2025-07-07 07:08:27] [Rank 0] step:5101/10000 train_time:346835ms step_avg:67.99ms
+[2025-07-07 07:08:28] [Rank 0] step:5121/10000 train_time:348199ms step_avg:67.99ms
+[2025-07-07 07:08:30] [Rank 0] step:5141/10000 train_time:349566ms step_avg:68.00ms
+[2025-07-07 07:08:31] [Rank 0] step:5161/10000 train_time:350931ms step_avg:68.00ms
+[2025-07-07 07:08:32] [Rank 0] step:5181/10000 train_time:352298ms step_avg:68.00ms
+[2025-07-07 07:08:34] [Rank 0] step:5201/10000 train_time:353663ms step_avg:68.00ms
+[2025-07-07 07:08:35] [Rank 0] step:5221/10000 train_time:355075ms step_avg:68.01ms
+[2025-07-07 07:08:37] [Rank 0] step:5241/10000 train_time:356434ms step_avg:68.01ms
+[2025-07-07 07:08:38] [Rank 0] step:5261/10000 train_time:357801ms step_avg:68.01ms
+[2025-07-07 07:08:39] [Rank 0] step:5281/10000 train_time:359169ms step_avg:68.01ms
+[2025-07-07 07:08:41] [Rank 0] step:5301/10000 train_time:360538ms step_avg:68.01ms
+[2025-07-07 07:08:42] [Rank 0] step:5321/10000 train_time:361908ms step_avg:68.02ms
+[2025-07-07 07:08:43] [Rank 0] step:5341/10000 train_time:363278ms step_avg:68.02ms
+[2025-07-07 07:08:45] [Rank 0] step:5361/10000 train_time:364649ms step_avg:68.02ms
+[2025-07-07 07:08:46] [Rank 0] step:5381/10000 train_time:366021ms step_avg:68.02ms
+[2025-07-07 07:08:48] [Rank 0] step:5401/10000 train_time:368048ms step_avg:68.14ms
+[2025-07-07 07:08:49] [Rank 0] step:5421/10000 train_time:368788ms step_avg:68.03ms
+[2025-07-07 07:08:50] [Rank 0] step:5441/10000 train_time:370161ms step_avg:68.03ms
+[2025-07-07 07:08:52] [Rank 0] step:5461/10000 train_time:371533ms step_avg:68.03ms
+[2025-07-07 07:08:53] [Rank 0] step:5481/10000 train_time:372906ms step_avg:68.04ms
+[2025-07-07 07:08:54] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:08:55] [Rank 0] PRINT: step:5500/10000 train_loss:1.1357 val_loss:1.2036 train_time:374900ms step_avg:68.16ms
+[2025-07-07 07:08:55] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:08:55] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:08:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:14:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:14:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:14:17] [Rank 0] Total Loss: 5.6157
+[2025-07-07 07:14:17] [Rank 0] Total FTA: 0.1852
+[2025-07-07 07:14:17] [Rank 0] Group 0 Loss: 5.8046
+[2025-07-07 07:14:17] [Rank 0] Group 1 Loss: 5.2930
+[2025-07-07 07:14:17] [Rank 0] Group 2 Loss: 5.2416
+[2025-07-07 07:14:17] [Rank 0] Group 3 Loss: 5.7059
+[2025-07-07 07:14:17] [Rank 0] Group 4 Loss: 5.5662
+[2025-07-07 07:14:17] [Rank 0] Group 5 Loss: 5.7074
+[2025-07-07 07:14:17] [Rank 0] Group 6 Loss: 5.6169
+[2025-07-07 07:14:17] [Rank 0] Group 7 Loss: 5.6847
+[2025-07-07 07:14:17] [Rank 0] Group 8 Loss: 5.6087
+[2025-07-07 07:14:17] [Rank 0] Group 9 Loss: 5.6186
+[2025-07-07 07:14:17] [Rank 0] Group 10 Loss: 5.6404
+[2025-07-07 07:14:17] [Rank 0] Group 11 Loss: 5.6486
+[2025-07-07 07:14:17] [Rank 0] Group 0 FTA: 0.1873
+[2025-07-07 07:14:17] [Rank 0] Group 1 FTA: 0.1615
+[2025-07-07 07:14:17] [Rank 0] Group 2 FTA: 0.0964
+[2025-07-07 07:14:17] [Rank 0] Group 3 FTA: 0.2031
+[2025-07-07 07:14:17] [Rank 0] Group 4 FTA: 0.1354
+[2025-07-07 07:14:17] [Rank 0] Group 5 FTA: 0.2552
+[2025-07-07 07:14:17] [Rank 0] Group 6 FTA: 0.1458
+[2025-07-07 07:14:17] [Rank 0] Group 7 FTA: 0.2135
+[2025-07-07 07:14:17] [Rank 0] Group 8 FTA: 0.2161
+[2025-07-07 07:14:17] [Rank 0] Group 9 FTA: 0.2305
+[2025-07-07 07:14:17] [Rank 0] Group 10 FTA: 0.1777
+[2025-07-07 07:14:17] [Rank 0] Group 11 FTA: 0.1963
+[2025-07-07 07:14:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:14:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png +[2025-07-07 07:14:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png +[2025-07-07 07:14:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png +[2025-07-07 07:14:18] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png +[2025-07-07 07:14:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png +[2025-07-07 07:14:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png +[2025-07-07 07:14:19] [Rank 0] step:5501/10000 train_time:374909ms step_avg:68.15ms +[2025-07-07 07:14:19] [Rank 0] step:5501/10000 train_time:374909ms step_avg:68.15ms +[2025-07-07 07:14:20] [Rank 0] step:5521/10000 train_time:375662ms step_avg:68.04ms +[2025-07-07 07:14:20] [Rank 0] step:5521/10000 train_time:375662ms step_avg:68.04ms +[2025-07-07 07:14:21] [Rank 0] step:5541/10000 train_time:377020ms step_avg:68.04ms +[2025-07-07 07:14:21] [Rank 0] step:5541/10000 train_time:377020ms step_avg:68.04ms +[2025-07-07 07:14:23] [Rank 0] step:5561/10000 train_time:378384ms step_avg:68.04ms +[2025-07-07 07:14:23] [Rank 0] step:5561/10000 train_time:378384ms step_avg:68.04ms +[2025-07-07 07:14:24] [Rank 0] step:5581/10000 train_time:380419ms step_avg:68.16ms +[2025-07-07 07:14:24] [Rank 0] step:5581/10000 train_time:380419ms step_avg:68.16ms +[2025-07-07 07:14:25] [Rank 0] step:5601/10000 train_time:381158ms step_avg:68.05ms +[2025-07-07 07:14:25] [Rank 0] step:5601/10000 train_time:381158ms step_avg:68.05ms +[2025-07-07 07:14:27] [Rank 0] step:5621/10000 train_time:382523ms step_avg:68.05ms +[2025-07-07 07:14:27] [Rank 0] step:5621/10000 train_time:382523ms step_avg:68.05ms +[2025-07-07 07:14:28] [Rank 0] step:5641/10000 train_time:383889ms step_avg:68.05ms +[2025-07-07 07:14:28] [Rank 0] step:5641/10000 train_time:383889ms step_avg:68.05ms +[2025-07-07 07:14:30] [Rank 0] step:5661/10000 train_time:385254ms step_avg:68.05ms +[2025-07-07 07:14:30] [Rank 0] step:5661/10000 train_time:385254ms step_avg:68.05ms +[2025-07-07 07:14:31] [Rank 0] step:5681/10000 train_time:386619ms step_avg:68.05ms +[2025-07-07 07:14:31] [Rank 0] step:5681/10000 train_time:386619ms step_avg:68.05ms +[2025-07-07 07:14:32] [Rank 0] step:5701/10000 train_time:387986ms step_avg:68.06ms +[2025-07-07 07:14:32] [Rank 0] step:5701/10000 train_time:387986ms step_avg:68.06ms +[2025-07-07 07:14:34] [Rank 0] step:5721/10000 train_time:389352ms step_avg:68.06ms +[2025-07-07 07:14:34] [Rank 0] step:5721/10000 train_time:389352ms step_avg:68.06ms +[2025-07-07 07:14:35] [Rank 0] step:5741/10000 train_time:390718ms step_avg:68.06ms +[2025-07-07 07:14:35] [Rank 0] step:5741/10000 train_time:390718ms step_avg:68.06ms +[2025-07-07 07:14:36] [Rank 0] step:5761/10000 train_time:392135ms step_avg:68.07ms +[2025-07-07 07:14:36] [Rank 0] step:5761/10000 train_time:392135ms step_avg:68.07ms +[2025-07-07 07:14:38] [Rank 0] step:5781/10000 train_time:393511ms step_avg:68.07ms +[2025-07-07 07:14:38] [Rank 0] step:5781/10000 train_time:393511ms step_avg:68.07ms +[2025-07-07 07:14:39] [Rank 0] step:5801/10000 train_time:394881ms step_avg:68.07ms +[2025-07-07 07:14:39] [Rank 0] 
step:5801/10000 train_time:394881ms step_avg:68.07ms +[2025-07-07 07:14:41] [Rank 0] step:5821/10000 train_time:396251ms step_avg:68.07ms +[2025-07-07 07:14:41] [Rank 0] step:5821/10000 train_time:396251ms step_avg:68.07ms +[2025-07-07 07:14:42] [Rank 0] step:5841/10000 train_time:397621ms step_avg:68.07ms +[2025-07-07 07:14:42] [Rank 0] step:5841/10000 train_time:397621ms step_avg:68.07ms +[2025-07-07 07:14:43] [Rank 0] step:5861/10000 train_time:398993ms step_avg:68.08ms +[2025-07-07 07:14:43] [Rank 0] step:5861/10000 train_time:398993ms step_avg:68.08ms +[2025-07-07 07:14:45] [Rank 0] step:5881/10000 train_time:400364ms step_avg:68.08ms +[2025-07-07 07:14:45] [Rank 0] step:5881/10000 train_time:400364ms step_avg:68.08ms +[2025-07-07 07:14:46] [Rank 0] step:5901/10000 train_time:401736ms step_avg:68.08ms +[2025-07-07 07:14:46] [Rank 0] step:5901/10000 train_time:401736ms step_avg:68.08ms +[2025-07-07 07:14:47] [Rank 0] step:5921/10000 train_time:403108ms step_avg:68.08ms +[2025-07-07 07:14:47] [Rank 0] step:5921/10000 train_time:403108ms step_avg:68.08ms +[2025-07-07 07:14:49] [Rank 0] step:5941/10000 train_time:405155ms step_avg:68.20ms +[2025-07-07 07:14:49] [Rank 0] step:5941/10000 train_time:405155ms step_avg:68.20ms +[2025-07-07 07:14:50] [Rank 0] step:5961/10000 train_time:405895ms step_avg:68.09ms +[2025-07-07 07:14:50] [Rank 0] step:5961/10000 train_time:405895ms step_avg:68.09ms +[2025-07-07 07:14:52] [Rank 0] step:5981/10000 train_time:407267ms step_avg:68.09ms +[2025-07-07 07:14:52] [Rank 0] step:5981/10000 train_time:407267ms step_avg:68.09ms +[2025-07-07 07:14:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:14:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:14:54] [Rank 0] PRINT: step:6000/10000 train_loss:1.0893 val_loss:1.2022 train_time:409264ms step_avg:68.21ms +[2025-07-07 07:14:54] [Rank 0] PRINT: step:6000/10000 train_loss:1.0893 val_loss:1.2022 train_time:409264ms step_avg:68.21ms +[2025-07-07 07:14:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:14:54] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:14:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 07:14:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 07:14:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:20:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:20:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:20:17] [Rank 0] Total Loss: 5.6065
+[2025-07-07 07:20:17] [Rank 0] Total FTA: 0.1992
+[2025-07-07 07:20:17] [Rank 0] Group 0 Loss: 5.7356
+[2025-07-07 07:20:17] [Rank 0] Group 1 Loss: 5.3090
+[2025-07-07 07:20:17] [Rank 0] Group 2 Loss: 5.2855
+[2025-07-07 07:20:17] [Rank 0] Group 3 Loss: 5.8208
+[2025-07-07 07:20:17] [Rank 0] Group 4 Loss: 5.5194
+[2025-07-07 07:20:17] [Rank 0] Group 5 Loss: 5.6678
+[2025-07-07 07:20:17] [Rank 0] Group 6 Loss: 5.5738
+[2025-07-07 07:20:17] [Rank 0] Group 7 Loss: 5.6517
+[2025-07-07 07:20:17] [Rank 0] Group 8 Loss: 5.6111
+[2025-07-07 07:20:17] [Rank 0] Group 9 Loss: 5.6228
+[2025-07-07 07:20:17] [Rank 0] Group 10 Loss: 5.6127
+[2025-07-07 07:20:17] [Rank 0] Group 11 Loss: 5.6575
+[2025-07-07 07:20:17] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 07:20:17] [Rank 0] Group 1 FTA: 0.1823
+[2025-07-07 07:20:17] [Rank 0] Group 2 FTA: 0.3047
+[2025-07-07 07:20:17] [Rank 0] Group 3 FTA: 0.2604
+[2025-07-07 07:20:17] [Rank 0] Group 4 FTA: 0.1745
+[2025-07-07 07:20:17] [Rank 0] Group 5 FTA: 0.2474
+[2025-07-07 07:20:17] [Rank 0] Group 6 FTA: 0.2135
+[2025-07-07 07:20:17] [Rank 0] Group 7 FTA: 0.2448
+[2025-07-07 07:20:17] [Rank 0] Group 8 FTA: 0.2214
+[2025-07-07 07:20:17] [Rank 0] Group 9 FTA: 0.2578
+[2025-07-07 07:20:17] [Rank 0] Group 10 FTA: 0.2012
+[2025-07-07 07:20:17] [Rank 0] Group 11 FTA: 0.2373
+[2025-07-07 07:20:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:20:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:20:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:20:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:20:19] [Rank 0] step:6001/10000 train_time:409275ms step_avg:68.20ms
+[2025-07-07 07:20:20] [Rank 0] step:6021/10000 train_time:410056ms step_avg:68.10ms
+[2025-07-07 07:20:22] [Rank 0] step:6041/10000 train_time:411417ms step_avg:68.10ms
+[2025-07-07 07:20:23] [Rank 0] step:6061/10000 train_time:412781ms step_avg:68.10ms
+[2025-07-07 07:20:24] [Rank 0] step:6081/10000 train_time:414142ms step_avg:68.10ms
+[2025-07-07 07:20:26] [Rank 0] step:6101/10000 train_time:415506ms step_avg:68.10ms
+[2025-07-07 07:20:27] [Rank 0] step:6121/10000 train_time:417122ms step_avg:68.15ms
+[2025-07-07 07:20:28] [Rank 0] step:6141/10000 train_time:418279ms step_avg:68.11ms
+[2025-07-07 07:20:30] [Rank 0] step:6161/10000 train_time:419644ms step_avg:68.11ms
+[2025-07-07 07:20:31] [Rank 0] step:6181/10000 train_time:421010ms step_avg:68.11ms
+[2025-07-07 07:20:33] [Rank 0] step:6201/10000 train_time:422377ms step_avg:68.11ms
+[2025-07-07 07:20:34] [Rank 0] step:6221/10000 train_time:423744ms step_avg:68.12ms
+[2025-07-07 07:20:35] [Rank 0] step:6241/10000 train_time:425111ms step_avg:68.12ms
+[2025-07-07 07:20:37] [Rank 0] step:6261/10000 train_time:426479ms step_avg:68.12ms
+[2025-07-07 07:20:38] [Rank 0] step:6281/10000 train_time:427847ms step_avg:68.12ms
+[2025-07-07 07:20:39] [Rank 0] step:6301/10000 train_time:429261ms step_avg:68.13ms
+[2025-07-07 07:20:41] [Rank 0] step:6321/10000 train_time:430632ms step_avg:68.13ms
+[2025-07-07 07:20:42] [Rank 0] step:6341/10000 train_time:432000ms step_avg:68.13ms
+[2025-07-07 07:20:44] [Rank 0] step:6361/10000 train_time:433367ms step_avg:68.13ms
+[2025-07-07 07:20:45] [Rank 0] step:6381/10000 train_time:434737ms step_avg:68.13ms
+[2025-07-07 07:20:46] [Rank 0] step:6401/10000 train_time:436106ms step_avg:68.13ms
+[2025-07-07 07:20:48] [Rank 0] step:6421/10000 train_time:437475ms step_avg:68.13ms
+[2025-07-07 07:20:49] [Rank 0] step:6441/10000 train_time:438844ms step_avg:68.13ms
+[2025-07-07 07:20:50] [Rank 0] step:6461/10000 train_time:440213ms step_avg:68.13ms
+[2025-07-07 07:20:52] [Rank 0] step:6481/10000 train_time:441583ms step_avg:68.14ms
+[2025-07-07 07:20:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:20:54] [Rank 0] PRINT: step:6500/10000 train_loss:1.0446 val_loss:1.1745 train_time:443608ms step_avg:68.25ms
+[2025-07-07 07:20:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:20:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:20:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:26:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:26:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:26:17] [Rank 0] Total Loss: 5.5749
+[2025-07-07 07:26:17] [Rank 0] Total FTA: 0.2665
+[2025-07-07 07:26:17] [Rank 0] Group 0 Loss: 5.7450
+[2025-07-07 07:26:17] [Rank 0] Group 1 Loss: 5.1676
+[2025-07-07 07:26:17] [Rank 0] Group 2 Loss: 5.3811
+[2025-07-07 07:26:17] [Rank 0] Group 3 Loss: 5.7649
+[2025-07-07 07:26:17] [Rank 0] Group 4 Loss: 5.5230
+[2025-07-07 07:26:17] [Rank 0] Group 5 Loss: 5.6464
+[2025-07-07 07:26:17] [Rank 0] Group 6 Loss: 5.5754
+[2025-07-07 07:26:17] [Rank 0] Group 7 Loss: 5.5925
+[2025-07-07 07:26:17] [Rank 0] Group 8 Loss: 5.5461
+[2025-07-07 07:26:17] [Rank 0] Group 9 Loss: 5.5239
+[2025-07-07 07:26:17] [Rank 0] Group 10 Loss: 5.6231
+[2025-07-07 07:26:17] [Rank 0] Group 11 Loss: 5.5868
+[2025-07-07 07:26:17] [Rank 0] Group 0 FTA: 0.3108
+[2025-07-07 07:26:17] [Rank 0] Group 1 FTA: 0.1615
+[2025-07-07 07:26:17] [Rank 0] Group 2 FTA: 0.2734
+[2025-07-07 07:26:17] [Rank 0] Group 3 FTA: 0.2995
+[2025-07-07 07:26:17] [Rank 0] Group 4 FTA: 0.2865
+[2025-07-07 07:26:17] [Rank 0] Group 5 FTA: 0.2656
+[2025-07-07 07:26:17] [Rank 0] Group 6 FTA: 0.2422
+[2025-07-07 07:26:17] [Rank 0] Group 7 FTA: 0.2708
+[2025-07-07 07:26:17] [Rank 0] Group 8 FTA: 0.2578
+[2025-07-07 07:26:17] [Rank 0] Group 9 FTA: 0.2852
+[2025-07-07 07:26:17] [Rank 0] Group 10 FTA: 0.2715
+[2025-07-07 07:26:17] [Rank 0] Group 11 FTA: 0.2539
+[2025-07-07 07:26:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:26:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:26:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:26:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:26:19] [Rank 0] step:6501/10000 train_time:443618ms step_avg:68.24ms
+[2025-07-07 07:26:20] [Rank 0] step:6521/10000 train_time:444383ms step_avg:68.15ms
+[2025-07-07 07:26:22] [Rank 0] step:6541/10000 train_time:445745ms step_avg:68.15ms
+[2025-07-07 07:26:23] [Rank 0] step:6561/10000 train_time:447108ms step_avg:68.15ms
+[2025-07-07 07:26:24] [Rank 0] step:6581/10000 train_time:448470ms step_avg:68.15ms
+[2025-07-07 07:26:26] [Rank 0] step:6601/10000 train_time:449834ms step_avg:68.15ms
+[2025-07-07 07:26:27] [Rank 0] step:6621/10000 train_time:451197ms step_avg:68.15ms
+[2025-07-07 07:26:28] [Rank 0] step:6641/10000 train_time:452560ms step_avg:68.15ms
+[2025-07-07 07:26:30] [Rank 0] step:6661/10000 train_time:453927ms step_avg:68.15ms
+[2025-07-07 07:26:31] [Rank 0] step:6681/10000 train_time:455339ms step_avg:68.15ms
+[2025-07-07 07:26:33] [Rank 0] step:6701/10000 train_time:456706ms step_avg:68.15ms
+[2025-07-07 07:26:34] [Rank 0] step:6721/10000 train_time:458075ms step_avg:68.16ms
+[2025-07-07 07:26:35] [Rank 0] step:6741/10000 train_time:459441ms step_avg:68.16ms
+[2025-07-07 07:26:37] [Rank 0] step:6761/10000 train_time:460809ms step_avg:68.16ms
+[2025-07-07 07:26:38] [Rank 0] step:6781/10000 train_time:462256ms step_avg:68.17ms
+[2025-07-07 07:26:40] [Rank 0] step:6801/10000 train_time:463622ms step_avg:68.17ms
+[2025-07-07 07:26:41] [Rank 0] step:6821/10000 train_time:464992ms step_avg:68.17ms
+[2025-07-07 07:26:42] [Rank 0] step:6841/10000 train_time:467018ms step_avg:68.27ms
+[2025-07-07 07:26:44] [Rank 0] step:6861/10000 train_time:467758ms step_avg:68.18ms
+[2025-07-07 07:26:45] [Rank 0] step:6881/10000 train_time:469128ms step_avg:68.18ms
+[2025-07-07 07:26:46] [Rank 0] step:6901/10000 train_time:470498ms step_avg:68.18ms
+[2025-07-07 07:26:48] [Rank 0] step:6921/10000 train_time:471867ms step_avg:68.18ms
+[2025-07-07 07:26:49] [Rank 0] step:6941/10000 train_time:473236ms step_avg:68.18ms
+[2025-07-07 07:26:50] [Rank 0] step:6961/10000 train_time:474606ms step_avg:68.18ms
+[2025-07-07 07:26:52] [Rank 0] step:6981/10000 train_time:475976ms step_avg:68.18ms
+[2025-07-07 07:26:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:26:54] [Rank 0] PRINT: step:7000/10000 train_loss:1.0094 val_loss:1.1898 train_time:477967ms step_avg:68.28ms
+[2025-07-07 07:26:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:26:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:26:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:32:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:32:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:32:17] [Rank 0] Total Loss: 5.7107
+[2025-07-07 07:32:17] [Rank 0] Total FTA: 0.2402
+[2025-07-07 07:32:17] [Rank 0] Group 0 Loss: 5.8768
+[2025-07-07 07:32:17] [Rank 0] Group 1 Loss: 5.1715
+[2025-07-07 07:32:17] [Rank 0] Group 2 Loss: 5.2982
+[2025-07-07 07:32:17] [Rank 0] Group 3 Loss: 5.8534
+[2025-07-07 07:32:17] [Rank 0] Group 4 Loss: 5.6877
+[2025-07-07 07:32:17] [Rank 0] Group 5 Loss: 5.9717
+[2025-07-07 07:32:17] [Rank 0] Group 6 Loss: 5.7144
+[2025-07-07 07:32:17] [Rank 0] Group 7 Loss: 5.7219
+[2025-07-07 07:32:17] [Rank 0] Group 8 Loss: 5.7057
+[2025-07-07 07:32:17] [Rank 0] Group 9 Loss: 5.7508
+[2025-07-07 07:32:17] [Rank 0] Group 10 Loss: 5.7626
+[2025-07-07 07:32:17] [Rank 0] Group 11 Loss: 5.7604
+[2025-07-07 07:32:17] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 07:32:17] [Rank 0] Group 1 FTA: 0.2891
+[2025-07-07 07:32:17] [Rank 0] Group 2 FTA: 0.3255
+[2025-07-07 07:32:17] [Rank 0] Group 3 FTA: 0.2891
+[2025-07-07 07:32:17] [Rank 0] Group 4 FTA: 0.1927
+[2025-07-07 07:32:17] [Rank 0] Group 5 FTA: 0.2917
+[2025-07-07 07:32:17] [Rank 0] Group 6 FTA: 0.2839
+[2025-07-07 07:32:17] [Rank 0] Group 7 FTA: 0.2292
+[2025-07-07 07:32:17] [Rank 0] Group 8 FTA: 0.3333
+[2025-07-07 07:32:17] [Rank 0] Group 9 FTA: 0.2500
+[2025-07-07 07:32:17] [Rank 0] Group 10 FTA: 0.2891
+[2025-07-07 07:32:17] [Rank 0] Group 11 FTA: 0.2764
+[2025-07-07 07:32:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:32:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:32:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:32:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:32:19] [Rank 0] step:7001/10000 train_time:477977ms step_avg:68.27ms
+[2025-07-07 07:32:20] [Rank 0] step:7021/10000 train_time:478807ms step_avg:68.20ms
+[2025-07-07 07:32:22] [Rank 0] step:7041/10000 train_time:480171ms step_avg:68.20ms
+[2025-07-07 07:32:23] [Rank 0] step:7061/10000 train_time:481533ms step_avg:68.20ms
+[2025-07-07 07:32:25] [Rank 0] step:7081/10000 train_time:482897ms step_avg:68.20ms
+[2025-07-07 07:32:26] [Rank 0] step:7101/10000 train_time:484261ms step_avg:68.20ms
+[2025-07-07 07:32:27] [Rank 0] step:7121/10000 train_time:485626ms step_avg:68.20ms
+[2025-07-07 07:32:29] [Rank 0] step:7141/10000 train_time:486993ms step_avg:68.20ms
+[2025-07-07 07:32:30] [Rank 0] step:7161/10000 train_time:488358ms step_avg:68.20ms
+[2025-07-07 07:32:31] [Rank 0] step:7181/10000 train_time:489724ms step_avg:68.20ms
+[2025-07-07 07:32:33] [Rank 0] step:7201/10000 train_time:491342ms step_avg:68.23ms
+[2025-07-07 07:32:34] [Rank 0] step:7221/10000 train_time:492493ms step_avg:68.20ms
+[2025-07-07 07:32:35] [Rank 0] step:7241/10000 train_time:493862ms step_avg:68.20ms
+[2025-07-07 07:32:37] [Rank 0] step:7261/10000 train_time:495229ms step_avg:68.20ms
+[2025-07-07 07:32:38] [Rank 0] step:7281/10000 train_time:496597ms step_avg:68.20ms
+[2025-07-07 07:32:40] [Rank 0] step:7301/10000 train_time:497965ms step_avg:68.21ms
+[2025-07-07 07:32:41] [Rank 0] step:7321/10000 train_time:499334ms step_avg:68.21ms
+[2025-07-07 07:32:42] [Rank 0] step:7341/10000 train_time:500704ms step_avg:68.21ms
+[2025-07-07 07:32:44] [Rank 0] step:7361/10000 train_time:502072ms step_avg:68.21ms
+[2025-07-07 07:32:45] [Rank 0] step:7381/10000 train_time:504107ms step_avg:68.30ms
+[2025-07-07 07:32:46] [Rank 0] step:7401/10000 train_time:504845ms step_avg:68.21ms
+[2025-07-07 07:32:48] [Rank 0] step:7421/10000 train_time:506215ms step_avg:68.21ms
+[2025-07-07 07:32:49] [Rank 0] step:7441/10000 train_time:507584ms step_avg:68.21ms
+[2025-07-07 07:32:51] [Rank 0] step:7461/10000 train_time:508954ms step_avg:68.22ms
+[2025-07-07 07:32:52] [Rank 0] step:7481/10000 train_time:510325ms step_avg:68.22ms
+[2025-07-07 07:32:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:32:54] [Rank 0] PRINT: step:7500/10000 train_loss:0.9794 val_loss:1.1355 train_time:512319ms step_avg:68.31ms
+[2025-07-07 07:32:54] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:32:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:32:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:38:19] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:38:19] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:38:19] [Rank 0] Total Loss: 5.6764
+[2025-07-07 07:38:19] [Rank 0] Total FTA: 0.3405
+[2025-07-07 07:38:19] [Rank 0] Group 0 Loss: 5.8589
+[2025-07-07 07:38:19] [Rank 0] Group 1 Loss: 5.2732
+[2025-07-07 07:38:19] [Rank 0] Group 2 Loss: 5.3251
+[2025-07-07 07:38:19] [Rank 0] Group 3 Loss: 5.9806
+[2025-07-07 07:38:19] [Rank 0] Group 4 Loss: 5.5640
+[2025-07-07 07:38:19] [Rank 0] Group 5 Loss: 5.7682
+[2025-07-07 07:38:19] [Rank 0] Group 6 Loss: 5.7164
+[2025-07-07 07:38:19] [Rank 0] Group 7 Loss: 5.6653
+[2025-07-07 07:38:19] [Rank 0] Group 8 Loss: 5.6478
+[2025-07-07 07:38:19] [Rank 0] Group 9 Loss: 5.6635
+[2025-07-07 07:38:19] [Rank 0] Group 10 Loss: 5.6702
+[2025-07-07 07:38:19] [Rank 0] Group 11 Loss: 5.7221
+[2025-07-07 07:38:19] [Rank 0] Group 0 FTA: 0.3277
+[2025-07-07 07:38:19] [Rank 0] Group 1 FTA: 0.1536
+[2025-07-07 07:38:19] [Rank 0] Group 2 FTA: 0.4609
+[2025-07-07 07:38:19] [Rank 0] Group 3 FTA: 0.3932
+[2025-07-07 07:38:19] [Rank 0] Group 4 FTA: 0.3125
+[2025-07-07 07:38:19] [Rank 0] Group 5 FTA: 0.3281
+[2025-07-07 07:38:19] [Rank 0] Group 6 FTA: 0.3151
+[2025-07-07 07:38:20] [Rank 0] Group 7 FTA: 0.3307
+[2025-07-07 07:38:20] [Rank 0] Group 8 FTA: 0.3750
+[2025-07-07 07:38:20] [Rank 0] Group 9 FTA: 0.3711
+[2025-07-07 07:38:20] [Rank 0] Group 10 FTA: 0.3613
+[2025-07-07 07:38:20] [Rank 0] Group 11 FTA: 0.3525
+[2025-07-07 07:38:20] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:38:20] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:38:21] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:38:21] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:38:21] [Rank 0] step:7501/10000 train_time:512328ms step_avg:68.30ms
+[2025-07-07 07:38:22] [Rank 0] step:7521/10000 train_time:513100ms step_avg:68.22ms
+[2025-07-07 07:38:24] [Rank 0] step:7541/10000 train_time:514460ms step_avg:68.22ms
+[2025-07-07 07:38:25] [Rank 0] step:7561/10000 train_time:516506ms step_avg:68.31ms
+[2025-07-07 07:38:26] [Rank 0] step:7581/10000 train_time:517239ms step_avg:68.23ms
+[2025-07-07 07:38:28] [Rank 0] step:7601/10000 train_time:518602ms step_avg:68.23ms
+[2025-07-07 07:38:29] [Rank 0] step:7621/10000 train_time:519965ms step_avg:68.23ms
+[2025-07-07 07:38:31] [Rank 0] step:7641/10000 train_time:521331ms step_avg:68.23ms
+[2025-07-07 07:38:32] [Rank 0] step:7661/10000 train_time:522696ms step_avg:68.23ms
+[2025-07-07 07:38:33] [Rank 0] step:7681/10000 train_time:524064ms step_avg:68.23ms
+[2025-07-07 07:38:35] [Rank 0] step:7701/10000 train_time:525430ms step_avg:68.23ms
+[2025-07-07 07:38:36] [Rank 0] step:7721/10000 train_time:526801ms step_avg:68.23ms
+[2025-07-07 07:38:37] [Rank 0] step:7741/10000 train_time:528421ms step_avg:68.26ms
+[2025-07-07 07:38:39] [Rank 0] step:7761/10000 train_time:529591ms step_avg:68.24ms
+[2025-07-07 07:38:40] [Rank 0] step:7781/10000 train_time:530962ms step_avg:68.24ms
+[2025-07-07 07:38:42] [Rank 0] step:7801/10000 train_time:532329ms step_avg:68.24ms
+[2025-07-07 07:38:43] [Rank 0] step:7821/10000 train_time:533699ms step_avg:68.24ms
+[2025-07-07 07:38:44] [Rank 0] step:7841/10000 train_time:535069ms step_avg:68.24ms
+[2025-07-07 07:38:46] [Rank 0] step:7861/10000 train_time:536440ms step_avg:68.24ms
+[2025-07-07 07:38:47] [Rank 0] step:7881/10000 train_time:537809ms step_avg:68.24ms
+[2025-07-07 07:38:48] [Rank 0] step:7901/10000 train_time:539180ms step_avg:68.24ms
+[2025-07-07 07:38:50] [Rank 0] step:7921/10000 train_time:540801ms step_avg:68.27ms
+[2025-07-07 07:38:51] [Rank 0] step:7941/10000 train_time:541946ms step_avg:68.25ms
+[2025-07-07 07:38:53] [Rank 0] step:7961/10000 train_time:543317ms step_avg:68.25ms
+[2025-07-07 07:38:54] [Rank 0] step:7981/10000 train_time:544692ms step_avg:68.25ms
+[2025-07-07 07:38:55] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:38:56] [Rank 0] PRINT: step:8000/10000 train_loss:0.9527 val_loss:1.1635 train_time:546688ms step_avg:68.34ms
+[2025-07-07 07:38:56] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:38:56] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:38:56] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:44:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:44:21] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:44:21] [Rank 0] Total Loss: 5.7068
+[2025-07-07 07:44:21] [Rank 0] Total FTA: 0.2970
+[2025-07-07 07:44:21] [Rank 0] Group 0 Loss: 5.8258
+[2025-07-07 07:44:21] [Rank 0] Group 1 Loss: 5.3092
+[2025-07-07 07:44:21] [Rank 0] Group 2 Loss: 5.2556
+[2025-07-07 07:44:21] [Rank 0] Group 3 Loss: 5.8916
+[2025-07-07 07:44:22] [Rank 0] Group 4 Loss: 5.7249
+[2025-07-07 07:44:22] [Rank 0] Group 5 Loss: 5.9132
+[2025-07-07 07:44:22] [Rank 0] Group 6 Loss: 5.6826
+[2025-07-07 07:44:22] [Rank 0] Group 7 Loss: 5.6999
+[2025-07-07 07:44:22] [Rank 0] Group 8 Loss: 5.7277
+[2025-07-07 07:44:22] [Rank 0] Group 9 Loss: 5.7485
+[2025-07-07 07:44:22] [Rank 0] Group 10 Loss: 5.7548
+[2025-07-07 07:44:22] [Rank 0] Group 11 Loss: 5.7514
+[2025-07-07 07:44:22] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 07:44:22] [Rank 0] Group 1 FTA: 0.5365
+[2025-07-07 07:44:22] [Rank 0] Group 2 FTA: 0.5677
+[2025-07-07 07:44:22] [Rank 0] Group 3 FTA: 0.2266
+[2025-07-07 07:44:22] [Rank 0] Group 4 FTA: 0.2214
+[2025-07-07 07:44:22] [Rank 0] Group 5 FTA: 0.4010
+[2025-07-07 07:44:22] [Rank 0] Group 6 FTA: 0.2682
+[2025-07-07 07:44:22] [Rank 0] Group 7 FTA: 0.3255
+[2025-07-07 07:44:22] [Rank 0] Group 8 FTA: 0.3281
+[2025-07-07 07:44:22] [Rank 0] Group 9 FTA: 0.2852
+[2025-07-07 07:44:22] [Rank 0] Group 10 FTA: 0.3730
+[2025-07-07 07:44:22] [Rank 0] Group 11 FTA: 0.2979
+[2025-07-07 07:44:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:44:22] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:44:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:44:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:44:23] [Rank 0] step:8001/10000 train_time:546698ms step_avg:68.33ms
+[2025-07-07 07:44:24] [Rank 0] step:8021/10000 train_time:547450ms step_avg:68.25ms
+[2025-07-07 07:44:26] [Rank 0] step:8041/10000 train_time:548810ms step_avg:68.25ms
+[2025-07-07 07:44:27] [Rank 0] step:8061/10000 train_time:550172ms step_avg:68.25ms
+[2025-07-07 07:44:29] [Rank 0] step:8081/10000 train_time:551535ms step_avg:68.25ms
+[2025-07-07 07:44:30] [Rank 0] step:8101/10000 train_time:552900ms step_avg:68.25ms
+[2025-07-07 07:44:31] [Rank 0] step:8121/10000 train_time:554265ms step_avg:68.25ms
+[2025-07-07 07:44:33] [Rank 0] step:8141/10000 train_time:555629ms step_avg:68.25ms
+[2025-07-07 07:44:34] [Rank 0] step:8161/10000 train_time:556995ms step_avg:68.25ms
+[2025-07-07 07:44:35] [Rank 0] step:8181/10000 train_time:558362ms step_avg:68.25ms
+[2025-07-07 07:44:37] [Rank 0] step:8201/10000 train_time:559729ms step_avg:68.25ms
+[2025-07-07 07:44:38] [Rank 0] step:8221/10000 train_time:561096ms step_avg:68.25ms
+[2025-07-07 07:44:39] [Rank 0] step:8241/10000 train_time:562463ms step_avg:68.25ms
+[2025-07-07 07:44:41] [Rank 0] step:8261/10000 train_time:563830ms step_avg:68.25ms
+[2025-07-07 07:44:42] [Rank 0] step:8281/10000 train_time:565199ms step_avg:68.25ms
+[2025-07-07 07:44:44] [Rank 0] step:8301/10000 train_time:566595ms step_avg:68.26ms
+[2025-07-07 07:44:45] [Rank 0] step:8321/10000 train_time:567965ms step_avg:68.26ms
+[2025-07-07 07:44:46] [Rank 0] step:8341/10000 train_time:569335ms step_avg:68.26ms
+[2025-07-07 07:44:48] [Rank 0] step:8361/10000 train_time:570704ms step_avg:68.26ms
+[2025-07-07 07:44:49] [Rank 0] step:8381/10000 train_time:572075ms step_avg:68.26ms
+[2025-07-07 07:44:50] [Rank 0] step:8401/10000 train_time:573447ms step_avg:68.26ms
+[2025-07-07 07:44:52] [Rank 0] step:8421/10000 train_time:574818ms step_avg:68.26ms
+[2025-07-07 07:44:53] [Rank 0] step:8441/10000 train_time:576190ms step_avg:68.26ms
+[2025-07-07 07:44:55] [Rank 0] step:8461/10000 train_time:577608ms step_avg:68.27ms
+[2025-07-07 07:44:56] [Rank 0] step:8481/10000 train_time:578981ms step_avg:68.27ms
+[2025-07-07 07:44:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:44:58] [Rank 0] PRINT: step:8500/10000 train_loss:0.9328 val_loss:1.1365 train_time:580978ms step_avg:68.35ms
+[2025-07-07 07:44:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:44:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:44:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:50:21] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:50:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:50:22] [Rank 0] Total Loss: 5.6803
+[2025-07-07 07:50:22] [Rank 0] Total FTA: 0.3519
+[2025-07-07 07:50:22] [Rank 0] Group 0 Loss: 5.7083
+[2025-07-07 07:50:22] [Rank 0] Group 1 Loss: 5.1472
+[2025-07-07 07:50:22] [Rank 0] Group 2 Loss: 5.4330
+[2025-07-07 07:50:22] [Rank 0] Group 3 Loss: 5.8063
+[2025-07-07 07:50:22] [Rank 0] Group 4 Loss: 5.7198
+[2025-07-07 07:50:22] [Rank 0] Group 5 Loss: 5.8552
+[2025-07-07 07:50:22] [Rank 0] Group 6 Loss: 5.7477
+[2025-07-07 07:50:22] [Rank 0] Group 7 Loss: 5.6943
+[2025-07-07 07:50:22] [Rank 0] Group 8 Loss: 5.6749
+[2025-07-07 07:50:22] [Rank 0] Group 9 Loss: 5.6968
+[2025-07-07 07:50:22] [Rank 0] Group 10 Loss: 5.7032
+[2025-07-07 07:50:22] [Rank 0] Group 11 Loss: 5.7800
+[2025-07-07 07:50:22] [Rank 0] Group 0 FTA: 0.1599
+[2025-07-07 07:50:22] [Rank 0] Group 1 FTA: 0.5182
+[2025-07-07 07:50:22] [Rank 0] Group 2 FTA: 0.4219
+[2025-07-07 07:50:22] [Rank 0] Group 3 FTA: 0.3281
+[2025-07-07 07:50:22] [Rank 0] Group 4 FTA: 0.3203
+[2025-07-07 07:50:22] [Rank 0] Group 5 FTA: 0.3906
+[2025-07-07 07:50:22] [Rank 0] Group 6 FTA: 0.3620
+[2025-07-07 07:50:22] [Rank 0] Group 7 FTA: 0.3984
+[2025-07-07 07:50:22] [Rank 0] Group 8 FTA: 0.4089
+[2025-07-07 07:50:22] [Rank 0] Group 9 FTA: 0.3867
+[2025-07-07 07:50:22] [Rank 0] Group 10 FTA: 0.3418
+[2025-07-07 07:50:22] [Rank 0] Group 11 FTA: 0.3672
+[2025-07-07 07:50:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:50:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:50:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:50:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:50:23] [Rank 0] step:8501/10000 train_time:580987ms step_avg:68.34ms
+[2025-07-07 07:50:25] [Rank 0] step:8521/10000 train_time:581753ms step_avg:68.27ms
+[2025-07-07 07:50:26] [Rank 0] step:8541/10000 train_time:583113ms step_avg:68.27ms
+[2025-07-07 07:50:27] [Rank 0] step:8561/10000 train_time:584475ms step_avg:68.27ms
+[2025-07-07 07:50:29] [Rank 0] step:8581/10000 train_time:585838ms step_avg:68.27ms
+[2025-07-07 07:50:30] [Rank 0] step:8601/10000 train_time:587201ms step_avg:68.27ms
+[2025-07-07 07:50:31] [Rank 0] step:8621/10000 train_time:588565ms step_avg:68.27ms
+[2025-07-07 07:50:33] [Rank 0] step:8641/10000 train_time:590582ms step_avg:68.35ms
+[2025-07-07 07:50:34] [Rank 0] step:8661/10000 train_time:591319ms step_avg:68.27ms
+[2025-07-07 07:50:35] [Rank 0] step:8681/10000 train_time:592684ms step_avg:68.27ms
+[2025-07-07 07:50:37] [Rank 0] step:8701/10000 train_time:594049ms step_avg:68.27ms
+[2025-07-07 07:50:38] [Rank 0] step:8721/10000 train_time:595416ms step_avg:68.27ms
+[2025-07-07 07:50:40] [Rank 0] step:8741/10000 train_time:596781ms step_avg:68.27ms
+[2025-07-07 07:50:41] [Rank 0] step:8761/10000 train_time:598148ms step_avg:68.27ms
+[2025-07-07 07:50:42] [Rank 0] step:8781/10000 train_time:599515ms step_avg:68.27ms
+[2025-07-07 07:50:44] [Rank 0] step:8801/10000 train_time:600883ms step_avg:68.27ms
+[2025-07-07 07:50:45] [Rank 0] step:8821/10000 train_time:602506ms step_avg:68.30ms
+[2025-07-07 07:50:46] [Rank 0] step:8841/10000 train_time:603672ms step_avg:68.28ms
+[2025-07-07 07:50:48] [Rank 0] step:8861/10000 train_time:605041ms step_avg:68.28ms
+[2025-07-07 07:50:49] [Rank 0] step:8881/10000 train_time:606410ms step_avg:68.28ms
+[2025-07-07 07:50:51] [Rank 0] step:8901/10000 train_time:607780ms step_avg:68.28ms
+[2025-07-07 07:50:52] [Rank 0] step:8921/10000 train_time:609148ms step_avg:68.28ms
+[2025-07-07 07:50:53] [Rank 0] step:8941/10000 train_time:610518ms step_avg:68.28ms
+[2025-07-07 07:50:55] [Rank 0] step:8961/10000 train_time:611888ms step_avg:68.28ms
+[2025-07-07 07:50:56] [Rank 0] step:8981/10000 train_time:613258ms step_avg:68.28ms
+[2025-07-07 07:50:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:50:58] [Rank 0] PRINT: step:9000/10000 train_loss:0.9109 val_loss:1.0654 train_time:615252ms step_avg:68.36ms
+[2025-07-07 07:50:58] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:50:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:50:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:56:22] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:56:22] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:56:22] [Rank 0] Total Loss: 5.7260
+[2025-07-07 07:56:22] [Rank 0] Total FTA: 0.3872
+[2025-07-07 07:56:22] [Rank 0] Group 0 Loss: 5.6842
+[2025-07-07 07:56:22] [Rank 0] Group 1 Loss: 5.3039
+[2025-07-07 07:56:22] [Rank 0] Group 2 Loss: 5.2682
+[2025-07-07 07:56:22] [Rank 0] Group 3 Loss: 5.8760
+[2025-07-07 07:56:22] [Rank 0] Group 4 Loss: 5.6863
+[2025-07-07 07:56:22] [Rank 0] Group 5 Loss: 6.1036
+[2025-07-07 07:56:22] [Rank 0] Group 6 Loss: 5.7798
+[2025-07-07 07:56:22] [Rank 0] Group 7 Loss: 5.7231
+[2025-07-07 07:56:22] [Rank 0] Group 8 Loss: 5.8373
+[2025-07-07 07:56:22] [Rank 0] Group 9 Loss: 5.7425
+[2025-07-07 07:56:22] [Rank 0] Group 10 Loss: 5.8155
+[2025-07-07 07:56:22] [Rank 0] Group 11 Loss: 5.7944
+[2025-07-07 07:56:22] [Rank 0] Group 0 FTA: 0.1730
+[2025-07-07 07:56:22] [Rank 0] Group 1 FTA: 0.5052
+[2025-07-07 07:56:22] [Rank 0] Group 2 FTA: 0.4740
+[2025-07-07 07:56:22] [Rank 0] Group 3 FTA: 0.4401
+[2025-07-07 07:56:22] [Rank 0] Group 4 FTA: 0.2396
+[2025-07-07 07:56:22] [Rank 0] Group 5 FTA: 0.4688
+[2025-07-07 07:56:22] [Rank 0] Group 6 FTA: 0.4010
+[2025-07-07 07:56:22] [Rank 0] Group 7 FTA: 0.4062
+[2025-07-07 07:56:22] [Rank 0] Group 8 FTA: 0.4219
+[2025-07-07 07:56:22] [Rank 0] Group 9 FTA: 0.3828
+[2025-07-07 07:56:22] [Rank 0] Group 10 FTA: 0.4355
+[2025-07-07 07:56:22] [Rank 0] Group 11 FTA: 0.4277
+[2025-07-07 07:56:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 07:56:23] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 07:56:23] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 07:56:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 07:56:24] [Rank 0] step:9001/10000 train_time:615268ms step_avg:68.36ms
+[2025-07-07 07:56:25] [Rank 0] step:9021/10000 train_time:616729ms step_avg:68.37ms
+[2025-07-07 07:56:27] [Rank 0] step:9041/10000 train_time:618090ms step_avg:68.37ms
+[2025-07-07 07:56:28] [Rank 0] step:9061/10000 train_time:619452ms step_avg:68.36ms
+[2025-07-07 07:56:29] [Rank 0] step:9081/10000 train_time:620816ms step_avg:68.36ms
+[2025-07-07 07:56:31] [Rank 0] step:9101/10000 train_time:622181ms step_avg:68.36ms
+[2025-07-07 07:56:32] [Rank 0] step:9121/10000 train_time:623544ms step_avg:68.36ms
+[2025-07-07 07:56:33] [Rank 0] step:9141/10000 train_time:624909ms step_avg:68.36ms
+[2025-07-07 07:56:35] [Rank 0] step:9161/10000 train_time:626275ms step_avg:68.36ms
+[2025-07-07 07:56:36] [Rank 0] step:9181/10000 train_time:627640ms step_avg:68.36ms
+[2025-07-07 07:56:38] [Rank 0] step:9201/10000 train_time:629051ms step_avg:68.37ms
+[2025-07-07 07:56:39] [Rank 0] step:9221/10000 train_time:630420ms step_avg:68.37ms
+[2025-07-07 07:56:40] [Rank 0] step:9241/10000 train_time:631788ms step_avg:68.37ms
+[2025-07-07 07:56:42] [Rank 0] step:9261/10000 train_time:633156ms step_avg:68.37ms
+[2025-07-07 07:56:43] [Rank 0] step:9281/10000 train_time:634525ms step_avg:68.37ms
+[2025-07-07 07:56:44] [Rank 0] step:9301/10000 train_time:635895ms step_avg:68.37ms
+[2025-07-07 07:56:46] [Rank 0] step:9321/10000 train_time:637262ms step_avg:68.37ms
+[2025-07-07 07:56:47] [Rank 0] step:9341/10000 train_time:638630ms step_avg:68.37ms
+[2025-07-07 07:56:49] [Rank 0] step:9361/10000 train_time:640666ms step_avg:68.44ms
+[2025-07-07 07:56:50] [Rank 0] step:9381/10000 train_time:641404ms step_avg:68.37ms
+[2025-07-07 07:56:51] [Rank 0] step:9401/10000 train_time:642773ms step_avg:68.37ms
+[2025-07-07 07:56:53] [Rank 0] step:9421/10000 train_time:644142ms step_avg:68.37ms
+[2025-07-07 07:56:54] [Rank 0] step:9441/10000 train_time:645512ms step_avg:68.37ms
+[2025-07-07 07:56:55] [Rank 0] step:9461/10000 train_time:646881ms step_avg:68.37ms
+[2025-07-07 07:56:57] [Rank 0] step:9481/10000 train_time:648251ms step_avg:68.37ms
+[2025-07-07 07:56:58] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:56:59] [Rank 0] PRINT: step:9500/10000 train_loss:0.8968 val_loss:1.0762 train_time:650244ms step_avg:68.45ms
+[2025-07-07 07:56:59] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:56:59] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:56:59] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:02:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:02:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:02:23] [Rank 0] Total Loss: 5.7442
+[2025-07-07 08:02:23] [Rank 0] Total FTA: 0.4001
+[2025-07-07 08:02:23] [Rank 0] Group 0 Loss: 5.8180
+[2025-07-07 08:02:23] [Rank 0] Group 1 Loss: 5.1607
+[2025-07-07 08:02:23] [Rank 0] Group 2 Loss: 5.4947
+[2025-07-07 08:02:23] [Rank 0] Group 3 Loss: 5.9062
+[2025-07-07 08:02:23] [Rank 0] Group 4 Loss: 5.6645
+[2025-07-07 08:02:23] [Rank 0] Group 5 Loss: 6.0764
+[2025-07-07 08:02:23] [Rank 0] Group 6 Loss: 5.7607
+[2025-07-07 08:02:23] [Rank 0] Group 7 Loss: 5.7684
+[2025-07-07 08:02:23] [Rank 0] Group 8 Loss: 5.6925
+[2025-07-07 08:02:23] [Rank 0] Group 9 Loss: 5.8039
+[2025-07-07 08:02:23] [Rank 0] Group 10 Loss: 5.8204
+[2025-07-07 08:02:23] [Rank 0] Group 11 Loss: 5.7967
+[2025-07-07 08:02:23] [Rank 0] Group 0 FTA: 0.3199
+[2025-07-07 08:02:23] [Rank 0] Group 1 FTA: 0.4792
+[2025-07-07 08:02:23] [Rank 0] Group 2 FTA: 0.4453
+[2025-07-07 08:02:23] [Rank 0] Group 3 FTA: 0.4323
+[2025-07-07 08:02:23] [Rank 0] Group 4 FTA: 0.3125
+[2025-07-07 08:02:23] [Rank 0] Group 5 FTA: 0.4115
+[2025-07-07 08:02:24] [Rank 0] Group 6 FTA: 0.3542
+[2025-07-07 08:02:24] [Rank 0] Group 7 FTA: 0.3828
+[2025-07-07 08:02:24] [Rank 0] Group 8 FTA: 0.4245
+[2025-07-07 08:02:24] [Rank 0] Group 9 FTA: 0.3906
+[2025-07-07 08:02:24] [Rank 0] Group 10 FTA: 0.4434
+[2025-07-07 08:02:24] [Rank 0] Group 11 FTA: 0.4258
+[2025-07-07 08:02:24] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 08:02:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 08:02:25] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 08:02:25] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 08:02:25] [Rank 0] step:9501/10000 train_time:650253ms step_avg:68.44ms
+[2025-07-07 08:02:26] [Rank 0] step:9521/10000 train_time:651027ms step_avg:68.38ms
+[2025-07-07 08:02:28] [Rank 0] step:9541/10000 train_time:652388ms step_avg:68.38ms
+[2025-07-07 08:02:29] [Rank 0] step:9561/10000 train_time:653749ms step_avg:68.38ms
+[2025-07-07 08:02:30] [Rank 0] step:9581/10000 train_time:655112ms step_avg:68.38ms
+[2025-07-07 08:02:32] [Rank 0] step:9601/10000 train_time:656475ms step_avg:68.38ms
+[2025-07-07 08:02:33] [Rank 0] step:9621/10000 train_time:657838ms step_avg:68.38ms
+[2025-07-07 08:02:34] [Rank 0] step:9641/10000 train_time:659203ms step_avg:68.37ms
+[2025-07-07 08:02:36] [Rank 0] step:9661/10000 train_time:660569ms step_avg:68.37ms
+[2025-07-07 08:02:37] [Rank 0] step:9681/10000 train_time:661934ms step_avg:68.37ms
+[2025-07-07 08:02:39] [Rank 0] step:9701/10000 train_time:663300ms step_avg:68.37ms
+[2025-07-07 08:02:40] [Rank 0] step:9721/10000 train_time:664667ms step_avg:68.37ms
+[2025-07-07 08:02:41] [Rank 0] step:9741/10000 train_time:666087ms step_avg:68.38ms
+[2025-07-07 08:02:43] [Rank 0] step:9761/10000 train_time:667454ms step_avg:68.38ms
+[2025-07-07 08:02:44] [Rank 0] step:9781/10000 train_time:668822ms step_avg:68.38ms
+[2025-07-07 08:02:45] [Rank 0] step:9801/10000 train_time:670190ms step_avg:68.38ms
+[2025-07-07 08:02:47] [Rank 0] step:9821/10000 train_time:671558ms step_avg:68.38ms
+[2025-07-07 08:02:48] [Rank 0] step:9841/10000 train_time:672927ms step_avg:68.38ms
+[2025-07-07 08:02:50] [Rank 0] step:9861/10000 train_time:674295ms step_avg:68.38ms
+[2025-07-07 08:02:51] [Rank 0] step:9881/10000 train_time:675664ms step_avg:68.38ms
+[2025-07-07 08:02:52] [Rank 0] step:9901/10000 train_time:677285ms step_avg:68.41ms
+[2025-07-07 08:02:54] [Rank 0] step:9921/10000 train_time:678441ms step_avg:68.38ms
+[2025-07-07 08:02:55] [Rank 0] step:9941/10000 train_time:679810ms step_avg:68.38ms
+[2025-07-07 08:02:56] [Rank 0] step:9961/10000 train_time:681179ms step_avg:68.38ms
+[2025-07-07 08:02:58] [Rank 0] step:9981/10000 train_time:682548ms step_avg:68.38ms
+[2025-07-07 08:02:59] [Rank 0] step:10000/10000 train_time:683851ms step_avg:68.39ms
+[2025-07-07 08:02:59] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:03:00] [Rank 0] PRINT: step:10000/10000 train_loss:0.8858 val_loss:1.0303 train_time:684549ms step_avg:68.45ms
+[2025-07-07 08:03:00] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:03:00] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:03:00] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:08:23] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:08:23] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:08:23] [Rank 0] Total Loss: 5.8147
+[2025-07-07 08:08:23] [Rank 0] Total FTA: 0.5086
+[2025-07-07 08:08:23] [Rank 0] Group 0 Loss: 6.0269
+[2025-07-07 08:08:23] [Rank 0] Group 1 Loss: 5.3107
+[2025-07-07 08:08:23] [Rank 0] Group 2 Loss: 5.4312
+[2025-07-07 08:08:23] [Rank 0] Group 3 Loss: 5.9126
+[2025-07-07 08:08:23] [Rank 0] Group 4 Loss: 5.7482
+[2025-07-07 08:08:23] [Rank 0] Group 5 Loss: 6.0381
+[2025-07-07 08:08:23] [Rank 0] Group 6 Loss: 5.8282
+[2025-07-07 08:08:23] [Rank 0] Group 7 Loss: 5.8353
+[2025-07-07 08:08:23] [Rank 0] Group 8 Loss: 5.7626
+[2025-07-07 08:08:23] [Rank 0] Group 9 Loss: 5.8372
+[2025-07-07 08:08:23] [Rank 0] Group 10 Loss: 5.8608
+[2025-07-07 08:08:23] [Rank 0] Group 11 Loss: 5.8709
+[2025-07-07 08:08:23] [Rank 0] Group 0 FTA: 0.5098
+[2025-07-07 08:08:23] [Rank 0] Group 1 FTA: 0.6641
+[2025-07-07 08:08:23] [Rank 0] Group 2 FTA: 0.5391
+[2025-07-07 08:08:23] [Rank 0] Group 3 FTA: 0.5755
+[2025-07-07 08:08:23] [Rank 0] Group 4 FTA: 0.4740
+[2025-07-07 08:08:23] [Rank 0] Group 5 FTA: 0.4453
+[2025-07-07 08:08:23] [Rank 0] Group 6 FTA: 0.4349
+[2025-07-07 08:08:23] [Rank 0] Group 7 FTA: 0.4714
+[2025-07-07 08:08:23] [Rank 0] Group 8 FTA: 0.5026
+[2025-07-07 08:08:23] [Rank 0] Group 9 FTA: 0.4727
+[2025-07-07 08:08:23] [Rank 0] Group 10 FTA: 0.5078
+[2025-07-07 08:08:23] [Rank 0] Group 11 FTA: 0.5029
+[2025-07-07 08:08:23] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_loss_curves.png
+[2025-07-07 08:08:24] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/per_class_acc_curves.png
+[2025-07-07 08:08:24] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_loss_curve.png
+[2025-07-07 08:08:24] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_45/total_acc_curve.png
+[2025-07-07 08:08:24] [Rank 0] step:10001/10000 train_time:684561ms step_avg:68.45ms
+[2025-07-07 08:08:24] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 08:08:24 2025 ---
+[2025-07-07 08:08:24] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10316 MiB
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/config.json b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..9cd5ce74df3f90916ac335bb525b9c824bcebb2a
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/config.json
@@ -0,0 +1,23 @@
+{
+    "cli_args": {
+        "unet": false,
+        "seed": 48,
+        "optimizer_mode": 5,
+        "model_parameterization": "qkvo",
+        "adam_lr": 0.005
+    },
+    "hyperparameters": {
+        "train_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin",
+        "val_files": "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin",
+        "val_tokens": 1966080,
+        "train_seq_len": 12288,
+        "val_seq_len": 65536,
+        "num_iterations": 10000,
+        "cooldown_frac": 0.8,
+        "vocab_size": 50257,
+        "val_loss_every": 500,
+        "save_checkpoint": false
+    },
+    "run_uuid_for_log": "5312631a-43bd-45ba-bf56-ba1cbd9bbf31",
+    "script_code_logged_at_start": true
+}
\ No newline at end of file
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..e8137f68104324431b4d53500cf87feb4c837610
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:459d9c84c7389e858838be733477d08fc26be854a707df2762aa2b0c13286337
+size 461699
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
new file mode 100644
index 0000000000000000000000000000000000000000..9f739eecf53ee01e6a0dc6d1bc3f9d67a57841f8
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:728436545b14b9208a0a3afbf9e0f92f8251523e0c1a0c48f86071d23b04854c
+size 497579
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..c3c5bde41bce3759f8c481b6c444364675772ede
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fb36874f951287e4fb879f8dceebf26f37230f51fbae873f04958390d7f08024
+size 101775
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
new file mode 100644
index 0000000000000000000000000000000000000000..3f80e8b51061f38264fc4e9b1568142d0bb8408e
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:841928657b856dcbe0837f4b396388b8e1fc4051e6c756cf8074dcc3b0a48a60
+size 127957
diff --git a/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/training_log_5312631a-43bd-45ba-bf56-ba1cbd9bbf31.txt b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/training_log_5312631a-43bd-45ba-bf56-ba1cbd9bbf31.txt
new file mode 100644
index 0000000000000000000000000000000000000000..fda706885ca258d6ff7851a1a2cde18c7f21cb75
--- /dev/null
+++ b/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/training_log_5312631a-43bd-45ba-bf56-ba1cbd9bbf31.txt
@@ -0,0 +1,5132 @@
+[2025-07-07 06:12:34] [Rank 0] PRINT: --- Script Start: Mon Jul 7 06:12:34 2025 ---
+[2025-07-07 06:12:34] [Rank 0] PRINT: Parsed CLI args: Namespace(unet=False, seed=48, optimizer_mode=5, model_parameterization='qkvo', adam_lr=0.005)
+[2025-07-07 06:12:34] [Rank 0] PRINT: Hyperparameters: Hyperparameters()
+[2025-07-07 06:12:34] [Rank 0] PRINT: Using fixed seed: 48
+[2025-07-07 06:12:34] [Rank 0] PRINT: Run directory: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48
+[2025-07-07 06:12:34] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)  # Print immediately for all ranks
+
+
+# -----------------------------------------------------------------------------
+# Our own simple Distributed Data Loader (KEEP AS IS)
+def _load_data_shard(file: Path):
+    header = torch.from_file(str(file), False, 256, dtype=torch.int32)
+    assert header[0] == 20240520, "magic number mismatch in the data .bin file"
+    assert header[1] == 1, "unsupported version"
+    num_tokens = int(header[2])
+    with file.open("rb", buffering=0) as f:
+        tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True)
+        f.seek(256 * 4)
+        nbytes = f.readinto(tokens.numpy())
+        assert nbytes == 2 * num_tokens, "number of tokens read does not match header"
+    return tokens
+
+def distributed_data_generator(filename_pattern: str, batch_size: int, rank: int, world_size: int):
+    files = [Path(file) for file in sorted(glob.glob(filename_pattern))]
+    assert batch_size % world_size == 0
+    local_batch_size = batch_size // world_size
+    file_iter = cycle(files)  # shards are cycled, so multi-epoch training simply wraps around
+    tokens, pos = _load_data_shard(next(file_iter)), 0
+    while True:
+        if pos + batch_size + 1 >= len(tokens):
+            tokens, pos = _load_data_shard(next(file_iter)), 0
+        buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1]
+        inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True)  # no sync on host side;
+        targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True)  # H2D in another stream isn't helpful.
+        pos += batch_size
+        yield inputs, targets
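+
+# Illustrative sketch (added commentary, not part of the original script): the loader
+# above expects shards laid out as a 256-int32 header -- magic 20240520, version 1,
+# token count -- followed by the raw uint16 token ids. A compatible shard could be
+# produced like this; the function name and default path are hypothetical.
+def _write_data_shard_sketch(token_ids, path="example_shard.bin"):
+    header = np.zeros(256, dtype=np.int32)
+    header[0] = 20240520        # magic number checked by _load_data_shard
+    header[1] = 1               # format version
+    header[2] = len(token_ids)  # number of uint16 tokens after the header
+    with open(path, "wb") as f:
+        f.write(header.tobytes())  # 256 * 4 bytes, matching f.seek(256 * 4) above
+        f.write(np.asarray(token_ids, dtype=np.uint16).tobytes())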
+
+# -----------------------------------------------------------------------------
+# int main
+parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon")
+parser.add_argument("--unet", action="store_true", help="Use U-net architecture")
+parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility")
+# --- MODIFICATION: Add optimizer_mode as a CLI argument ---
+parser.add_argument("--optimizer_mode", type=int, default=0,
+                    help="Defines how Muon is applied. "
+                         "0: Muon(All Hidden Attn+MLP - original); "
+                         "1: Muon(QK Attn)/Adam(VO Attn,MLP); "
+                         "2: Muon(VO Attn)/Adam(QK Attn,MLP); "
+                         "3: Muon(All Attn)/Adam(MLP); "
+                         "4: Muon(MLP)/Adam(All Attn); "
+                         "5: All Adam (No Muon, all applicable matrices to Adam); "
+                         "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP); "
+                         "7: Muon(VO Attn, MLP)/Adam(QK Attn); "
+                         "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)."
+                    )
+parser.add_argument("--model_parameterization", type=str, default="whole", choices=["whole", "qkvo"])
+
+parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.")
+
+
+exp_args = parser.parse_args()
+set_seed(exp_args.seed)
+
+# --- MODIFICATION: Import correct GPT model based on --unet flag ---
+if exp_args.unet:
+    print("Using U-net architecture")
+    from models.nano_GPT_unet import GPT
+elif exp_args.model_parameterization == "qkvo":
+    print("Using architecture (models.nano_GPT_qkvo) with CausalSelfAttention having q_w, k_w, v_w")
+    # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w
+    from models.nano_GPT_qkvo import GPT
+elif exp_args.model_parameterization == "whole":
+    print("Using original architecture")
+    from models.nano_GPT import GPT
+
+@dataclass
+class Hyperparameters:
+    # data
+    #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin"
+    #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin"
+    train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin"
+    val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin"
+    #val_tokens = 1966080
+    #val_tokens = 10485760
+    #train_seq_len = 12*1024
+    #val_seq_len = 4*16*1024
+    #train_seq_len = 48*1024 # FlexAttention sequence length
+    #train_seq_len = 12*1024 # FlexAttention sequence length
+    #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation
+    #lr_warmup_steps = 1000
+    #learning_rate = 0.001
+    #min_learning_rate = 0.0001
+
+    val_tokens = 1966080
+    train_seq_len = 12*1024
+    val_seq_len = 4*16*1024
+    #train_seq_len = 512
+    #val_seq_len = 512
+    # optimization
+    num_iterations = 10000  #1770 # Original: 1770
+    cooldown_frac = 0.8
+    # architecture
+    vocab_size = 50257
+    #vocab_size = 7
+    # evaluation and logging
+    val_loss_every = 500  # Original: 125
+    save_checkpoint = False  # Original: False
+args = Hyperparameters()
+
+# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used)
+rank = int(os.environ.get("RANK", 0))
+local_rank = int(os.environ.get("LOCAL_RANK", 0))  # Used for device setting
+world_size = int(os.environ.get("WORLD_SIZE", 1))
+
+# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug
+
+assert torch.cuda.is_available()
+device = torch.device("cuda", local_rank)  # Use local_rank for device
+torch.cuda.set_device(device)
+
+if not dist.is_initialized():  # Ensure DDP is initialized only once
+    dist.init_process_group(backend="nccl", rank=rank, world_size=world_size)  # Pass rank and world_size
+dist.barrier()
+master_process = (rank == 0)
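+
+# Illustrative launch command (an assumption, not taken from the repo): RANK,
+# LOCAL_RANK and WORLD_SIZE above are exactly the variables that torchrun exports,
+# so a single-node 8-GPU run could be started as (script name is a placeholder):
+#
+#   torchrun --standalone --nproc_per_node=8 <this_script>.py \
+#       --optimizer_mode 5 --model_parameterization qkvo --adam_lr 0.005 --seed 48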
+
+# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename)
+logfile = None
+# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir ---
+#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes"
+#if master_process:
+#    run_id = uuid.uuid4()
+#    os.makedirs(log_dir, exist_ok=True) # Create new log directory
+#    logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt"
+#    print(f"Logging to: {logfile}")
+
+logfile = None
+run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+run_dir_path = Path(run_dir_path_str)
+
+base_log_dir = Path("logs_bios/qa_0704")  # Base log directory for bioS mixed training
+
+if master_process:
+    # Set seed again specifically for master process for operations like dir creation, config saving
+    set_seed(exp_args.seed)
+
+    # Construct folder name based on config and seed
+    run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}"
+    run_dir_path = base_log_dir / run_folder_name
+    run_dir_path.mkdir(parents=True, exist_ok=True)
+    run_dir_path_str = str(run_dir_path)
+
+    run_uuid = uuid.uuid4()
+    logfile = run_dir_path / f"training_log_{run_uuid}.txt"
+    print(f"Logging to: {logfile}")
+
+    # Save configuration
+    config_to_save = {
+        "cli_args": vars(exp_args),
+        "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)},
+        "run_uuid_for_log": str(run_uuid),
+        "script_code_logged_at_start": True
+    }
+    config_file_path = run_dir_path / "config.json"
+    with open(config_file_path, "w") as f:
+        json.dump(config_to_save, f, indent=4)
+    print(f"Saved configuration to: {config_file_path}")
+
+def print0(s, console=False):
+    if master_process:
+        # Add timestamp and rank for better log readability
+        timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
+        log_message = f"[{timestamp}] [Rank {rank}] {s}"
+
+        # Print to console if requested or if it's a specific "PRINT:" message
+        if console or s.startswith("PRINT:"):
+            actual_s = s[6:] if s.startswith("PRINT:") else s
+            print(actual_s)  # Print to stdout for master process
+
+        if logfile:
+            # Write each message exactly once; a second unconditional write here
+            # would duplicate every line in the log file.
+            with open(logfile, "a") as f:
+                f.write(log_message + "\n")
+
+
+print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True)
+print0(f"PRINT: Parsed CLI args: {exp_args}", console=True)
+print0(f"PRINT: Hyperparameters: {args}", console=True)
+print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True)
+if master_process:
+    print0(f"PRINT: Run directory: {run_dir_path_str}", console=True)
+print0(code)  # Log the code
+# ... (other initial logs)
+
+
+# -----------------------------------------------------------------------------
+## Function to compute first token accuracy ##
+def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000):
+
+    # Helper 1: QA text cleaner (identical to training)
+    def clean_qa_text_completely(text):
+        import re
+
+        if not isinstance(text, str):
+            return ""
+
+        patterns = [
+            r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+',
+            r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*',
+            r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*',
+            r'answer\s+', r'\banswer\b',
+        ]
+
+        cleaned_text = text
+        for pattern in patterns:
+            cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE)
+
+        cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\s+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\n+', ' ', cleaned_text)
+        cleaned_text = re.sub(r'\t+', ' ', cleaned_text)
+        cleaned_text = cleaned_text.strip()
+
+        return cleaned_text
+
+    # Helper 2: Convert a QA item to a prompt/answer pair
+    def process_qa_simple(qa_item):
+        if not isinstance(qa_item, dict) or 'text' not in qa_item:
+            return None
+
+        qa_text = qa_item['text']
+        cleaned_text = clean_qa_text_completely(qa_text)
+
+        question_end = cleaned_text.find('?')
+        if question_end == -1:
+            return None
+
+        # Prompt is the question up to (and including) '?'
+        prompt = cleaned_text[:question_end + 1].strip()
+        answer = cleaned_text[question_end + 1:].strip()
+
+        if not answer:
+            return None
+
+        # Encode answer with a leading space so the first token matches training context
+        try:
+            answer_with_space = ' ' + answer
+            answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False)
+
+            if not answer_tokens:
+                return None
+
+            return {
+                'prompt': prompt,
+                'answer': answer,
+                'expected_token': answer_tokens[0],
+                'original': qa_text,
+                'cleaned': cleaned_text,
+            }
+
+        except Exception:
+            return None
+
+    # --- Main logic starts here ---
+    print("=" * 70)
+    print("First Token Accuracy test")
+    print("=" * 70)
+
+    DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data"
+    QA_FILE = "bio_QA_50000.jsonl"
+    NUM_INDIVIDUALS = 300000
+
+    try:
+        # Load cached and fully preprocessed QA samples
+        with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f:
+            all_cached_samples = json.load(f)
+
+        final_samples = all_cached_samples[:num_samples]  # cap at num_samples
+        print(f"Using {len(final_samples)} cached validation samples.")
+
+        # Quick preview of the first few samples
+        print("\n Sample format check (first 3):")
+        for i, sample in enumerate(final_samples[:3]):
+            print(f"\nSample {i + 1}:")
+            # print(f" Original: {sample['original']}")
+            # print(f" Cleaned : {sample['cleaned']}")
+            print(f" Prompt : '{sample['prompt']}'")
+            print(f" Answer : '{sample['answer']}'")
+            print(
+                f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'"
+            )
+
+        # Inference loop
+        correct_count = 0
+        total_count = 0
+        debug_info = []
+
+        model.eval()
+        with torch.no_grad():
+            print("\n Running model inference …")
+            for idx, sample in enumerate(final_samples):
+                try:
+                    prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False)
+                    current_len = len(prompt_tokens)
+
+                    # Pad/trim to BLOCK_SIZE
+                    BLOCK_SIZE = 128
+                    if current_len <= BLOCK_SIZE:
+                        padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len)
+                        actual_len = current_len
+                    else:
+                        padded_tokens = prompt_tokens[:BLOCK_SIZE]
+                        actual_len = BLOCK_SIZE
+
+                    padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device)
+                    num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE
+                    sliding_window_num_blocks = torch.tensor(num_blocks, device=device)
+
+                    result = model(padded_input_ids, None, sliding_window_num_blocks)
+
+                    logits = result[-1] if isinstance(result, tuple) else result
+                    if logits.dim() == 3 and logits.shape[0] == 1:
+                        logits = logits.squeeze(0)
+
+                    if actual_len - 1 < logits.shape[0]:
+                        last_token_logits = logits[actual_len - 1, :]
+                        predicted_token = torch.argmax(last_token_logits).item()
+
+                        is_correct = predicted_token == sample['expected_token']
+                        if is_correct:
+                            correct_count += 1
+
+                        if idx < 15:
+                            debug_info.append(
+                                {
+                                    'idx': idx,
+                                    'prompt': sample['prompt'],
+                                    'answer': sample['answer'],
+                                    'predicted_token': predicted_token,
+                                    'expected_token': sample['expected_token'],
+                                    'pred_text': tokenizer.decode([predicted_token]),
+                                    'exp_text': tokenizer.decode([sample['expected_token']]),
+                                    'is_correct': is_correct,
+                                }
+                            )
+                        total_count += 1
+
+                except Exception as e:
+                    if idx < 5:
+                        print(f" Sample {idx} error: {e}")
+                    continue
+        model.train()
+
+        # Detailed results
+        print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70)
+        for result in debug_info:
+            status = "Correct" if result['is_correct'] else "Incorrect"
+            print(f"\nSample {result['idx']}: {status}")
+            print(f" Prompt : '{result['prompt']}'")
+            print(f" Expected answer: '{result['answer']}'")
+            print(
+                f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'"
+            )
+            print(
+                f" Expected : {result['expected_token']} -> '{result['exp_text']}'"
+            )
+
+        if total_count > 0:
+            accuracy = correct_count / total_count
+            print(
+                "\n" + "=" * 70 + "\n Final result:\n"
+                + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n"
+                + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70
+            )
+            return accuracy, correct_count, total_count
+        else:
+            print("\n No samples were successfully processed")
+            return 0.0, 0, 0
+
+    except Exception as e:
+        print(f" Fatal error: {e}")
+        import traceback
+        traceback.print_exc()
+        return 0.0, 0, 0
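+
+# Illustrative call site (hypothetical; the model is only constructed further below,
+# so this is shown commented out rather than as live code):
+# if master_process and ft_tokenizer is not None:
+#     fta, n_correct, n_total = compute_first_token_accuracy(model, ft_tokenizer, device, num_samples=1000)
+#     print0(f"PRINT: First Token Accuracy: {fta:.3f} ({n_correct}/{n_total})", console=True)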
+
+
+def generate_powerlaw_selection_counts(m: int):
+    """Construct class sample counts to match the paper's distribution."""
+    selection_counts = {}
+    class_groups = []
+    class_id = 0
+    for group_id in range(m + 1):
+        if group_id == 0:
+            num_classes = 1
+        else:
+            num_classes = 2 ** (group_id - 1)
+        samples_per_class = 2 ** (m - group_id)
+        if samples_per_class < 1:
+            continue
+        for _ in range(num_classes):
+            selection_counts[class_id] = samples_per_class
+            class_groups.append(group_id)
+            class_id += 1
+    return selection_counts, class_groups
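+
+# Worked example (added for clarity, not in the original script): group g holds
+# 2**(g-1) classes (a single class for g=0), each selected 2**(m-g) times. For m=4:
+#   counts, groups = generate_powerlaw_selection_counts(4)
+#   counts == {0: 16, 1: 8, 2: 4, 3: 4, 4: 2, 5: 2, 6: 2, 7: 2,
+#              8: 1, 9: 1, 10: 1, 11: 1, 12: 1, 13: 1, 14: 1, 15: 1}
+#   groups == [0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]
+# The 12 groups (0-11) reported in the logs above are consistent with m = 11 here.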
+
+
+def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None):
+    """
+    In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA.
+    """
+    print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True)
+    model.eval()
+
+    # 1. Load and sample data
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+
+    # 2. Initialize counters
+    group_losses = defaultdict(float)
+    group_loss_counts = defaultdict(int)  # For loss sample count
+    group_correct = defaultdict(int)
+    group_total_fta = defaultdict(int)  # For FTA sample count
+
+    # 3. Evaluation loop
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)):
+            if not item or 'text' not in item or not item['text']:
+                continue
+
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None:
+                continue
+
+            # --- Data prep for Loss ---
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+            original_len = len(tokens)
+            if original_len < 2:
+                continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            # --- Data prep for FTA ---
+            match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE)
+            if not match:
+                continue
+            prompt, answer = match.groups()
+            prompt, answer = prompt.strip(), answer.strip()
+            if not answer:
+                continue
+
+            try:
+                expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0]
+            except IndexError:
+                continue
+
+            # --- Model call (once only) ---
+            logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks)
+            if isinstance(logits, tuple):
+                logits = logits[0]
+
+            # --- Compute Loss ---
+            loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100)
+            if not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_loss_counts[group_id] += 1
+
+            # --- Compute FTA ---
+            prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False))
+            if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len:
+                last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :]
+                predicted_token = torch.argmax(last_token_logits).item()
+
+                if predicted_token == expected_token:
+                    group_correct[group_id] += 1
+                group_total_fta[group_id] += 1
+
+    # 4. Aggregate results
+    avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0}
+    avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0}
+
+    total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0
+    total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0
+
+    print0("--- Detailed Evaluation Complete ---", console=True)
+    return {
+        'per_class_loss': avg_group_loss,
+        'per_class_acc': avg_group_acc,
+        'total_loss': total_loss,
+        'total_acc': total_acc
+    }
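+
+# Example of the prompt/answer split used above (illustrative text in the dataset's
+# "question? Answer: ..." shape; not an actual dataset record):
+#   text = "What is the birth city of Anna Smith? Answer: Boston"
+#   re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', text, re.IGNORECASE).groups()
+#   -> ('What is the birth city of Anna Smith?', 'Boston')
+# The expected first token is then tokenizer.encode(' Boston')[0], i.e. the answer
+# as it appears after a space, matching the training text.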
+
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Epoch", fontsize=14)
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values():
+                all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
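+
+# Illustrative wiring of the evaluation and plotting helpers above (variable names,
+# qa_data_path and the step key are assumptions, not copied from the original; the
+# history dicts use the shapes plot_curves expects, i.e. per-class
+# {group_id_str: {step_str: value}} and total {step_str: value}):
+# results = run_detailed_evaluation(model, ft_tokenizer, qa_data_path, device,
+#                                   m_val=11, class_to_group_map=class_to_group_map,
+#                                   num_samples=5000)
+# for g, v in results['per_class_loss'].items():
+#     per_class_loss_history.setdefault(g, {})[str(step)] = v
+# total_loss_history[str(step)] = results['total_loss']
+# plot_curves(per_class_loss_history, run_dir_path / "per_class_loss_curves.png",
+#             "Per-Class Loss", "Loss")
+# plot_curves(total_loss_history, run_dir_path / "total_loss_curve.png",
+#             "Total Detailed Loss", "Loss")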
+def plot_curves(history, output_path, title, y_label, y_lim=None):
+    """Generic plotting function"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not history:
+        print0(f"Warning: No history data for {y_label}, cannot plot.", console=True)
+        plt.close()
+        return
+
+    is_per_class = isinstance(next(iter(history.values())), dict)
+
+    if is_per_class:
+        group_ids = sorted([int(g) for g in history.keys()])
+        cmap = plt.get_cmap("viridis")
+        norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+        for group_id_int in group_ids:
+            group_id_str = str(group_id_int)
+            epoch_data = history[group_id_str]
+            epochs = sorted([int(e) for e in epoch_data.keys()])
+            values = [epoch_data[str(e)] for e in epochs]
+            ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+        ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left')
+    else:
+        epochs = sorted([int(e) for e in history.keys()])
+        values = [history[str(e)] for e in epochs]
+        ax.plot(epochs, values, linewidth=2.5)
+
+    ax.set_xlabel("Step", fontsize=14)  # history is keyed by training step, not epoch
+    ax.set_ylabel(y_label, fontsize=14)
+    ax.set_title(title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+
+    if y_lim:
+        ax.set_ylim(y_lim)
+    else:
+        all_values = []
+        if is_per_class:
+            for group_data in history.values(): all_values.extend(group_data.values())
+        else:
+            all_values = list(history.values())
+        if all_values:
+            min_val, max_val = min(all_values), max(all_values)
+            ax.set_ylim(min_val * 0.95, max_val * 1.05)
+
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True)
+    plt.close()
+
+# =====================================================================
+# <<<<< End of Additions / Replacements >>>>>
+# =====================================================================
+
+def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None):
+    """
+    Internal evaluation on original QA data for per-class loss.
+    (Final fixed version: NameError resolved)
+    """
+    print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True)
+    model.eval()
+
+    # =================================================================
+    # <<<<< Restored Missing Code >>>>>
+    # =================================================================
+    with open(qa_data_path, 'r', encoding='utf-8') as f:
+        qa_data = [json.loads(line) for line in f]
+
+    if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples:
+        print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True)
+        data_by_class = defaultdict(list)
+        for item in qa_data:
+            data_by_class[item['class_id']].append(item)
+        sample_ratio = num_samples / len(qa_data)
+        stratified_sample_data = []
+        for class_id, items in data_by_class.items():
+            num_to_sample = max(1, int(len(items) * sample_ratio))
+            sampled_items = random.sample(items, min(len(items), num_to_sample))
+            stratified_sample_data.extend(sampled_items)
+        qa_data = stratified_sample_data
+        print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True)
+    # =================================================================
+
+    # 3. Create mapping
+    selection_counts, class_groups = generate_powerlaw_selection_counts(m_val)
+    class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)}
+
+    group_losses = defaultdict(float)
+    group_counts = defaultdict(int)
+
+    with torch.no_grad():
+        for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process):
+            if not item or 'text' not in item or not item['text']: continue
+            group_id = class_to_group_map.get(item['class_id'])
+            if group_id is None: continue
+
+            tokens = tokenizer.encode(item['text'], add_special_tokens=False)
+            tokens.append(tokenizer.eos_token_id)
+
+            original_len = len(tokens)
+            if original_len < 2: continue
+
+            BLOCK_SIZE = 128
+            padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE
+            max_eval_len = 4096
+            padded_len = min(padded_len, max_eval_len)
+
+            final_tokens = tokens[:padded_len]
+            pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id
+            padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens))
+
+            input_seq = torch.tensor(padded_input, dtype=torch.long, device=device)
+
+            target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len]
+            target_seq_list += [-100] * (padded_len - len(target_seq_list))
+            target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device)
+
+            window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32)
+
+            loss = model(input_seq, target_seq, window_blocks)
+
+            if loss is not None and not torch.isnan(loss):
+                group_losses[group_id] += loss.item()
+                group_counts[group_id] += 1
+
+    avg_group_losses = {str(group): group_losses[group] / group_counts[group]
+                        for group in group_losses if group_counts[group] > 0}
+
+    print0("--- Per-Class Loss Evaluation Complete ---", console=True)
+    return avg_group_losses
+
+def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"):
+    """Plot loss curve from aggregated history data"""
+    plt.style.use('seaborn-v0_8-whitegrid')
+    fig, ax = plt.subplots(figsize=(8, 6))
+    if not loss_history:
+        print0("Warning: Loss history is empty. Cannot plot.", console=True)
+        plt.close()
+        return
+    group_ids = sorted([int(g) for g in loss_history.keys()])
+    cmap = plt.get_cmap("viridis")
+    norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1)
+    for group_id_int in group_ids:
+        group_id_str = str(group_id_int)
+        epoch_data = loss_history[group_id_str]
+        epochs = sorted([int(e) for e in epoch_data.keys()])
+        losses = [epoch_data[str(e)] for e in epochs]
+        ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}')
+    ax.set_xlabel("Step", fontsize=14)
+    ax.set_ylabel("Per-Class Loss", fontsize=14)
+    ax.set_title(plot_title, fontsize=16)
+    ax.tick_params(axis='both', which='major', labelsize=12)
+    all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()]
+    if all_losses:
+        min_loss, max_loss = min(all_losses), max(all_losses)
+        ax.set_ylim(min_loss * 0.95, max_loss * 1.05)
+    ax.legend(title="Class Group")
+    ax.grid(True)
+    plt.tight_layout()
+    plt.savefig(output_path, dpi=300)
+    print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True)
+    plt.close()
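+
+# NOTE (hypothetical call, illustrative only -- not executed by this script):
+# plot_loss_curves expects {group_id_str: {step_str: loss}}, e.g.
+#   plot_loss_curves({"0": {"500": 3.2, "1000": 2.9},
+#                     "1": {"500": 3.5, "1000": 3.1}},
+#                    "per_class_loss_demo.png")
+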
+
+
+########################################
+# Construct model and optimizer        #
+########################################
+if master_process:
+    try:
+        ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
+        print0("PRINT: First-token accuracy tokenizer loaded.", console=True)
+    except Exception:
+        ft_tokenizer = None
+        print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True)
+else:
+    ft_tokenizer = None
+
+print0("PRINT: Constructing model...", console=True)
+model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768,
+                       max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda()
+for m in model.modules():
+    if isinstance(m, nn.Embedding):
+        m.bfloat16()
+print0("PRINT: Broadcasting model parameters...", console=True)
+for param in model.parameters():
+    dist.broadcast(param.detach(), 0)
+print0("PRINT: Model constructed and broadcasted.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model forward function:", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)
+        model.train()
+
+        print0(f"PRINT: Model test - Result type: {type(result)}", console=True)
+        if isinstance(result, tuple):
+            print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True)
+            if len(result) >= 2:
+                print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True)
+                print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True)
+        else:
+            print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test failed: {e}", console=True)
+
+
+model_for_inference = model
+print0("PRINT: Saved original model reference for inference.", console=True)
+
+
+if master_process:
+    print0("PRINT: Testing model with target_seq=None...", console=True)
+    try:
+        test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32)
+        test_blocks = torch.tensor(1, device=device)
+        model.eval()
+        with torch.no_grad():
+            result = model(test_input, None, test_blocks)  # target_seq=None
+        model.train()
+
+        if isinstance(result, tuple) and len(result) == 2:
+            loss, logits = result
+            print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+        else:
+            print0(f"PRINT: Model returns: {type(result)}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test still fails: {e}", console=True)
+
+
+
+# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+if exp_args.model_parameterization == "qkvo":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0(f"PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0(f"PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0(f"PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0(f"PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        #dict(params=head_params, lr=0.22),
+        #dict(params=embed_params, lr=0.6),
+        #dict(params=scalar_params, lr=0.04)
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices:  # Only add group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # add weight_decay=0.01 to Adam if desired
+    optimizers = [optimizer1]  # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params:  # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # Pass nesterov, ns_steps
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None  # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
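+
+# NOTE (minimal sanity-check sketch, not part of the original script): whichever
+# mode was selected, every parameter should be owned by exactly one optimizer group:
+# seen = set()
+# for opt in optimizers:
+#     for g in opt.param_groups:
+#         for p in g["params"]:
+#             assert id(p) not in seen, "parameter assigned to two optimizer groups"
+#             seen.add(id(p))
+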
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # Original assert, might fail on last step if step == num_iterations
+    # --- MODIFICATION: Adjust assert for LR schedule ---
+    if not (0 <= x <= 1):  # Allow x=1 for the last step
+        x = min(max(x, 0.0), 1.0)  # Clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # Optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
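+
+# NOTE (worked values, assuming this run's logged config num_iterations=10000 and
+# cooldown_frac=0.8; the returned factor multiplies each group's initial_lr):
+#   get_lr(0)     -> 1.0
+#   get_lr(2000)  -> 1.0   (x = 0.2 hits the cooldown boundary, w = 1)
+#   get_lr(6000)  -> 0.55  (w = 0.5 -> 0.5*1.0 + 0.5*0.1)
+#   get_lr(10000) -> 0.1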
+
+# attention window size schedule (KEEP AS IS)
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: Adjust assert for window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # Clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Use 'model' for compilation, not 'model_compiled' before it's defined
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # ------------------------------------
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
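+        # NOTE (worked numbers, assuming this run's logged config val_tokens=1966080
+        # and val_seq_len=65536): with world_size=1 the batch is 65536 tokens and the
+        # loop below runs exactly 30 validation steps; with world_size=8 the batch is
+        # 524288 tokens, 1966080/524288 = 3.75, and the warning above fires.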
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0(f"PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        #if world_size > 1:
+            #ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+            #ft_correct_tensor = torch.tensor(ft_correct, device=device)
+            #ft_total_tensor = torch.tensor(ft_total, device=device)
+            #dist.broadcast(ft_acc_tensor, 0)
+            #dist.broadcast(ft_correct_tensor, 0)
+            #dist.broadcast(ft_total_tensor, 0)
+            #first_token_acc = ft_acc_tensor.item()
+            #ft_correct = int(ft_correct_tensor.item())
+            #ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. 
+        train_loss_sum = torch.zeros(1, device=device)
+        train_step_count = torch.zeros(1, device=device)
+        model_compiled.train()
+        torch.cuda.synchronize()
+        t0 = time.perf_counter()
+
+    #if last_step:
+    #    if master_process and args.save_checkpoint:
+    #        if run_dir_path_str:
+    #            checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints"
+    #            checkpoint_parent_dir.mkdir(parents=True, exist_ok=True)
+    #            checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt"
+    #            log_checkpoint = dict(
+    #                step=step,
+    #                code=code,
+    #                model=model_compiled.state_dict(),
+    #                optimizers=[opt.state_dict() for opt in optimizers]
+    #            )
+    #            torch.save(log_checkpoint, str(checkpoint_path))
+    #            print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True)
+    #        else:
+    #            print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True)
+    #    break
+
+    # --------- TRAINING SECTION ---------
+    try:
+        inputs, targets = next(train_loader)
+    except StopIteration:
+        print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True)
+        break
+
+    loss_train = model_compiled(inputs, targets, get_window_size_blocks(step))
+    loss_train.backward()
+    train_loss_sum += loss_train.detach() / args.train_seq_len
+    train_step_count += 1
+
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+
+    current_lr_val = get_lr(step)
+    for opt in optimizers:
+        for group in opt.param_groups:
+            group["lr"] = group["initial_lr"] * current_lr_val
+
+    if optimizer2 is not None:
+        for group in optimizer2.param_groups:
+            frac = min(step / 300, 1)
+            group["momentum"] = (1 - frac) * 0.85 + frac * 0.95
+
+    for opt in optimizers:
+        opt.step()
+
+    model_compiled.zero_grad(set_to_none=True)
+
+    if step > 0 and (step % 20 == 0 or step == train_steps - 1):
+        current_segment_time_ms = 1000 * (time.perf_counter() - t0)
+        approx_total_training_time_ms = training_time_ms + current_segment_time_ms
+        total_tokens_in_batch = args.train_seq_len * world_size
+        train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item()
+        print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True)
+
+print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True)
+print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB "
+       f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True)
+
+if dist.is_initialized():
+    dist.destroy_process_group()
+
+[2025-07-07 06:12:34] [Rank 0] import os
+import sys
+with open(sys.argv[0]) as f:
+    code = f.read()  # read the code of this file ASAP, for logging
+import uuid
+import time
+import copy
+import glob
+import math
+from dataclasses import dataclass, asdict
+from functools import lru_cache
+from pathlib import Path
+import argparse  # Keep argparse for --unet and potentially --optimizer_mode
+import json
+import random
+import numpy as np
+import itertools
+from itertools import cycle
+from transformers import GPT2Tokenizer
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from tqdm import tqdm
+import re
+
+#
+
+os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
+import torch
+torch.empty(1, device="cuda", requires_grad=True).backward()  # prevents a bug on some systems
+from torch import Tensor, nn
+import torch.nn.functional as F
+import torch.distributed as dist
+# use of FlexAttention contributed by @KoszarskyB
+from torch.nn.attention.flex_attention import BlockMask, flex_attention
+sys.path.append("/home/aiops/zhangfz/MUON_theory/modded-nanogpt")  # Already present
+from optimizers.MUON import Muon
+from utils.float_compute import mm_op, backward as mm_backward_custom, setup_context as mm_setup_context_custom  # Renamed
+
+#from kn_util.utils import setup_debugpy
+#torch._inductor.config.coordinate_descent_tuning = True
+
+# -----------------------------------------------------------------------------
+
+mm_op.register_autograd(mm_backward_custom, setup_context=mm_setup_context_custom)  # Use renamed imports
+
+# -----------------------------------------------------------------------------
+# Seeding Function
+def set_seed(seed):
+    random.seed(seed)
+    np.random.seed(seed)
+    torch.manual_seed(seed)
+    if torch.cuda.is_available():
+        torch.cuda.manual_seed_all(seed)
+    print(f"PRINT: Set seed to {seed}", flush=True)
# Print immediately for all ranks + + + +# ----------------------------------------------------------------------------- +# Our own simple Distributed Data Loader (KEEP AS IS) +def _load_data_shard(file: Path): + header = torch.from_file(str(file), False, 256, dtype=torch.int32) + assert header[0] == 20240520, "magic number mismatch in the data .bin file" + assert header[1] == 1, "unsupported version" + num_tokens = int(header[2]) + with file.open("rb", buffering=0) as f: + tokens = torch.empty(num_tokens, dtype=torch.uint16, pin_memory=True) + f.seek(256 * 4) + nbytes = f.readinto(tokens.numpy()) + assert nbytes == 2 * num_tokens, "number of tokens read does not match header" + return tokens + +def distributed_data_generator(filename_pattern: str, batch_size: int, rank : int, world_size : int): + files = [Path(file) for file in sorted(glob.glob(filename_pattern))] + assert batch_size % world_size == 0 + local_batch_size = batch_size // world_size + file_iter = cycle(files) # use itertools.cycle(files) instead if you want to do multi-epoch training + tokens, pos = _load_data_shard(next(file_iter)), 0 + while True: + if pos + batch_size + 1 >= len(tokens): + tokens, pos = _load_data_shard(next(file_iter)), 0 + buf = tokens[pos + rank * local_batch_size:][:local_batch_size + 1] + inputs = buf[:-1].to(device="cuda", dtype=torch.int32, non_blocking=True) # no sync on host side; + targets = buf[1:].to(device="cuda", dtype=torch.int64, non_blocking=True) # H2D in another stream isn't helpful. + pos += batch_size + yield inputs, targets + + + + + +# ----------------------------------------------------------------------------- +# int main +parser = argparse.ArgumentParser(description="NanoGPT Training Script with Muon") +parser.add_argument("--unet", action="store_true", help="Use U-net architecture") +parser.add_argument("--seed", type=int, default=42, help="Random seed for reproducibility") +# --- MODIFICATION: Add optimizer_mode as a CLI argument --- +parser.add_argument("--optimizer_mode", type=int, default=0, + help="Defines how Muon is applied. " + "0: Muon(All Hidden Attn+MLP - original); " + "1: Muon(QK Attn)/Adam(VO Attn,MLP); " + "2: Muon(VO Attn)/Adam(QK Attn,MLP); " + "3: Muon(All Attn)/Adam(MLP); " + "4: Muon(MLP)/Adam(All Attn)" + "5: All Adam (No Muon, all applicable matrices to Adam)." + "6: Muon(W_2 MLP)/Adam(attn, W_1 MLP)." + "7: Muon(VO Attn, MLP)/Adam(QK Attn)." + "8: Muon(VO Attn, W_2 MLP)/Adam(QK Attn, W_1 MLP)." 
+ ) +parser.add_argument("--model_parameterization", type=str, default="whole",choices=["whole","qkvo"]) + +parser.add_argument("--adam_lr", type=float, default=1e-3, help="Base learning rate for Adam optimizer groups.") + + +exp_args = parser.parse_args() +set_seed(exp_args.seed) + +# --- MODIFICATION: Import correct GPT model based on --unet flag --- +if exp_args.unet: + print("Using U-net architecture") + from models.nano_GPT_unet import GPT +elif exp_args.model_parameterization == "qkvo": + print("Using architecture (models.nano_gpt_qkvo) with CausalSelfAttention having q_w, k_w, v_w") + # This MUST be the nano_GPT.py file where CausalSelfAttention has q_w, k_w, v_w + from models.nano_GPT_qkvo import GPT +elif exp_args.model_parameterization == "whole": + print("Using original architecture") + from models.nano_GPT import GPT + +@dataclass +class Hyperparameters: + # data + #train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_train_*.bin" + #val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/data/fineweb10B/fineweb_val_*.bin" + train_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/train_data/train_*.bin" + val_files = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_bin/val_data/val_*.bin" + #val_tokens = 1966080 + #val_tokens = 10485760 + #train_seq_len = 12*1024 + #val_seq_len = 4*16*1024 + #train_seq_len = 48*1024 # FlexAttention sequence length + #train_seq_len = 12*1024 # FlexAttention sequence length + #val_seq_len = 4*64*1024 # FlexAttention sequence length for validation + #lr_warmup_steps = 1000 + #learning_rate = 0.001 + #min_learning_rate = 0.0001 + + val_tokens = 1966080 + train_seq_len = 12*1024 + val_seq_len = 4*16*1024 + #train_seq_len = 512 + #val_seq_len = 512 + # optimization + num_iterations = 10000 #1770 # Original: 1770 + cooldown_frac = 0.8 + # architecture + vocab_size = 50257 + #vocab_size = 7 + # evaluation and logging + val_loss_every = 500 # Original: 125 + save_checkpoint = False # Original: False +args = Hyperparameters() + +# DDP setup (KEEP AS IS, but ensure rank and world_size are correctly used) +rank = int(os.environ.get("RANK", 0)) +local_rank = int(os.environ.get("LOCAL_RANK", 0)) # Used for device setting +world_size = int(os.environ.get("WORLD_SIZE", 1)) + +# print(f"[Rank {rank}] Global Rank: {rank}, Local Rank: {local_rank}, World Size: {world_size}", flush=True) # Debug + +assert torch.cuda.is_available() +device = torch.device("cuda", local_rank) # Use local_rank for device +torch.cuda.set_device(device) + +if not dist.is_initialized(): # Ensure DDP is initialized only once + dist.init_process_group(backend="nccl", rank=rank, world_size=world_size) # Pass rank and world_size +dist.barrier() +master_process = (rank == 0) + +# Logging setup (KEEP AS IS, but maybe add optimizer_mode to filename) +logfile = None +# --- MODIFICATION: Add optimizer_mode to log file name and specify new dir --- +#log_dir = "modded-nanogpt/logs_detailed_attn_minimal_changes" +#if master_process: +# run_id = uuid.uuid4() +# os.makedirs(log_dir, exist_ok=True) # Create new log directory +# logfile = f"{log_dir}/exp_mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_{run_id}.txt" +# print(f"Logging to: {logfile}") + +logfile = None +run_dir_path_str = f"/home/aiops/zhangfz/MUON_theory/logs_bios/qa_0704/mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" +run_dir_path = Path(run_dir_path_str) + 
+base_log_dir = Path("logs_bios/qa_0704") # Base log directory for bioS mixed training + +if master_process: + # Set seed again specifically for master process for operations like dir creation, config saving + set_seed(exp_args.seed) + + # Construct folder name based on config and seed + run_folder_name = f"mode_{exp_args.optimizer_mode}_param_{exp_args.model_parameterization}_lr_{exp_args.adam_lr}_seed_{exp_args.seed}" + run_dir_path = base_log_dir / run_folder_name + run_dir_path.mkdir(parents=True, exist_ok=True) + run_dir_path_str = str(run_dir_path) + + run_uuid = uuid.uuid4() + logfile = run_dir_path / f"training_log_{run_uuid}.txt" + print(f"Logging to: {logfile}") + + # Save configuration + config_to_save = { + "cli_args": vars(exp_args), + "hyperparameters": {k: v for k, v in args.__class__.__dict__.items() if not k.startswith('__') and not callable(v)}, + "run_uuid_for_log": str(run_uuid), + "script_code_logged_at_start": True + } + config_file_path = run_dir_path / "config.json" + with open(config_file_path, "w") as f: + json.dump(config_to_save, f, indent=4) + print(f"Saved configuration to: {config_file_path}") + +def print0(s, console=False): + if master_process: + # Add timestamp and rank for better log readability + timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + log_message = f"[{timestamp}] [Rank {rank}] {s}" + + # Print to console if requested or if it's a specific "PRINT:" message + if console or s.startswith("PRINT:"): + actual_s = s[6:] if s.startswith("PRINT:") else s + print(actual_s) # Print to stdout for master process + + if logfile: + with open(logfile, "a") as f: + f.write(log_message + "\n") + + with open(logfile, "a") as f: + f.write(log_message + "\n") + + +print0(f"PRINT: --- Script Start: {time.ctime()} ---", console=True) +print0(f"PRINT: Parsed CLI args: {exp_args}", console=True) +print0(f"PRINT: Hyperparameters: {args}", console=True) +print0(f"PRINT: Using fixed seed: {exp_args.seed}", console=True) +if master_process: + print0(f"PRINT: Run directory: {run_dir_path_str}", console=True) +print0(code) # Log the code +# ... (other initial logs) + + + +# ----------------------------------------------------------------------------- +## Function to compute first token accuracy ## +def compute_first_token_accuracy(model, tokenizer, device, num_samples=1000): + + # Helper 1: QA text cleaner (identical to training) + def clean_qa_text_completely(text): + import re + + if not isinstance(text, str): + return "" + + patterns = [ + r'\s*Answer\s*:\s*', r'\s*Answer\s*\:\s*', r'\s*Answer\s+', + r'\s+Answer\s*:\s*', r'\s+Answer\s+', r'\bAnswer\b\s*:\s*', + r'\bAnswer\b\s+', r'\bAnswer\b', r'answer\s*:\s*', + r'answer\s+', r'\banswer\b', + ] + + cleaned_text = text + for pattern in patterns: + cleaned_text = re.sub(pattern, ' ', cleaned_text, flags=re.IGNORECASE) + + cleaned_text = re.sub(r'\s*:\s*', ' ', cleaned_text) + cleaned_text = re.sub(r'\s+', ' ', cleaned_text) + cleaned_text = re.sub(r'\n+', ' ', cleaned_text) + cleaned_text = re.sub(r'\t+', ' ', cleaned_text) + cleaned_text = cleaned_text.strip() + + return cleaned_text + + # Helper 2: Convert a QA item to a prompt/answer pair + def process_qa_simple(qa_item): + if not isinstance(qa_item, dict) or 'text' not in qa_item: + return None + + qa_text = qa_item['text'] + cleaned_text = clean_qa_text_completely(qa_text) + + question_end = cleaned_text.find('?') + if question_end == -1: + return None + + # Prompt is the question up to (and including) '?' 
+ prompt = cleaned_text[:question_end + 1].strip() + answer = cleaned_text[question_end + 1:].strip() + + if not answer: + return None + + # Encode answer with a leading space so the first token matches training context + try: + answer_with_space = ' ' + answer + answer_tokens = tokenizer.encode(answer_with_space, add_special_tokens=False) + + if not answer_tokens: + return None + + return { + 'prompt': prompt, + 'answer': answer, + 'expected_token': answer_tokens[0], + 'original': qa_text, + 'cleaned': cleaned_text, + } + + except Exception: + return None + + # --- Main logic starts here --- + print("=" * 70) + print("First Token Accuracy test") + print("=" * 70) + + DATA_DIR = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data" + QA_FILE = "bio_QA_50000.jsonl" + NUM_INDIVIDUALS = 300000 + + try: + import random + import os + import json + import torch + + + # Load cached and fully preprocessed QA samples + with open("/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/cached_val_samples.json", "r", encoding="utf-8") as f: + all_cached_samples = json.load(f) + + final_samples = all_cached_samples[:num_samples] # use full cached set + print(f"Using {len(final_samples)} cached validation samples.") + + + # Quick preview of the first few samples + print("\n Sample format check (first 3):") + for i, sample in enumerate(final_samples[:3]): + print(f"\nSample {i + 1}:") + # print(f" Original: {sample['original']}") + # print(f" Cleaned : {sample['cleaned']}") + print(f" Prompt : '{sample['prompt']}'") + print(f" Answer : '{sample['answer']}'") + print( + f" Expected token: {sample['expected_token']} -> '{tokenizer.decode([sample['expected_token']])}'" + ) + + # Inference loop + correct_count = 0 + total_count = 0 + debug_info = [] + + model.eval() + with torch.no_grad(): + print("\n Running model inference …") + for idx, sample in enumerate(final_samples): + try: + prompt_tokens = tokenizer.encode(sample['prompt'], add_special_tokens=False) + current_len = len(prompt_tokens) + + # Pad/trim to BLOCK_SIZE + BLOCK_SIZE = 128 + if current_len <= BLOCK_SIZE: + padded_tokens = prompt_tokens + [tokenizer.eos_token_id] * (BLOCK_SIZE - current_len) + actual_len = current_len + else: + padded_tokens = prompt_tokens[:BLOCK_SIZE] + actual_len = BLOCK_SIZE + + padded_input_ids = torch.tensor(padded_tokens, dtype=torch.int32, device=device) + num_blocks = (len(padded_tokens) + BLOCK_SIZE - 1) // BLOCK_SIZE + sliding_window_num_blocks = torch.tensor(num_blocks, device=device) + + result = model(padded_input_ids, None, sliding_window_num_blocks) + + logits = result[-1] if isinstance(result, tuple) else result + if logits.dim() == 3 and logits.shape[0] == 1: + logits = logits.squeeze(0) + + if actual_len - 1 < logits.shape[0]: + last_token_logits = logits[actual_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + is_correct = predicted_token == sample['expected_token'] + if is_correct: + correct_count += 1 + + if idx < 15: + debug_info.append( + { + 'idx': idx, + 'prompt': sample['prompt'], + 'answer': sample['answer'], + 'predicted_token': predicted_token, + 'expected_token': sample['expected_token'], + 'pred_text': tokenizer.decode([predicted_token]), + 'exp_text': tokenizer.decode([sample['expected_token']]), + 'is_correct': is_correct, + } + ) + total_count += 1 + + except Exception as e: + if idx < 5: + print(f" Sample {idx} error: {e}") + continue + model.train() + + # Detailed results + print("\n" + "=" * 70 + "\n Detailed predictions:\n" + "=" * 70) + for 
result in debug_info: + status = "Correct" if result['is_correct'] else "Incorrect" + print(f"\nSample {result['idx']}: {status}") + print(f" Prompt : '{result['prompt']}'") + print(f" Expected answer: '{result['answer']}'") + print( + f" Prediction: {result['predicted_token']} -> '{result['pred_text']}'" + ) + print( + f" Expected : {result['expected_token']} -> '{result['exp_text']}'" + ) + + if total_count > 0: + accuracy = correct_count / total_count + print( + "\n" + "=" * 70 + "\n Final result:\n" + + f" First Token Accuracy: {accuracy:.3f} ({correct_count}/{total_count})\n" + + f" Success rate: {accuracy*100:.1f}%\n" + "=" * 70 + ) + return accuracy, correct_count, total_count + else: + print("\n No samples were successfully processed") + return 0.0, 0, 0 + + except Exception as e: + print(f" Fatal error: {e}") + import traceback + traceback.print_exc() + return 0.0, 0, 0 + + +def generate_powerlaw_selection_counts(m: int): + """Construct class sample counts to match the paper's distribution.""" + selection_counts = {} + class_groups = [] + class_id = 0 + for group_id in range(m + 1): + if group_id == 0: num_classes = 1 + else: num_classes = 2 ** (group_id - 1) + samples_per_class = 2 ** (m - group_id) + if samples_per_class < 1: continue + for _ in range(num_classes): + selection_counts[class_id] = samples_per_class + class_groups.append(group_id) + class_id += 1 + return selection_counts, class_groups + + +def run_detailed_evaluation(model, tokenizer, qa_data_path, device, m_val, class_to_group_map, num_samples=None): + """ + In a single evaluation, compute Per-Class Loss, Per-Class FTA, Total Loss, and Total FTA. + """ + print0("\n--- Starting Detailed Evaluation (Loss & FTA) ---", console=True) + model.eval() + + # 1. Load and sample data + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for detailed evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + + # 2. Initialize counters + group_losses = defaultdict(float) + group_loss_counts = defaultdict(int) # For loss sample count + group_correct = defaultdict(int) + group_total_fta = defaultdict(int) # For FTA sample count + + # 3. 
Evaluation loop + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=(not master_process)): + if not item or 'text' not in item or not item['text']: continue + + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + # --- Data prep for Loss --- + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + # --- Data prep for FTA --- + match = re.search(r'^(.*?\?)\s*Answer\s*:\s*(.*)$', item['text'], re.IGNORECASE) + if not match: continue + prompt, answer = match.groups() + prompt, answer = prompt.strip(), answer.strip() + if not answer: continue + + try: + expected_token = tokenizer.encode(' ' + answer, add_special_tokens=False)[0] + except IndexError: + continue + + # --- Model call (once only) --- + logits = model(input_seq, target_seq=None, sliding_window_num_blocks=window_blocks) + if isinstance(logits, tuple): logits = logits[0] + + # --- Compute Loss --- + loss = F.cross_entropy(logits.view(-1, logits.size(-1)), target_seq.view(-1), ignore_index=-100) + if not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_loss_counts[group_id] += 1 + + # --- Compute FTA --- + prompt_tokens_len = len(tokenizer.encode(prompt, add_special_tokens=False)) + if prompt_tokens_len > 0 and prompt_tokens_len <= padded_len: + last_token_logits = logits.squeeze(0)[prompt_tokens_len - 1, :] + predicted_token = torch.argmax(last_token_logits).item() + + if predicted_token == expected_token: + group_correct[group_id] += 1 + group_total_fta[group_id] += 1 + + # 4. 
Aggregate results + avg_group_loss = {str(g): group_losses[g] / group_loss_counts[g] for g in group_loss_counts if group_loss_counts[g] > 0} + avg_group_acc = {str(g): group_correct[g] / group_total_fta[g] for g in group_total_fta if group_total_fta[g] > 0} + + total_loss = sum(group_losses.values()) / sum(group_loss_counts.values()) if sum(group_loss_counts.values()) > 0 else 0 + total_acc = sum(group_correct.values()) / sum(group_total_fta.values()) if sum(group_total_fta.values()) > 0 else 0 + + print0("--- Detailed Evaluation Complete ---", console=True) + return { + 'per_class_loss': avg_group_loss, + 'per_class_acc': avg_group_acc, + 'total_loss': total_loss, + 'total_acc': total_acc + } + +def plot_curves(history, output_path, title, y_label, y_lim=None): + """Generic plotting function""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not history: + print0(f"Warning: No history data for {y_label}, cannot plot.", console=True) + plt.close() + return + + is_per_class = isinstance(next(iter(history.values())), dict) + + if is_per_class: + group_ids = sorted([int(g) for g in history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + values = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, values, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.legend(title="Class Group", bbox_to_anchor=(1.05, 1), loc='upper left') + else: + epochs = sorted([int(e) for e in history.keys()]) + values = [history[str(e)] for e in epochs] + ax.plot(epochs, values, linewidth=2.5) + + ax.set_xlabel("Epoch", fontsize=14) + ax.set_ylabel(y_label, fontsize=14) + ax.set_title(title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + + if y_lim: + ax.set_ylim(y_lim) + else: + all_values = [] + if is_per_class: + for group_data in history.values(): all_values.extend(group_data.values()) + else: + all_values = list(history.values()) + if all_values: + min_val, max_val = min(all_values), max(all_values) + ax.set_ylim(min_val * 0.95, max_val * 1.05) + + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"[✓] {title} curve updated and saved to: {output_path}", console=True) + plt.close() + +# ===================================================================== +# <<<<< End of Additions / Replacements >>>>> +# ===================================================================== + +def evaluate_per_class_loss(model, tokenizer, qa_data_path, device, m_val, num_samples=None): + """ + Internal evaluation on original QA data for per-class loss. 
+ (Final fixed version: NameError resolved) + """ + print0("\n--- Starting Per-Class Loss Evaluation (Final Fixed Version) ---", console=True) + model.eval() + + # ================================================================= + # <<<<< Restored Missing Code >>>>> + # ================================================================= + with open(qa_data_path, 'r', encoding='utf-8') as f: + qa_data = [json.loads(line) for line in f] + + if num_samples is not None and num_samples > 0 and len(qa_data) > num_samples: + print0(f"Using stratified sampling to extract ~{num_samples} samples for evaluation...", console=True) + data_by_class = defaultdict(list) + for item in qa_data: + data_by_class[item['class_id']].append(item) + sample_ratio = num_samples / len(qa_data) + stratified_sample_data = [] + for class_id, items in data_by_class.items(): + num_to_sample = max(1, int(len(items) * sample_ratio)) + sampled_items = random.sample(items, min(len(items), num_to_sample)) + stratified_sample_data.extend(sampled_items) + qa_data = stratified_sample_data + print0(f"Evaluation set size after sampling: {len(qa_data)}", console=True) + # ================================================================= + + # 3. Create mapping + selection_counts, class_groups = generate_powerlaw_selection_counts(m_val) + class_to_group_map = {class_id: group_id for class_id, group_id in zip(selection_counts.keys(), class_groups)} + + group_losses = defaultdict(float) + group_counts = defaultdict(int) + + with torch.no_grad(): + for item in tqdm(qa_data, desc="Detailed Evaluation", disable=not master_process): + if not item or 'text' not in item or not item['text']: continue + group_id = class_to_group_map.get(item['class_id']) + if group_id is None: continue + + tokens = tokenizer.encode(item['text'], add_special_tokens=False) + tokens.append(tokenizer.eos_token_id) + + original_len = len(tokens) + if original_len < 2: continue + + BLOCK_SIZE = 128 + padded_len = ((original_len + BLOCK_SIZE - 1) // BLOCK_SIZE) * BLOCK_SIZE + max_eval_len = 4096 + padded_len = min(padded_len, max_eval_len) + + final_tokens = tokens[:padded_len] + pad_token_id = tokenizer.pad_token_id if tokenizer.pad_token_id is not None else tokenizer.eos_token_id + padded_input = final_tokens + [pad_token_id] * (padded_len - len(final_tokens)) + + input_seq = torch.tensor(padded_input, dtype=torch.long, device=device) + + target_seq_list = (tokens[1:] + [pad_token_id])[:padded_len] + target_seq_list += [-100] * (padded_len - len(target_seq_list)) + target_seq = torch.tensor(target_seq_list, dtype=torch.long, device=device) + + window_blocks = torch.tensor(padded_len // BLOCK_SIZE, device=device, dtype=torch.int32) + + loss = model(input_seq, target_seq, window_blocks) + + if loss is not None and not torch.isnan(loss): + group_losses[group_id] += loss.item() + group_counts[group_id] += 1 + + avg_group_losses = {str(group): group_losses[group] / group_counts[group] + for group in group_losses if group_counts[group] > 0} + + print0("--- Per-Class Loss Evaluation Complete ---", console=True) + return avg_group_losses + +def plot_loss_curves(loss_history, output_path, plot_title="Per-Class Loss"): + """Plot loss curve from aggregated history data""" + plt.style.use('seaborn-v0_8-whitegrid') + fig, ax = plt.subplots(figsize=(8, 6)) + if not loss_history: + print0("Warning: Loss history is empty. 
Cannot plot.", console=True) + plt.close() + return + group_ids = sorted([int(g) for g in loss_history.keys()]) + cmap = plt.get_cmap("viridis") + norm = Normalize(vmin=min(group_ids) if group_ids else 0, vmax=max(group_ids) if group_ids else 1) + for group_id_int in group_ids: + group_id_str = str(group_id_int) + epoch_data = loss_history[group_id_str] + epochs = sorted([int(e) for e in epoch_data.keys()]) + losses = [epoch_data[str(e)] for e in epochs] + ax.plot(epochs, losses, color=cmap(norm(group_id_int)), linewidth=2.0, label=f'Group {group_id_int}') + ax.set_xlabel("Step", fontsize=14) + ax.set_ylabel("Per-Class Loss", fontsize=14) + ax.set_title(plot_title, fontsize=16) + ax.tick_params(axis='both', which='major', labelsize=12) + all_losses = [loss for group_data in loss_history.values() for loss in group_data.values()] + if all_losses: + min_loss, max_loss = min(all_losses), max(all_losses) + ax.set_ylim(min_loss * 0.95, max_loss * 1.05) + ax.legend(title="Class Group") + ax.grid(True) + plt.tight_layout() + plt.savefig(output_path, dpi=300) + print0(f"Per-Class Loss curve updated and saved to: {output_path}", console=True) + plt.close() + + + + + + +######################################## +# Construct model and optimizer # +######################################## +if master_process: + try: + ft_tokenizer = GPT2Tokenizer.from_pretrained('gpt2') + print0("PRINT: First-token accuracy tokenizer loaded.", console=True) + except: + ft_tokenizer = None + print0("PRINT: Failed to load tokenizer for first-token accuracy.", console=True) +else: + ft_tokenizer = None + +print0("PRINT: Constructing model...", console=True) +model: nn.Module = GPT(vocab_size=args.vocab_size, num_layers=12, num_heads=6, model_dim=768, + max_seq_len=max(args.train_seq_len, args.val_seq_len)).cuda() +for m in model.modules(): + if isinstance(m, nn.Embedding): + m.bfloat16() +print0("PRINT: Broadcasting model parameters...", console=True) +for param in model.parameters(): + dist.broadcast(param.detach(), 0) +print0("PRINT: Model constructed and broadcasted.", console=True) + + +if master_process: + print0("PRINT: Testing model forward function:", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) + model.train() + + print0(f"PRINT: Model test - Result type: {type(result)}", console=True) + if isinstance(result, tuple): + print0(f"PRINT: Model test - Tuple length: {len(result)}", console=True) + if len(result) >= 2: + print0(f"PRINT: Model test - First element (loss): {result[0]}", console=True) + print0(f"PRINT: Model test - Second element shape (logits): {result[1].shape if hasattr(result[1], 'shape') else 'No shape'}", console=True) + else: + print0(f"PRINT: Model test - Single result shape: {result.shape if hasattr(result, 'shape') else 'No shape'}", console=True) + except Exception as e: + print0(f"PRINT: Model test failed: {e}", console=True) + + +model_for_inference = model +print0("PRINT: Saved original model reference for inference.", console=True) + + +if master_process: + print0("PRINT: Testing model with target_seq=None...", console=True) + try: + test_input = torch.randint(0, 1000, (128,), device=device, dtype=torch.int32) + test_blocks = torch.tensor(1, device=device) + model.eval() + with torch.no_grad(): + result = model(test_input, None, test_blocks) # target_seq=None + model.train() + + if isinstance(result, 
+            loss, logits = result
+            print0(f"PRINT: SUCCESS! Model returns (loss={loss}, logits.shape={logits.shape})", console=True)
+        else:
+            print0(f"PRINT: Model returns: {type(result)}", console=True)
+    except Exception as e:
+        print0(f"PRINT: Model test still fails: {e}", console=True)
+
+
+
+# --- START MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+if exp_args.model_parameterization == "qkvo":
+    print0("PRINT: Collecting parameters for optimizers...", console=True)
+    head_params = [model.lm_head.weight]
+    embed_params = [model.embed.weight] + [ve.weight for ve in model.value_embeds]
+
+    # Granular collection for attention and MLP parts
+    attn_q_params = []
+    attn_k_params = []
+    attn_v_params = []
+    attn_o_params = []  # W_O from c_proj
+    mlp_fc_params = []
+    mlp_proj_params = []
+
+    for block_module in model.blocks:
+        if block_module.attn is not None:
+            # These attributes (q_w, k_w, v_w) MUST exist in your CausalSelfAttention class
+            if hasattr(block_module.attn, 'q_w'): attn_q_params.append(block_module.attn.q_w)
+            else: print0("PRINT: Warning: q_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'k_w'): attn_k_params.append(block_module.attn.k_w)
+            else: print0("PRINT: Warning: k_w not found in attn module of a block.", console=True)
+            if hasattr(block_module.attn, 'v_w'): attn_v_params.append(block_module.attn.v_w)
+            else: print0("PRINT: Warning: v_w not found in attn module of a block.", console=True)
+            attn_o_params.append(block_module.attn.c_proj.weight)
+        if block_module.mlp is not None:
+            mlp_fc_params.append(block_module.mlp.c_fc.weight)
+            mlp_proj_params.append(block_module.mlp.c_proj.weight)
+
+    # Combine into logical groups for experiments
+    attn_qk_group = attn_q_params + attn_k_params
+    attn_vo_group = attn_v_params + attn_o_params
+    all_attn_matrices = attn_qk_group + attn_vo_group
+    mlp_w1_group = mlp_fc_params
+    mlp_w2_group = mlp_proj_params
+    all_mlp_matrices = mlp_fc_params + mlp_proj_params
+
+    # Scalar parameters (all others not explicitly grouped as matrices)
+    matrix_params_for_scalar_check = set(head_params + embed_params + all_attn_matrices + all_mlp_matrices)
+    scalar_params = [p for n, p in model.named_parameters() if p not in matrix_params_for_scalar_check]
+    for p_scalar in scalar_params:  # Sanity check
+        if p_scalar.ndim >= 2:
+            print0(f"PRINT: Warning - Parameter {p_scalar.shape} ended up in scalar_params but has ndim >= 2. Check grouping.", console=True)
+
+
+    # Determine parameter distribution based on optimizer_mode
+    muon_params_target_list = []
+    adam_matrix_target_list = []  # Matrices that Adam will handle specifically
+    adam_matrix_lr = exp_args.adam_lr  # LR for matrices if Adam handles them (can be tuned)
+
+    current_optimizer_mode = exp_args.optimizer_mode
+    print0(f"PRINT: Configuring optimizers for EXPERIMENT_MODE = {current_optimizer_mode}", console=True)
+
+    if current_optimizer_mode == 0:  # Original behavior: Muon on all "hidden_matrix_params"
+        print0("PRINT: Mode 0: Muon on ALL Attention (QKVO) and ALL MLP matrices.", console=True)
+        muon_params_target_list = all_attn_matrices + all_mlp_matrices
+        # Adam handles embeds, head, scalars by default. No extra matrices for Adam here.
+    elif current_optimizer_mode == 1:  # Muon on QK, Adam on VO and MLP
+        print0(f"PRINT: Mode 1: Muon on QK Attn. Adam on VO Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_qk_group
+        adam_matrix_target_list = attn_vo_group + all_mlp_matrices
+    elif current_optimizer_mode == 2:  # Muon on VO, Adam on QK and MLP
+        print0(f"PRINT: Mode 2: Muon on VO Attn. Adam on QK Attn, MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group
+        adam_matrix_target_list = attn_qk_group + all_mlp_matrices
+    elif current_optimizer_mode == 3:  # Muon on All Attn (QKVO), Adam on MLP
+        print0(f"PRINT: Mode 3: Muon on ALL Attn (QKVO). Adam on MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_attn_matrices
+        adam_matrix_target_list = all_mlp_matrices
+    elif current_optimizer_mode == 4:  # Muon on MLP, Adam on All Attn (QKVO)
+        print0(f"PRINT: Mode 4: Muon on MLP. Adam on ALL Attn (QKVO) (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = all_mlp_matrices
+        adam_matrix_target_list = all_attn_matrices
+    elif current_optimizer_mode == 5:  # NEW MODE 5 - All Adam
+        print0(f"PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = []
+        adam_matrix_target_list = all_attn_matrices + all_mlp_matrices  # All matrices to Adam
+    elif current_optimizer_mode == 6:  # Muon on W_2 MLP, Adam on attn, W_1 MLP
+        print0(f"PRINT: Mode 6: Muon on W_2 MLP. Adam on attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = mlp_w2_group
+        adam_matrix_target_list = all_attn_matrices + mlp_w1_group
+    elif current_optimizer_mode == 7:  # Muon on VO Attn, MLP, Adam on QK Attn
+        print0(f"PRINT: Mode 7: Muon on VO Attn, MLP. Adam on QK Attn (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + all_mlp_matrices
+        adam_matrix_target_list = attn_qk_group
+    elif current_optimizer_mode == 8:  # Muon on VO Attn, W_2 MLP, Adam on QK Attn, W_1 MLP
+        print0(f"PRINT: Mode 8: Muon on VO Attn, W_2 MLP. Adam on QK Attn, W_1 MLP (Adam LR: {adam_matrix_lr}).", console=True)
+        muon_params_target_list = attn_vo_group + mlp_w2_group
+        adam_matrix_target_list = attn_qk_group + mlp_w1_group
+    else:
+        raise ValueError(f"Unsupported EXPERIMENT_MODE: {current_optimizer_mode}")
+
+    # Adam optimizer setup
+    adam_param_groups_config = [
+        #dict(params=head_params, lr=0.22),
+        #dict(params=embed_params, lr=0.6),
+        #dict(params=scalar_params, lr=0.04) # Scalar params always go to Adam
+        dict(params=head_params, lr=exp_args.adam_lr),
+        dict(params=embed_params, lr=exp_args.adam_lr),
+        dict(params=scalar_params, lr=exp_args.adam_lr)  # Scalar params always go to Adam
+    ]
+    # Add matrices specifically assigned to Adam for this experiment mode
+    if adam_matrix_target_list:
+        # Ensure adam_matrix_target_list is flat and contains Parameters
+        flat_adam_matrices = [p for sublist_or_p in adam_matrix_target_list for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]) if p is not None]
+        if flat_adam_matrices:  # Only add the group if there are params
+            adam_param_groups_config.append(dict(params=flat_adam_matrices, lr=adam_matrix_lr))
+
+    # Filter out any Adam groups that might be empty (e.g., if scalar_params was empty)
+    adam_param_groups_config = [g for g in adam_param_groups_config if g['params']]
+    optimizer1 = torch.optim.Adam(adam_param_groups_config, betas=(0.8, 0.95), eps=1e-10, fused=True)  # optionally add weight_decay=0.01 to Adam
+    optimizers = [optimizer1]  # Start with Adam
+
+    # Muon optimizer setup
+    if muon_params_target_list:
+        # Ensure muon_params_target_list is flat, unique, and contains Parameters
+        flat_unique_muon_params = []
+        seen_muon_ids = set()
+        for sublist_or_p in muon_params_target_list:
+            for p in (sublist_or_p if isinstance(sublist_or_p, list) else [sublist_or_p]):
+                if p is not None and id(p) not in seen_muon_ids:
+                    flat_unique_muon_params.append(p)
+                    seen_muon_ids.add(id(p))
+
+        if flat_unique_muon_params:  # Only create Muon if it has parameters
+            optimizer2 = Muon(flat_unique_muon_params, lr=0.01, momentum=0.95, nesterov=True, ns_steps=5, rank=rank, world_size=world_size)  # pass nesterov and ns_steps explicitly
+            optimizers.append(optimizer2)
+        else:
+            print0("PRINT: Muon optimizer not created as its target parameter list was empty.", console=True)
+            optimizer2 = None  # Explicitly set to None if not created
+    else:
+        print0("PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).", console=True)
+        optimizer2 = None  # Explicitly set to None
+
+    print0(f"PRINT: Optimizers configured. Total optimizers: {len(optimizers)}", console=True)
+    if optimizer2:
+        print0(f"PRINT: Muon optimizer is active with {len(flat_unique_muon_params)} parameters.", console=True)
+    # --- END MODIFIED PARAMETER COLLECTION AND OPTIMIZER SETUP ---
+elif exp_args.model_parameterization == "whole":
+    hidden_matrix_params = [p for n, p in model.blocks.named_parameters() if p.ndim >= 2 and "embed" not in n]
+    embed_params = [p for n, p in model.named_parameters() if "embed" in n]
+    scalar_params = [p for p in model.parameters() if p.ndim < 2]
+    head_params = [model.lm_head.weight]
+
+    # init the optimizer(s)
+    adam_params = [dict(params=head_params, lr=0.22), dict(params=embed_params, lr=0.6), dict(params=scalar_params, lr=0.04)]
+    # small adam epsilon by @YouJiacheng. this is an alternate method of fixing the world_size dependence
+    # discovered by @fernbear.bsky.social https://x.com/hi_tysam/status/1879692937589875094
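+    # [Editor's note, illustrative only; not part of the original script] Adam's update is
+    # lr * m_hat / (sqrt(v_hat) + eps). With eps=1e-10 (vs. the common default 1e-8) the
+    # denominator is dominated by sqrt(v_hat) even for tiny gradients, so per-parameter
+    # step sizes stay on the order of lr regardless of the gradient's absolute scale;
+    # see the linked post above for the world_size-dependence motivation.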
+    optimizer1 = torch.optim.Adam(adam_params, betas=(0.8, 0.95), eps=1e-10, fused=True)
+    optimizer2 = Muon(hidden_matrix_params, lr=0.05, momentum=0.95, rank=rank, world_size=world_size)
+    optimizers = [optimizer1, optimizer2]
+
+for opt in optimizers:
+    for group in opt.param_groups:
+        group["initial_lr"] = group["lr"]
+
+# learning rate schedule: stable then decay (KEEP AS IS, but check assert)
+# e.g. with num_iterations=10000 and cooldown_frac=0.8: steps 0-1999 use the full LR,
+# step 6000 gives w=0.5 -> multiplier 0.55, and step 10000 bottoms out at 0.1.
+def get_lr(step: int):
+    x = step / args.num_iterations  # progress in training
+    # assert 0 <= x < 1  # original assert; would fail on the last step when step == num_iterations
+    # --- MODIFICATION: replace the assert with clamping for the LR schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # clamp x if step goes beyond num_iterations
+        # print0(f"LR schedule x = {x:.4f} (step={step}) was clamped.", console=False)  # optional log
+
+    if x < 1 - args.cooldown_frac:
+        return 1.0
+    else:
+        # Ensure cooldown_frac is not zero to avoid division by zero
+        w = (1 - x) / max(args.cooldown_frac, 1e-9)
+        return w * 1.0 + (1 - w) * 0.1
+
+
+# attention window size schedule (KEEP AS IS): grows from 128 tokens at the start
+# of training to 1792 tokens (14 blocks of 128) at the end.
+def next_multiple_of_n(v: float | int, *, n: int):
+    return next(x for x in range(n, int(v) + 1 + n, n) if x >= v)
+@lru_cache(1)
+def get_window_size_blocks_helper(window_size: int):
+    return torch.tensor(window_size // 128, dtype=torch.int32, pin_memory=True).cuda(non_blocking=True)
+def get_window_size_blocks(step: int):
+    x = step / args.num_iterations  # progress in training
+    # --- MODIFICATION: replace the assert with clamping for the window size schedule ---
+    if not (0 <= x <= 1):
+        x = min(max(x, 0.0), 1.0)  # clamp x
+
+    # Ensure window_size is at least 128
+    window_size = max(128, next_multiple_of_n(1728 * x, n=128))
+    return get_window_size_blocks_helper(window_size)
+
+print0("PRINT: Compiling model with TorchInductor...", console=True)
+# Compile 'model' here; the compiled handle 'model_compiled' does not exist until this assignment.
+model_compiled: nn.Module = torch.compile(model, dynamic=False, mode="max-autotune")
+print0("PRINT: Model compilation complete.", console=True)
+
+########################################
+# Warmup kernels                       #
+########################################
+print0("PRINT: Starting warmup...", console=True)
+warmup_steps = 10
+initial_state = dict(
+    model=copy.deepcopy(model_compiled.state_dict()),
+    optimizers=[copy.deepcopy(opt.state_dict()) for opt in optimizers]
+)
+
+for i in range(warmup_steps):
+    inputs = targets = torch.randint(0, args.vocab_size, size=(args.train_seq_len,), device="cuda")
+    loss = model_compiled(inputs.to(torch.int32), targets, get_window_size_blocks(0))
+    loss.backward()
+    for param in model_compiled.parameters():
+        if param.grad is not None:
+            dist.all_reduce(param.grad, op=dist.ReduceOp.AVG)
+    for opt in optimizers:
+        opt.step()
+    model_compiled.zero_grad(set_to_none=True)
+    # Restore the pre-warmup state so warmup does not affect the real run
+    model_compiled.load_state_dict(initial_state["model"])
+    for opt, opt_state in zip(optimizers, initial_state["optimizers"]):
+        opt.load_state_dict(opt_state)
+
+del initial_state
+print0("PRINT: Warmup complete.", console=True)
+torch.cuda.synchronize()
+
+########################################
+# Training and validation              #
+########################################
+print0("PRINT: Starting training...", console=True)
+train_loader = distributed_data_generator(args.train_files, world_size * args.train_seq_len, rank, world_size)
+train_loss_sum = torch.zeros(1, device=device)
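+# [Editor's note] train_loss_sum / train_step_count accumulate the per-rank training
+# loss between validation points; they are cross-rank averaged and then reset inside
+# the validation section of the loop below.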
+train_step_count = torch.zeros(1, device=device)
+training_time_ms = 0
+torch.cuda.synchronize()
+t0 = time.perf_counter()
+train_steps = args.num_iterations
+
+
+if master_process:
+    tokenizer_for_eval = GPT2Tokenizer.from_pretrained('gpt2')
+
+    history = {
+        'per_class_loss': defaultdict(dict),
+        'per_class_acc': defaultdict(dict),
+        'total_loss': {},
+        'total_acc': {}
+    }
+    # --- Paths and constants for the detailed per-class evaluation ---
+    QA_JSONL_PATH = "/home/aiops/zhangfz/MUON_theory/modded-nanogpt/BIO_dataset/data/qa_tail.jsonl"
+    M_FOR_POWERLAW = 11
+    NUM_SAMPLES_FOR_DETAIL_EVAL = 5000
+
+
+for step in range(train_steps + 1):
+    last_step = (step == train_steps)
+
+    # --------- VALIDATION SECTION ---------
+    if step == 0 or last_step or (args.val_loss_every > 0 and step % args.val_loss_every == 0):
+        torch.cuda.synchronize()
+        if step > 0:
+            current_run_time = 1000 * (time.perf_counter() - t0)
+            training_time_ms += current_run_time
+
+        model_compiled.eval()
+        val_batch_size = world_size * args.val_seq_len
+        if args.val_tokens % val_batch_size != 0:
+            print0(f"PRINT: Warning: val_tokens ({args.val_tokens}) not perfectly divisible by val_batch_size ({val_batch_size}). Some tokens might be missed.", console=True)
+
+        val_num_steps = args.val_tokens // val_batch_size
+        val_loader = distributed_data_generator(args.val_files, val_batch_size, rank, world_size)
+        val_loss_sum = torch.zeros(1, device=device)
+        actual_val_steps = 0
+
+        with torch.no_grad():
+            for val_i in range(val_num_steps):
+                try:
+                    inputs, targets = next(val_loader)
+                    loss_val = model_compiled(inputs, targets, get_window_size_blocks(step))
+                    val_loss_sum += loss_val
+                    actual_val_steps += 1
+                except StopIteration:
+                    print0(f"PRINT: Validation data loader for '{args.val_files}' exhausted early at val_step {val_i+1}/{val_num_steps}.", console=True)
+                    break
+
+        if actual_val_steps > 0:
+            val_loss_avg = val_loss_sum / actual_val_steps
+        else:
+            val_loss_avg = torch.tensor(float('nan'), device=device)
+            print0("PRINT: Warning: No validation steps were completed. val_loss is NaN.", console=True)
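+        # [Editor's note, derived from the logged run below] With val_tokens=1966080 and
+        # val_batch_size = world_size * val_seq_len = 262144, val_num_steps = 1966080 // 262144 = 7,
+        # so 7 full validation steps run and the remaining 131072 tokens (half a batch) are
+        # skipped, which is exactly what the divisibility warning above reports.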
+
+        del val_loader
+        dist.all_reduce(val_loss_avg, op=dist.ReduceOp.AVG)
+
+        if train_step_count > 0:
+            avg_train_loss = train_loss_sum / train_step_count
+            dist.all_reduce(avg_train_loss, op=dist.ReduceOp.AVG)
+            avg_train_loss = avg_train_loss.item()
+        else:
+            avg_train_loss = float('nan')
+
+        avg_step_time = training_time_ms / max(step, 1) if step > 0 else 0
+
+        #first_token_acc = 0.0
+        #ft_correct = 0
+        #ft_total = 0
+        #if master_process and ft_tokenizer is not None:
+        #    try:
+        #        first_token_acc, ft_correct, ft_total = compute_first_token_accuracy(
+        #            model_for_inference, ft_tokenizer, device, num_samples=1000
+        #        )
+        #    except Exception as e:
+        #        print0(f"PRINT: First-token accuracy computation failed: {e}", console=True)
+
+        #if world_size > 1:
+            #ft_acc_tensor = torch.tensor(first_token_acc, device=device)
+            #ft_correct_tensor = torch.tensor(ft_correct, device=device)
+            #ft_total_tensor = torch.tensor(ft_total, device=device)
+            #dist.broadcast(ft_acc_tensor, 0)
+            #dist.broadcast(ft_correct_tensor, 0)
+            #dist.broadcast(ft_total_tensor, 0)
+            #first_token_acc = ft_acc_tensor.item()
+            #ft_correct = int(ft_correct_tensor.item())
+            #ft_total = int(ft_total_tensor.item())
+
+        avg_train_loss = float(avg_train_loss)
+        if step == 0:
+            print0(f"PRINT: step:{step}/{train_steps} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms", console=True)
+        else:
+            print0(f"PRINT: step:{step}/{train_steps} train_loss:{avg_train_loss:.4f} val_loss:{val_loss_avg.item():.4f} train_time:{training_time_ms:.0f}ms step_avg:{avg_step_time:.2f}ms", console=True)
+
+        if master_process and step > 0:
+            selection_counts, class_groups_list = generate_powerlaw_selection_counts(M_FOR_POWERLAW)
+            class_to_group_map = {cid: gid for cid, gid in zip(selection_counts.keys(), class_groups_list)}
+
+            # model_for_inference aliases model (assigned above), so this refresh is effectively a no-op kept for safety
+            model_for_inference.load_state_dict(model.state_dict())
+
+            eval_results = run_detailed_evaluation(
+                model=model_for_inference,
+                tokenizer=tokenizer_for_eval,
+                qa_data_path=QA_JSONL_PATH,
+                device=device,
+                m_val=M_FOR_POWERLAW,
+                class_to_group_map=class_to_group_map,
+                num_samples=NUM_SAMPLES_FOR_DETAIL_EVAL
+            )
+
+            print0("--- Detailed Evaluation Results (This Step) ---", console=True)
+            print0(f"  Total Loss: {eval_results['total_loss']:.4f}", console=True)
+            print0(f"  Total FTA: {eval_results['total_acc']:.4f}", console=True)
+            for group_id, loss in sorted(eval_results['per_class_loss'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} Loss: {loss:.4f}", console=True)
+            for group_id, acc in sorted(eval_results['per_class_acc'].items(), key=lambda item: int(item[0])):
+                print0(f"  Group {group_id} FTA: {acc:.4f}", console=True)
+
+            current_step_str = str(step)
+            history['total_loss'][current_step_str] = eval_results['total_loss']
+            history['total_acc'][current_step_str] = eval_results['total_acc']
+            for group_id, loss in eval_results['per_class_loss'].items():
+                history['per_class_loss'][group_id][current_step_str] = loss
+            for group_id, acc in eval_results['per_class_acc'].items():
+                history['per_class_acc'][group_id][current_step_str] = acc
+
+            plot_curves(history['per_class_loss'], run_dir_path / "per_class_loss_curves.png", "Per-Class Loss", "Loss")
+            plot_curves(history['per_class_acc'], run_dir_path / "per_class_acc_curves.png", "Per-Class FTA", "Accuracy", y_lim=[0, 1])
+            plot_curves(history['total_loss'], run_dir_path / "total_loss_curve.png", "Total Detailed Loss", "Loss")
+            plot_curves(history['total_acc'], run_dir_path / "total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1])
"total_acc_curve.png", "Total Detailed FTA", "Accuracy", y_lim=[0, 1]) + + if world_size > 1: + dist.barrier() + + + if master_process and args.save_checkpoint and step > 0: + if run_dir_path_str: + + checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + + + checkpoint_path = checkpoint_parent_dir / f"ckpt_epoch_{step}.pt" + + log_checkpoint = dict( + step=step, + code=code, + model=model_compiled.state_dict(), + optimizers=[opt.state_dict() for opt in optimizers] + ) + + torch.save(log_checkpoint, str(checkpoint_path)) + print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + else: + print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + + train_loss_sum = torch.zeros(1, device=device) + train_step_count = torch.zeros(1, device=device) + model_compiled.train() + torch.cuda.synchronize() + t0 = time.perf_counter() + + #if last_step: + # if master_process and args.save_checkpoint: + # if run_dir_path_str: + # checkpoint_parent_dir = Path(run_dir_path_str) / "checkpoints" + # checkpoint_parent_dir.mkdir(parents=True, exist_ok=True) + # checkpoint_path = checkpoint_parent_dir / f"state_step{step:06d}.pt" + # log_checkpoint = dict( + # step=step, + # code=code, + # model=model_compiled.state_dict(), + # optimizers=[opt.state_dict() for opt in optimizers] + # ) + # torch.save(log_checkpoint, str(checkpoint_path)) + # print0(f"PRINT: Saved checkpoint to {checkpoint_path}", console=True) + # else: + # print0("PRINT: Warning - run_dir_path_str not set, cannot save checkpoint.", console=True) + # break + + # --------- TRAINING SECTION --------- + try: + inputs, targets = next(train_loader) + except StopIteration: + + print0(f"PRINT: Training data loader for '{args.train_files}' exhausted. Ending training early at step {step}.", console=True) + break + + loss_train = model_compiled(inputs, targets, get_window_size_blocks(step)) + loss_train.backward() + train_loss_sum += loss_train.detach()/ args.train_seq_len + train_step_count += 1 + + for param in model_compiled.parameters(): + if param.grad is not None: + dist.all_reduce(param.grad, op=dist.ReduceOp.AVG) + + current_lr_val = get_lr(step) + for opt in optimizers: + for group in opt.param_groups: + group["lr"] = group["initial_lr"] * current_lr_val + + if optimizer2 is not None: + for group in optimizer2.param_groups: + frac = min(step / 300, 1) + group["momentum"] = (1 - frac) * 0.85 + frac * 0.95 + + for opt in optimizers: + opt.step() + + model_compiled.zero_grad(set_to_none=True) + + if step > 0 and (step % 20 == 0 or step == train_steps - 1): + current_segment_time_ms = 1000 * (time.perf_counter() - t0) + approx_total_training_time_ms = training_time_ms + current_segment_time_ms + total_tokens_in_batch = args.train_seq_len * world_size + train_loss_per_token = loss_train.item() / total_tokens_in_batch if total_tokens_in_batch > 0 else loss_train.item() + print0(f"step:{step+1}/{train_steps} train_time:{approx_total_training_time_ms:.0f}ms step_avg:{approx_total_training_time_ms/max(1, step + 1):.2f}ms", console=True) + +print0(f"PRINT: --- Training Finished: {time.ctime()} ---", console=True) +print0(f"PRINT: Peak memory allocated: {torch.cuda.max_memory_allocated() // 1024 // 1024} MiB " + f"reserved: {torch.cuda.max_memory_reserved() // 1024 // 1024} MiB", console=True) + +if dist.is_initialized(): + dist.destroy_process_group() + +[2025-07-07 06:12:34] [Rank 0] PRINT: First-token accuracy tokenizer loaded. 
+[2025-07-07 06:12:34] [Rank 0] PRINT: First-token accuracy tokenizer loaded.
+[2025-07-07 06:12:34] [Rank 0] PRINT: Constructing model...
+[2025-07-07 06:12:36] [Rank 0] PRINT: Broadcasting model parameters...
+[2025-07-07 06:12:36] [Rank 0] PRINT: Model constructed and broadcasted.
+[2025-07-07 06:12:36] [Rank 0] PRINT: Testing model forward function:
+[2025-07-07 06:12:37] [Rank 0] PRINT: Model test - Result type: 
+[2025-07-07 06:12:37] [Rank 0] PRINT: Model test - Single result shape: torch.Size([1, 128, 50304])
+[2025-07-07 06:12:37] [Rank 0] PRINT: Saved original model reference for inference.
+[2025-07-07 06:12:37] [Rank 0] PRINT: Testing model with target_seq=None...
+[2025-07-07 06:12:37] [Rank 0] PRINT: Model returns: 
+[2025-07-07 06:12:37] [Rank 0] PRINT: Collecting parameters for optimizers...
+[2025-07-07 06:12:37] [Rank 0] PRINT: Configuring optimizers for EXPERIMENT_MODE = 5
+[2025-07-07 06:12:37] [Rank 0] PRINT: Mode 5: All Adam. All Attn and MLP matrices to Adam (Adam LR: 0.005).
+[2025-07-07 06:12:37] [Rank 0] PRINT: Muon optimizer not created as muon_params_target_list was empty (e.g. mode where Adam handles all matrices).
+[2025-07-07 06:12:37] [Rank 0] PRINT: Optimizers configured. Total optimizers: 1
+[2025-07-07 06:12:37] [Rank 0] PRINT: Compiling model with TorchInductor...
+[2025-07-07 06:12:37] [Rank 0] PRINT: Model compilation complete.
+[2025-07-07 06:12:37] [Rank 0] PRINT: Starting warmup...
+[2025-07-07 06:13:42] [Rank 0] PRINT: Warmup complete.
+[2025-07-07 06:13:42] [Rank 0] PRINT: Starting training...
+[2025-07-07 06:13:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:13:50] [Rank 0] PRINT: step:0/10000 val_loss:10.8258 train_time:0ms
+[2025-07-07 06:13:52] [Rank 0] step:21/10000 train_time:814ms step_avg:38.78ms
+[2025-07-07 06:13:53] [Rank 0] step:41/10000 train_time:2128ms step_avg:51.90ms
+[2025-07-07 06:13:55] [Rank 0] step:61/10000 train_time:3444ms step_avg:56.45ms
+[2025-07-07 06:13:56] [Rank 0] step:81/10000 train_time:4763ms step_avg:58.80ms
+[2025-07-07 06:13:57] [Rank 0] step:101/10000 train_time:6087ms step_avg:60.27ms
+[2025-07-07 06:13:59] [Rank 0] step:121/10000 train_time:7413ms step_avg:61.27ms
+[2025-07-07 06:14:00] [Rank 0] step:141/10000 train_time:8740ms step_avg:61.99ms
+[2025-07-07 06:14:01] [Rank 0] step:161/10000 train_time:10068ms step_avg:62.53ms
+[2025-07-07 06:14:03] [Rank 0] step:181/10000 train_time:11443ms step_avg:63.22ms
+[2025-07-07 06:14:04] [Rank 0] step:201/10000 train_time:12770ms step_avg:63.53ms
+[2025-07-07 06:14:05] [Rank 0] step:221/10000 train_time:14100ms step_avg:63.80ms
+[2025-07-07 06:14:07] [Rank 0] step:241/10000 train_time:15428ms step_avg:64.02ms
+[2025-07-07 06:14:08] [Rank 0] step:261/10000 train_time:16759ms step_avg:64.21ms
+[2025-07-07 06:14:09] [Rank 0] step:281/10000 train_time:18088ms step_avg:64.37ms
+[2025-07-07 06:14:11] [Rank 0] step:301/10000 train_time:19417ms step_avg:64.51ms
+[2025-07-07 06:14:12] [Rank 0] step:321/10000 train_time:20748ms step_avg:64.64ms
+[2025-07-07 06:14:13] [Rank 0] step:341/10000 train_time:22078ms step_avg:64.74ms
+[2025-07-07 06:14:15] [Rank 0] step:361/10000 train_time:23410ms step_avg:64.85ms
+[2025-07-07 06:14:16] [Rank 0] step:381/10000 train_time:24803ms step_avg:65.10ms
+[2025-07-07 06:14:17] [Rank 0] step:401/10000 train_time:26134ms step_avg:65.17ms
+[2025-07-07 06:14:19] [Rank 0] step:421/10000 train_time:27465ms step_avg:65.24ms
+[2025-07-07 06:14:20] [Rank 0] step:441/10000 train_time:28795ms step_avg:65.30ms
+[2025-07-07 06:14:21] [Rank 0] step:461/10000 train_time:30128ms step_avg:65.35ms
+[2025-07-07 06:14:23] [Rank 0] step:481/10000 train_time:31458ms step_avg:65.40ms
+[2025-07-07 06:14:24] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:14:25] [Rank 0] PRINT: step:500/10000 train_loss:3.6466 val_loss:2.1097 train_time:33396ms step_avg:66.79ms
+[2025-07-07 06:14:25] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:14:25] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:14:25] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:19:44] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:19:44] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:19:44] [Rank 0] Total Loss: 5.3558
+[2025-07-07 06:19:44] [Rank 0] Total FTA: 0.0792
+[2025-07-07 06:19:44] [Rank 0] Group 0 Loss: 5.3993
+[2025-07-07 06:19:44] [Rank 0] Group 1 Loss: 5.1728
+[2025-07-07 06:19:44] [Rank 0] Group 2 Loss: 5.3850
+[2025-07-07 06:19:44] [Rank 0] Group 3 Loss: 5.3553
+[2025-07-07 06:19:44] [Rank 0] Group 4 Loss: 5.3616
+[2025-07-07 06:19:44] [Rank 0] Group 5 Loss: 5.3831
+[2025-07-07 06:19:44] [Rank 0] Group 6 Loss: 5.3138
+[2025-07-07 06:19:44] [Rank 0] Group 7 Loss: 5.3592
+[2025-07-07 06:19:44] [Rank 0] Group 8 Loss: 5.3411
+[2025-07-07 06:19:44] [Rank 0] Group 9 Loss: 5.4134
+[2025-07-07 06:19:44] [Rank 0] Group 10 Loss: 5.3428
+[2025-07-07 06:19:44] [Rank 0] Group 11 Loss: 5.3809
+[2025-07-07 06:19:44] [Rank 0] Group 0 FTA: 0.1404
+[2025-07-07 06:19:44] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:19:44] [Rank 0] Group 2 FTA: 0.0755
+[2025-07-07 06:19:44] [Rank 0] Group 3 FTA: 0.0885
+[2025-07-07 06:19:44] [Rank 0] Group 4 FTA: 0.0156
+[2025-07-07 06:19:44] [Rank 0] Group 5 FTA: 0.0807
+[2025-07-07 06:19:44] [Rank 0] Group 6 FTA: 0.0729
+[2025-07-07 06:19:44] [Rank 0] Group 7 FTA: 0.0729
+[2025-07-07 06:19:44] [Rank 0] Group 8 FTA: 0.0964
+[2025-07-07 06:19:44] [Rank 0] Group 9 FTA: 0.1016
+[2025-07-07 06:19:44] [Rank 0] Group 10 FTA: 0.0762
+[2025-07-07 06:19:44] [Rank 0] Group 11 FTA: 0.0781
+[2025-07-07 06:19:44] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 06:19:45] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 06:19:45] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 06:19:45] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 06:19:45] [Rank 0] step:501/10000 train_time:33405ms step_avg:66.68ms
+[2025-07-07 06:19:47] [Rank 0] step:521/10000 train_time:34138ms step_avg:65.52ms
+[2025-07-07 06:19:48] [Rank 0] step:541/10000 train_time:36164ms step_avg:66.85ms
+[2025-07-07 06:19:49] [Rank 0] step:561/10000 train_time:36880ms step_avg:65.74ms
+[2025-07-07 06:19:51] [Rank 0] step:581/10000 train_time:38207ms step_avg:65.76ms
+[2025-07-07 06:19:52] [Rank 0] step:601/10000 train_time:39534ms step_avg:65.78ms
+[2025-07-07 06:19:53] [Rank 0] step:621/10000 train_time:40863ms step_avg:65.80ms
+[2025-07-07 06:19:55] [Rank 0] step:641/10000 train_time:42192ms step_avg:65.82ms
+[2025-07-07 06:19:56] [Rank 0] step:661/10000 train_time:43530ms step_avg:65.85ms
+[2025-07-07 06:19:57] [Rank 0] step:681/10000 train_time:44859ms step_avg:65.87ms
+[2025-07-07 06:19:59] [Rank 0] step:701/10000 train_time:46192ms step_avg:65.89ms
+[2025-07-07 06:20:00] [Rank 0] step:721/10000 train_time:47596ms step_avg:66.01ms
+[2025-07-07 06:20:01] [Rank 0] step:741/10000 train_time:48892ms step_avg:65.98ms
+[2025-07-07 06:20:03] [Rank 0] step:761/10000 train_time:50231ms step_avg:66.01ms
+[2025-07-07 06:20:04] [Rank 0] step:781/10000 train_time:51578ms step_avg:66.04ms
+[2025-07-07 06:20:05] [Rank 0] step:801/10000 train_time:52923ms step_avg:66.07ms
+[2025-07-07 06:20:07] [Rank 0] step:821/10000 train_time:54266ms step_avg:66.10ms
+[2025-07-07 06:20:08] [Rank 0] step:841/10000 train_time:55612ms step_avg:66.13ms
+[2025-07-07 06:20:09] [Rank 0] step:861/10000 train_time:56959ms step_avg:66.15ms
+[2025-07-07 06:20:11] [Rank 0] step:881/10000 train_time:58309ms step_avg:66.19ms
+[2025-07-07 06:20:12] [Rank 0] step:901/10000 train_time:59707ms step_avg:66.27ms
+[2025-07-07 06:20:14] [Rank 0] step:921/10000 train_time:61058ms step_avg:66.30ms
+[2025-07-07 06:20:15] [Rank 0] step:941/10000 train_time:62418ms step_avg:66.33ms
+[2025-07-07 06:20:16] [Rank 0] step:961/10000 train_time:63762ms step_avg:66.35ms
+[2025-07-07 06:20:18] [Rank 0] step:981/10000 train_time:65106ms step_avg:66.37ms
+[2025-07-07 06:20:19] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:20:20] [Rank 0] PRINT: step:1000/10000 train_loss:1.8505 val_loss:1.7543 train_time:67064ms step_avg:67.06ms
+[2025-07-07 06:20:20] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:20:20] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:20:20] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:25:36] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:25:36] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:25:36] [Rank 0] Total Loss: 5.7942
+[2025-07-07 06:25:36] [Rank 0] Total FTA: 0.0694
+[2025-07-07 06:25:36] [Rank 0] Group 0 Loss: 5.8644
+[2025-07-07 06:25:36] [Rank 0] Group 1 Loss: 5.5347
+[2025-07-07 06:25:36] [Rank 0] Group 2 Loss: 5.8561
+[2025-07-07 06:25:36] [Rank 0] Group 3 Loss: 5.8011
+[2025-07-07 06:25:36] [Rank 0] Group 4 Loss: 5.8552
+[2025-07-07 06:25:36] [Rank 0] Group 5 Loss: 5.6836
+[2025-07-07 06:25:36] [Rank 0] Group 6 Loss: 5.7938
+[2025-07-07 06:25:36] [Rank 0] Group 7 Loss: 5.8128
+[2025-07-07 06:25:36] [Rank 0] Group 8 Loss: 5.7461
+[2025-07-07 06:25:36] [Rank 0] Group 9 Loss: 5.8020
+[2025-07-07 06:25:36] [Rank 0] Group 10 Loss: 5.8070
+[2025-07-07 06:25:36] [Rank 0] Group 11 Loss: 5.8346
+[2025-07-07 06:25:36] [Rank 0] Group 0 FTA: 0.0000
+[2025-07-07 06:25:36] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:25:36] [Rank 0] Group 2 FTA: 0.0703
+[2025-07-07 06:25:36] [Rank 0] Group 3 FTA: 0.1042
+[2025-07-07 06:25:36] [Rank 0] Group 4 FTA: 0.0286
+[2025-07-07 06:25:36] [Rank 0] Group 5 FTA: 0.0391
+[2025-07-07 06:25:36] [Rank 0] Group 6 FTA: 0.0859
+[2025-07-07 06:25:37] [Rank 0] Group 7 FTA: 0.0885
+[2025-07-07 06:25:37] [Rank 0] Group 8 FTA: 0.0990
+[2025-07-07 06:25:37] [Rank 0] Group 9 FTA: 0.0938
+[2025-07-07 06:25:37] [Rank 0] Group 10 FTA: 0.1152
+[2025-07-07 06:25:37] [Rank 0] Group 11 FTA: 0.1074
+[2025-07-07 06:25:37] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 06:25:37] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 06:25:38] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 06:25:38] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 06:25:38] [Rank 0] step:1001/10000 train_time:67074ms step_avg:67.01ms
+[2025-07-07 06:25:39] [Rank 0] step:1021/10000 train_time:67835ms step_avg:66.44ms
+[2025-07-07 06:25:41] [Rank 0] step:1041/10000 train_time:69173ms step_avg:66.45ms
+[2025-07-07 06:25:42] [Rank 0] step:1061/10000 train_time:70512ms step_avg:66.46ms
+[2025-07-07 06:25:43] [Rank 0] step:1081/10000 train_time:71850ms step_avg:66.47ms
+[2025-07-07 06:25:45] [Rank 0] step:1101/10000 train_time:73248ms step_avg:66.53ms
+[2025-07-07 06:25:46] [Rank 0] step:1121/10000 train_time:74589ms step_avg:66.54ms
+[2025-07-07 06:25:47] [Rank 0] step:1141/10000 train_time:75930ms step_avg:66.55ms
+[2025-07-07 06:25:49] [Rank 0] step:1161/10000 train_time:77272ms step_avg:66.56ms
+[2025-07-07 06:25:50] [Rank 0] step:1181/10000 train_time:78614ms step_avg:66.57ms
+[2025-07-07 06:25:51] [Rank 0] step:1201/10000 train_time:79956ms step_avg:66.57ms
+[2025-07-07 06:25:53] [Rank 0] step:1221/10000 train_time:81300ms step_avg:66.58ms
+[2025-07-07 06:25:54] [Rank 0] step:1241/10000 train_time:82643ms step_avg:66.59ms
+[2025-07-07 06:25:55] [Rank 0] step:1261/10000 train_time:83987ms step_avg:66.60ms
+[2025-07-07 06:25:57] [Rank 0] step:1281/10000 train_time:85380ms step_avg:66.65ms
+[2025-07-07 06:25:58] [Rank 0] step:1301/10000 train_time:86724ms step_avg:66.66ms
+[2025-07-07 06:25:59] [Rank 0] step:1321/10000 train_time:88068ms step_avg:66.67ms
+[2025-07-07 06:26:01] [Rank 0] step:1341/10000 train_time:89412ms step_avg:66.68ms
+[2025-07-07 06:26:02] [Rank 0] step:1361/10000 train_time:90758ms step_avg:66.68ms
+[2025-07-07 06:26:04] [Rank 0] step:1381/10000 train_time:92107ms step_avg:66.70ms
+[2025-07-07 06:26:05] [Rank 0] step:1401/10000 train_time:93455ms step_avg:66.71ms
+[2025-07-07 06:26:06] [Rank 0] step:1421/10000 train_time:94802ms step_avg:66.71ms
+[2025-07-07 06:26:08] [Rank 0] step:1441/10000 train_time:96149ms step_avg:66.72ms
+[2025-07-07 06:26:09] [Rank 0] step:1461/10000 train_time:97541ms step_avg:66.76ms
+[2025-07-07 06:26:10] [Rank 0] step:1481/10000 train_time:98892ms step_avg:66.77ms
+[2025-07-07 06:26:12] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:26:13] [Rank 0] PRINT: step:1500/10000 train_loss:1.6750 val_loss:1.5661 train_time:100853ms step_avg:67.24ms
+[2025-07-07 06:26:13] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:26:13] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:26:13] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:31:29] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:31:29] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:31:29] [Rank 0] Total Loss: 5.9415
+[2025-07-07 06:31:29] [Rank 0] Total FTA: 0.0902
+[2025-07-07 06:31:29] [Rank 0] Group 0 Loss: 5.9987
+[2025-07-07 06:31:29] [Rank 0] Group 1 Loss: 5.6040
+[2025-07-07 06:31:29] [Rank 0] Group 2 Loss: 5.7457
+[2025-07-07 06:31:29] [Rank 0] Group 3 Loss: 6.1547
+[2025-07-07 06:31:29] [Rank 0] Group 4 Loss: 6.0198
+[2025-07-07 06:31:29] [Rank 0] Group 5 Loss: 5.8664
+[2025-07-07 06:31:29] [Rank 0] Group 6 Loss: 5.9135
+[2025-07-07 06:31:29] [Rank 0] Group 7 Loss: 6.0095
+[2025-07-07 06:31:29] [Rank 0] Group 8 Loss: 5.9707
+[2025-07-07 06:31:29] [Rank 0] Group 9 Loss: 5.9842
+[2025-07-07 06:31:29] [Rank 0] Group 10 Loss: 5.9527
+[2025-07-07 06:31:29] [Rank 0] Group 11 Loss: 5.9753
+[2025-07-07 06:31:29] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 06:31:29] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 06:31:29] [Rank 0] Group 2 FTA: 0.1458
+[2025-07-07 06:31:29] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 06:31:29] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 06:31:29] [Rank 0] Group 5 FTA: 0.0703
+[2025-07-07 06:31:29] [Rank 0] Group 6 FTA: 0.0990
+[2025-07-07 06:31:29] [Rank 0] Group 7 FTA: 0.1146
+[2025-07-07 06:31:29] [Rank 0] Group 8 FTA: 0.0807
+[2025-07-07 06:31:29] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 06:31:29] [Rank 0] Group 10 FTA: 0.0781
+[2025-07-07 06:31:29] [Rank 0] Group 11 FTA: 0.0869
+[2025-07-07 06:31:29] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 06:31:30] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 06:31:30] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 06:31:30] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 06:31:30] [Rank 0] step:1501/10000 train_time:100863ms step_avg:67.20ms
+[2025-07-07 06:31:31] [Rank 0] step:1521/10000 train_time:101607ms step_avg:66.80ms
+[2025-07-07 06:31:33] [Rank 0] step:1541/10000 train_time:102944ms step_avg:66.80ms
+[2025-07-07 06:31:34] [Rank 0] step:1561/10000 train_time:104284ms step_avg:66.81ms
+[2025-07-07 06:31:35] [Rank 0] step:1581/10000 train_time:105623ms step_avg:66.81ms
+[2025-07-07 06:31:37] [Rank 0] step:1601/10000 train_time:106964ms step_avg:66.81ms
+[2025-07-07 06:31:38] [Rank 0] step:1621/10000 train_time:108306ms step_avg:66.81ms
+[2025-07-07 06:31:40] [Rank 0] step:1641/10000 train_time:109694ms step_avg:66.85ms
+[2025-07-07 06:31:41] [Rank 0] step:1661/10000 train_time:111037ms step_avg:66.85ms
+[2025-07-07 06:31:42] [Rank 0] step:1681/10000 train_time:112380ms step_avg:66.85ms
+[2025-07-07 06:31:44] [Rank 0] step:1701/10000 train_time:113721ms step_avg:66.86ms
+[2025-07-07 06:31:45] [Rank 0] step:1721/10000 train_time:115064ms step_avg:66.86ms
+[2025-07-07 06:31:46] [Rank 0] step:1741/10000 train_time:116406ms step_avg:66.86ms
+[2025-07-07 06:31:48] [Rank 0] step:1761/10000 train_time:117748ms step_avg:66.86ms
+[2025-07-07 06:31:49] [Rank 0] step:1781/10000 train_time:119092ms step_avg:66.87ms
+[2025-07-07 06:31:50] [Rank 0] step:1801/10000 train_time:120692ms step_avg:67.01ms
+[2025-07-07 06:31:52] [Rank 0] step:1821/10000 train_time:121781ms step_avg:66.88ms
+[2025-07-07 06:31:53] [Rank 0] step:1841/10000 train_time:123131ms step_avg:66.88ms
+[2025-07-07 06:31:54] [Rank 0] step:1861/10000 train_time:124477ms step_avg:66.89ms
+[2025-07-07 06:31:56] [Rank 0] step:1881/10000 train_time:125824ms step_avg:66.89ms
+[2025-07-07 06:31:57] [Rank 0] step:1901/10000 train_time:127170ms step_avg:66.90ms
+[2025-07-07 06:31:58] [Rank 0] step:1921/10000 train_time:128517ms step_avg:66.90ms
+[2025-07-07 06:32:00] [Rank 0] step:1941/10000 train_time:129862ms step_avg:66.90ms
+[2025-07-07 06:32:01] [Rank 0] step:1961/10000 train_time:131208ms step_avg:66.91ms
+[2025-07-07 06:32:02] [Rank 0] step:1981/10000 train_time:132553ms step_avg:66.91ms
+[2025-07-07 06:32:04] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:32:05] [Rank 0] PRINT: step:2000/10000 train_loss:1.4447 val_loss:1.3437 train_time:134563ms step_avg:67.28ms
+[2025-07-07 06:32:05] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:32:05] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:32:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 06:32:05] [Rank 0] Evaluation set size after sampling: 5633 +[2025-07-07 06:37:21] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 06:37:21] [Rank 0] --- Detailed Evaluation Complete --- +[2025-07-07 06:37:21] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 06:37:21] [Rank 0] --- Detailed Evaluation Results (This Step) --- +[2025-07-07 06:37:21] [Rank 0] Total Loss: 5.8775 +[2025-07-07 06:37:21] [Rank 0] Total Loss: 5.8775 +[2025-07-07 06:37:21] [Rank 0] Total FTA: 0.0904 +[2025-07-07 06:37:21] [Rank 0] Total FTA: 0.0904 +[2025-07-07 06:37:21] [Rank 0] Group 0 Loss: 5.9370 +[2025-07-07 06:37:21] [Rank 0] Group 0 Loss: 5.9370 +[2025-07-07 06:37:21] [Rank 0] Group 1 Loss: 5.6916 +[2025-07-07 06:37:21] [Rank 0] Group 1 Loss: 5.6916 +[2025-07-07 06:37:21] [Rank 0] Group 2 Loss: 5.7151 +[2025-07-07 06:37:21] [Rank 0] Group 2 Loss: 5.7151 +[2025-07-07 06:37:21] [Rank 0] Group 3 Loss: 5.9269 +[2025-07-07 06:37:21] [Rank 0] Group 3 Loss: 5.9269 +[2025-07-07 06:37:21] [Rank 0] Group 4 Loss: 5.9060 +[2025-07-07 06:37:21] [Rank 0] Group 4 Loss: 5.9060 +[2025-07-07 06:37:21] [Rank 0] Group 5 Loss: 5.8482 +[2025-07-07 06:37:21] [Rank 0] Group 5 Loss: 5.8482 +[2025-07-07 06:37:21] [Rank 0] Group 6 Loss: 5.8265 +[2025-07-07 06:37:21] [Rank 0] Group 6 Loss: 5.8265 +[2025-07-07 06:37:21] [Rank 0] Group 7 Loss: 6.0009 +[2025-07-07 06:37:21] [Rank 0] Group 7 Loss: 6.0009 +[2025-07-07 06:37:21] [Rank 0] Group 8 Loss: 5.8685 +[2025-07-07 06:37:21] [Rank 0] Group 8 Loss: 5.8685 +[2025-07-07 06:37:21] [Rank 0] Group 9 Loss: 5.8722 +[2025-07-07 06:37:21] [Rank 0] Group 9 Loss: 5.8722 +[2025-07-07 06:37:21] [Rank 0] Group 10 Loss: 5.8981 +[2025-07-07 06:37:21] [Rank 0] Group 10 Loss: 5.8981 +[2025-07-07 06:37:21] [Rank 0] Group 11 Loss: 5.9122 +[2025-07-07 06:37:21] [Rank 0] Group 11 Loss: 5.9122 +[2025-07-07 06:37:21] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 06:37:21] [Rank 0] Group 0 FTA: 0.1743 +[2025-07-07 06:37:21] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 06:37:21] [Rank 0] Group 1 FTA: 0.0000 +[2025-07-07 06:37:21] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 06:37:21] [Rank 0] Group 2 FTA: 0.0807 +[2025-07-07 06:37:21] [Rank 0] Group 3 FTA: 0.0625 +[2025-07-07 06:37:21] [Rank 0] Group 3 FTA: 0.0625 +[2025-07-07 06:37:21] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-07 06:37:21] [Rank 0] Group 4 FTA: 0.0208 +[2025-07-07 06:37:21] [Rank 0] Group 5 FTA: 0.0859 +[2025-07-07 06:37:21] [Rank 0] Group 5 FTA: 0.0859 +[2025-07-07 06:37:21] [Rank 0] Group 6 FTA: 0.1120 +[2025-07-07 06:37:21] [Rank 0] Group 6 FTA: 0.1120 +[2025-07-07 06:37:21] [Rank 0] Group 7 FTA: 0.1094 +[2025-07-07 06:37:21] [Rank 0] Group 7 FTA: 0.1094 +[2025-07-07 06:37:21] [Rank 0] Group 8 FTA: 0.0885 +[2025-07-07 06:37:21] [Rank 0] Group 8 FTA: 0.0885 +[2025-07-07 06:37:21] [Rank 0] Group 9 FTA: 0.0781 +[2025-07-07 06:37:21] [Rank 0] Group 9 FTA: 0.0781 +[2025-07-07 06:37:21] [Rank 0] Group 10 FTA: 0.1016 +[2025-07-07 06:37:21] [Rank 0] Group 10 FTA: 0.1016 +[2025-07-07 06:37:21] [Rank 0] Group 11 FTA: 0.0859 +[2025-07-07 06:37:21] [Rank 0] Group 11 FTA: 0.0859 +[2025-07-07 06:37:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png +[2025-07-07 06:37:22] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png +[2025-07-07 06:37:22] [Rank 0] [✓] Per-Class FTA curve updated 
+[2025-07-07 06:37:22] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 06:37:23] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 06:37:23] [Rank 0] step:2001/10000 train_time:134573ms step_avg:67.25ms
+[2025-07-07 06:37:24] [Rank 0] step:2021/10000 train_time:135328ms step_avg:66.96ms
+[2025-07-07 06:37:25] [Rank 0] step:2041/10000 train_time:136665ms step_avg:66.96ms
+[2025-07-07 06:37:27] [Rank 0] step:2061/10000 train_time:138003ms step_avg:66.96ms
+[2025-07-07 06:37:28] [Rank 0] step:2081/10000 train_time:139342ms step_avg:66.96ms
+[2025-07-07 06:37:30] [Rank 0] step:2101/10000 train_time:140682ms step_avg:66.96ms
+[2025-07-07 06:37:31] [Rank 0] step:2121/10000 train_time:142023ms step_avg:66.96ms
+[2025-07-07 06:37:32] [Rank 0] step:2141/10000 train_time:143365ms step_avg:66.96ms
+[2025-07-07 06:37:34] [Rank 0] step:2161/10000 train_time:144707ms step_avg:66.96ms
+[2025-07-07 06:37:35] [Rank 0] step:2181/10000 train_time:146099ms step_avg:66.99ms
+[2025-07-07 06:37:36] [Rank 0] step:2201/10000 train_time:147441ms step_avg:66.99ms
+[2025-07-07 06:37:38] [Rank 0] step:2221/10000 train_time:148784ms step_avg:66.99ms
+[2025-07-07 06:37:39] [Rank 0] step:2241/10000 train_time:150138ms step_avg:67.00ms
+[2025-07-07 06:37:40] [Rank 0] step:2261/10000 train_time:151530ms step_avg:67.02ms
+[2025-07-07 06:37:42] [Rank 0] step:2281/10000 train_time:152897ms step_avg:67.03ms
+[2025-07-07 06:37:43] [Rank 0] step:2301/10000 train_time:154299ms step_avg:67.06ms
+[2025-07-07 06:37:45] [Rank 0] step:2321/10000 train_time:155667ms step_avg:67.07ms
+[2025-07-07 06:37:46] [Rank 0] step:2341/10000 train_time:157037ms step_avg:67.08ms
+[2025-07-07 06:37:47] [Rank 0] step:2361/10000 train_time:158464ms step_avg:67.12ms
+[2025-07-07 06:37:49] [Rank 0] step:2381/10000 train_time:159831ms step_avg:67.13ms
+[2025-07-07 06:37:50] [Rank 0] step:2401/10000 train_time:161199ms step_avg:67.14ms
+[2025-07-07 06:37:51] [Rank 0] step:2421/10000 train_time:162566ms step_avg:67.15ms
+[2025-07-07 06:37:53] [Rank 0] step:2441/10000 train_time:163935ms step_avg:67.16ms
+[2025-07-07 06:37:54] [Rank 0] step:2461/10000 train_time:165304ms step_avg:67.17ms
+[2025-07-07 06:37:56] [Rank 0] step:2481/10000 train_time:166672ms step_avg:67.18ms
+[2025-07-07 06:37:57] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:37:58] [Rank 0] PRINT: step:2500/10000 train_loss:1.3119 val_loss:1.2876 train_time:168665ms step_avg:67.47ms
+[2025-07-07 06:37:58] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:37:58] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:37:58] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:43:17] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:43:17] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:43:17] [Rank 0] Total Loss: 5.9551
+[2025-07-07 06:43:17] [Rank 0] Total FTA: 0.0983
+[2025-07-07 06:43:17] [Rank 0] Group 0 Loss: 6.1260
+[2025-07-07 06:43:17] [Rank 0] Group 1 Loss: 5.6295
+[2025-07-07 06:43:17] [Rank 0] Group 2 Loss: 5.9046
+[2025-07-07 06:43:17] [Rank 0] Group 3 Loss: 5.9966
+[2025-07-07 06:43:17] [Rank 0] Group 4 Loss: 5.9110
+[2025-07-07 06:43:17] [Rank 0] Group 5 Loss: 5.9523
+[2025-07-07 06:43:17] [Rank 0] Group 6 Loss: 5.8970
+[2025-07-07 06:43:18] [Rank 0] Group 7 Loss: 5.9701
+[2025-07-07 06:43:18] [Rank 0] Group 8 Loss: 5.9446
+[2025-07-07 06:43:18] [Rank 0] Group 9 Loss: 5.9371
+[2025-07-07 06:43:18] [Rank 0] Group 10 Loss: 5.9615
+[2025-07-07 06:43:18] [Rank 0] Group 11 Loss: 5.9913
+[2025-07-07 06:43:18] [Rank 0] Group 0 FTA: 0.1469
+[2025-07-07 06:43:18] [Rank 0] Group 1 FTA: 0.1745
+[2025-07-07 06:43:18] [Rank 0] Group 2 FTA: 0.0859
+[2025-07-07 06:43:18] [Rank 0] Group 3 FTA: 0.0495
+[2025-07-07 06:43:18] [Rank 0] Group 4 FTA: 0.0391
+[2025-07-07 06:43:18] [Rank 0] Group 5 FTA: 0.1120
+[2025-07-07 06:43:18] [Rank 0] Group 6 FTA: 0.0651
+[2025-07-07 06:43:18] [Rank 0] Group 7 FTA: 0.0911
+[2025-07-07 06:43:18] [Rank 0] Group 8 FTA: 0.0651
+[2025-07-07 06:43:18] [Rank 0] Group 9 FTA: 0.0820
+[2025-07-07 06:43:18] [Rank 0] Group 10 FTA: 0.1074
+[2025-07-07 06:43:18] [Rank 0] Group 11 FTA: 0.1006
+[2025-07-07 06:43:18] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 06:43:18] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 06:43:19] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 06:43:19] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 06:43:19] [Rank 0] step:2501/10000 train_time:168675ms step_avg:67.44ms
+[2025-07-07 06:43:20] [Rank 0] step:2521/10000 train_time:169682ms step_avg:67.31ms
+[2025-07-07 06:43:22] [Rank 0] step:2541/10000 train_time:170839ms step_avg:67.23ms
+[2025-07-07 06:43:23] [Rank 0] step:2561/10000 train_time:172201ms step_avg:67.24ms
+[2025-07-07 06:43:25] [Rank 0] step:2581/10000 train_time:173564ms step_avg:67.25ms
+[2025-07-07 06:43:26] [Rank 0] step:2601/10000 train_time:174927ms step_avg:67.25ms
+[2025-07-07 06:43:27] [Rank 0] step:2621/10000 train_time:176291ms step_avg:67.26ms
+[2025-07-07 06:43:29] [Rank 0] step:2641/10000 train_time:177656ms step_avg:67.27ms
+[2025-07-07 06:43:30] [Rank 0] step:2661/10000 train_time:179021ms step_avg:67.28ms
+[2025-07-07 06:43:31] [Rank 0] step:2681/10000 train_time:180386ms step_avg:67.28ms
+[2025-07-07 06:43:33] [Rank 0] step:2701/10000 train_time:182422ms step_avg:67.54ms
+[2025-07-07 06:43:34] [Rank 0] step:2721/10000 train_time:183159ms step_avg:67.31ms
+[2025-07-07 06:43:35] [Rank 0] step:2741/10000 train_time:184527ms step_avg:67.32ms
+[2025-07-07 06:43:37] [Rank 0] step:2761/10000 train_time:185893ms step_avg:67.33ms
+[2025-07-07 06:43:38] [Rank 0] step:2781/10000 train_time:187264ms step_avg:67.34ms
+[2025-07-07 06:43:40] [Rank 0] step:2801/10000 train_time:188632ms step_avg:67.34ms
+[2025-07-07 06:43:41] [Rank 0] step:2821/10000 train_time:190002ms step_avg:67.35ms
+[2025-07-07 06:43:42] [Rank 0] step:2841/10000 train_time:191370ms step_avg:67.36ms
+[2025-07-07 06:43:44] [Rank 0] step:2861/10000 train_time:192740ms step_avg:67.37ms
+[2025-07-07 06:43:45] [Rank 0] step:2881/10000 train_time:194362ms step_avg:67.46ms
+[2025-07-07 06:43:46] [Rank 0] step:2901/10000 train_time:195514ms step_avg:67.40ms
+[2025-07-07 06:43:48] [Rank 0] step:2921/10000 train_time:196883ms step_avg:67.40ms
+[2025-07-07 06:43:49] [Rank 0] step:2941/10000 train_time:198255ms step_avg:67.41ms
+[2025-07-07 06:43:51] [Rank 0] step:2961/10000 train_time:199626ms step_avg:67.42ms
+[2025-07-07 06:43:52] [Rank 0] step:2981/10000 train_time:200998ms step_avg:67.43ms
+[2025-07-07 06:43:53] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:43:54] [Rank 0] PRINT: step:3000/10000 train_loss:1.2753 val_loss:1.2711 train_time:202993ms step_avg:67.66ms
+[2025-07-07 06:43:54] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:43:54] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:43:54] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:49:13] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:49:13] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:49:13] [Rank 0] Total Loss: 5.7703
+[2025-07-07 06:49:13] [Rank 0] Total FTA: 0.1090
+[2025-07-07 06:49:13] [Rank 0] Group 0 Loss: 5.9340
+[2025-07-07 06:49:13] [Rank 0] Group 1 Loss: 5.5484
+[2025-07-07 06:49:13] [Rank 0] Group 2 Loss: 5.6979
+[2025-07-07 06:49:13] [Rank 0] Group 3 Loss: 5.9446
+[2025-07-07 06:49:13] [Rank 0] Group 4 Loss: 5.6241
+[2025-07-07 06:49:13] [Rank 0] Group 5 Loss: 5.7057
+[2025-07-07 06:49:13] [Rank 0] Group 6 Loss: 5.7009
+[2025-07-07 06:49:13] [Rank 0] Group 7 Loss: 5.7933
+[2025-07-07 06:49:13] [Rank 0] Group 8 Loss: 5.7541
+[2025-07-07 06:49:13] [Rank 0] Group 9 Loss: 5.8128
+[2025-07-07 06:49:13] [Rank 0] Group 10 Loss: 5.7553
+[2025-07-07 06:49:13] [Rank 0] Group 11 Loss: 5.7918
+[2025-07-07 06:49:13] [Rank 0] Group 0 FTA: 0.1651
+[2025-07-07 06:49:13] [Rank 0] Group 1 FTA: 0.1406
+[2025-07-07 06:49:13] [Rank 0] Group 2 FTA: 0.1589
+[2025-07-07 06:49:13] [Rank 0] Group 3 FTA: 0.1458
+[2025-07-07 06:49:13] [Rank 0] Group 4 FTA: 0.0417
+[2025-07-07 06:49:13] [Rank 0] Group 5 FTA: 0.0625
+[2025-07-07 06:49:13] [Rank 0] Group 6 FTA: 0.1042
+[2025-07-07 06:49:13] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-07 06:49:13] [Rank 0] Group 8 FTA: 0.0781
+[2025-07-07 06:49:13] [Rank 0] Group 9 FTA: 0.0859
+[2025-07-07 06:49:13] [Rank 0] Group 10 FTA: 0.0918
+[2025-07-07 06:49:13] [Rank 0] Group 11 FTA: 0.0986
+[2025-07-07 06:49:14] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 06:49:14] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 06:49:15] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 06:49:15] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 06:49:15] [Rank 0] step:3001/10000 train_time:203003ms step_avg:67.64ms
+[2025-07-07 06:49:16] [Rank 0] step:3021/10000 train_time:203772ms step_avg:67.45ms
+[2025-07-07 06:49:18] [Rank 0] step:3041/10000 train_time:205133ms step_avg:67.46ms
+[2025-07-07 06:49:19] [Rank 0] step:3061/10000 train_time:206544ms step_avg:67.48ms
+[2025-07-07 06:49:20] [Rank 0] step:3081/10000 train_time:207899ms step_avg:67.48ms
+[2025-07-07 06:49:22] [Rank 0] step:3101/10000 train_time:209264ms step_avg:67.48ms
+[2025-07-07 06:49:23] [Rank 0] step:3121/10000 train_time:210629ms step_avg:67.49ms
+[2025-07-07 06:49:24] [Rank 0] step:3141/10000 train_time:211995ms step_avg:67.49ms
+[2025-07-07 06:49:26] [Rank 0] step:3161/10000 train_time:213361ms step_avg:67.50ms
+[2025-07-07 06:49:27] [Rank 0] step:3181/10000 train_time:214726ms step_avg:67.50ms
+[2025-07-07 06:49:29] [Rank 0] step:3201/10000 train_time:216093ms step_avg:67.51ms
+[2025-07-07 06:49:30] [Rank 0] step:3221/10000 train_time:217460ms step_avg:67.51ms
+[2025-07-07 06:49:31] [Rank 0] step:3241/10000 train_time:219080ms step_avg:67.60ms
+[2025-07-07 06:49:33] [Rank 0] step:3261/10000 train_time:220253ms step_avg:67.54ms
+[2025-07-07 06:49:34] [Rank 0] step:3281/10000 train_time:221621ms step_avg:67.55ms
+[2025-07-07 06:49:35] [Rank 0] step:3301/10000 train_time:222991ms step_avg:67.55ms
+[2025-07-07 06:49:37] [Rank 0] step:3321/10000 train_time:224359ms step_avg:67.56ms
+[2025-07-07 06:49:38] [Rank 0] step:3341/10000 train_time:225727ms step_avg:67.56ms
+[2025-07-07 06:49:40] [Rank 0] step:3361/10000 train_time:227096ms step_avg:67.57ms
+[2025-07-07 06:49:41] [Rank 0] step:3381/10000 train_time:228463ms step_avg:67.57ms
+[2025-07-07 06:49:42] [Rank 0] step:3401/10000 train_time:229831ms step_avg:67.58ms
+[2025-07-07 06:49:44] [Rank 0] step:3421/10000 train_time:231248ms step_avg:67.60ms
+[2025-07-07 06:49:45] [Rank 0] step:3441/10000 train_time:232604ms step_avg:67.60ms
+[2025-07-07 06:49:46] [Rank 0] step:3461/10000 train_time:233974ms step_avg:67.60ms
+[2025-07-07 06:49:48] [Rank 0] step:3481/10000 train_time:235345ms step_avg:67.61ms
+[2025-07-07 06:49:49] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:49:50] [Rank 0] PRINT: step:3500/10000 train_loss:1.2493 val_loss:1.2517 train_time:237337ms step_avg:67.81ms
+[2025-07-07 06:49:50] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:49:50] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:49:50] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 06:55:07] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 06:55:07] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 06:55:07] [Rank 0] Total Loss: 5.7362
+[2025-07-07 06:55:07] [Rank 0] Total FTA: 0.1156
+[2025-07-07 06:55:07] [Rank 0] Group 0 Loss: 5.9135
+[2025-07-07 06:55:07] [Rank 0] Group 1 Loss: 5.5107
+[2025-07-07 06:55:07] [Rank 0] Group 2 Loss: 5.7493
+[2025-07-07 06:55:07] [Rank 0] Group 3 Loss: 5.7304
+[2025-07-07 06:55:07] [Rank 0] Group 4 Loss: 5.6960
+[2025-07-07 06:55:07] [Rank 0] Group 5 Loss: 5.7156
+[2025-07-07 06:55:07] [Rank 0] Group 6 Loss: 5.6765
+[2025-07-07 06:55:07] [Rank 0] Group 7 Loss: 5.7720
+[2025-07-07 06:55:07] [Rank 0] Group 8 Loss: 5.7593
+[2025-07-07 06:55:07] [Rank 0] Group 9 Loss: 5.7230
+[2025-07-07 06:55:07] [Rank 0] Group 10 Loss: 5.7282
+[2025-07-07 06:55:07] [Rank 0] Group 11 Loss: 5.7153
+[2025-07-07 06:55:07] [Rank 0] Group 0 FTA: 0.1691
+[2025-07-07 06:55:07] [Rank 0] Group 1 FTA: 0.1849
+[2025-07-07 06:55:07] [Rank 0] Group 2 FTA: 0.1667
+[2025-07-07 06:55:07] [Rank 0] Group 3 FTA: 0.0833
+[2025-07-07 06:55:07] [Rank 0] Group 4 FTA: 0.0365
+[2025-07-07 06:55:07] [Rank 0] Group 5 FTA: 0.0833
+[2025-07-07 06:55:07] [Rank 0] Group 6 FTA: 0.1172
+[2025-07-07 06:55:07] [Rank 0] Group 7 FTA: 0.0938
+[2025-07-07 06:55:07] [Rank 0] Group 8 FTA: 0.1250
+[2025-07-07 06:55:07] [Rank 0] Group 9 FTA: 0.1094
+[2025-07-07 06:55:07] [Rank 0] Group 10 FTA: 0.0996
+[2025-07-07 06:55:07] [Rank 0] Group 11 FTA: 0.0977
+[2025-07-07 06:55:08] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 06:55:08] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 06:55:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 06:55:09] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 06:55:09] [Rank 0] step:3501/10000 train_time:237347ms step_avg:67.79ms
+[2025-07-07 06:55:10] [Rank 0] step:3521/10000 train_time:238114ms step_avg:67.63ms
+[2025-07-07 06:55:11] [Rank 0] step:3541/10000 train_time:239477ms step_avg:67.63ms
+[2025-07-07 06:55:13] [Rank 0] step:3561/10000 train_time:240840ms step_avg:67.63ms
+[2025-07-07 06:55:14] [Rank 0] step:3581/10000 train_time:242205ms step_avg:67.64ms
+[2025-07-07 06:55:16] [Rank 0] step:3601/10000 train_time:243616ms step_avg:67.65ms
+[2025-07-07 06:55:17] [Rank 0] step:3621/10000 train_time:244989ms step_avg:67.66ms
+[2025-07-07 06:55:18] [Rank 0] step:3641/10000 train_time:246354ms step_avg:67.66ms
+[2025-07-07 06:55:20] [Rank 0] step:3661/10000 train_time:247718ms step_avg:67.66ms
+[2025-07-07 06:55:21] [Rank 0] step:3681/10000 train_time:249086ms step_avg:67.67ms
+[2025-07-07 06:55:22] [Rank 0] step:3701/10000 train_time:250453ms step_avg:67.67ms
+[2025-07-07 06:55:24] [Rank 0] step:3721/10000 train_time:251822ms step_avg:67.68ms
+[2025-07-07 06:55:25] [Rank 0] step:3741/10000 train_time:253189ms step_avg:67.68ms
+[2025-07-07 06:55:26] [Rank 0] step:3761/10000 train_time:254557ms step_avg:67.68ms
+[2025-07-07 06:55:28] [Rank 0] step:3781/10000 train_time:255973ms step_avg:67.70ms
+[2025-07-07 06:55:29] [Rank 0] step:3801/10000 train_time:257343ms step_avg:67.70ms
+[2025-07-07 06:55:31] [Rank 0] step:3821/10000 train_time:258712ms step_avg:67.71ms
+[2025-07-07 06:55:32] [Rank 0] step:3841/10000 train_time:260082ms step_avg:67.71ms
+[2025-07-07 06:55:33] [Rank 0] step:3861/10000 train_time:261453ms step_avg:67.72ms
+[2025-07-07 06:55:35] [Rank 0] step:3881/10000 train_time:262824ms step_avg:67.72ms
+[2025-07-07 06:55:36] [Rank 0] step:3901/10000 train_time:264242ms step_avg:67.74ms
+[2025-07-07 06:55:37] [Rank 0] step:3921/10000 train_time:265614ms step_avg:67.74ms
+[2025-07-07 06:55:39] [Rank 0] step:3941/10000 train_time:266986ms step_avg:67.75ms
+[2025-07-07 06:55:40] [Rank 0] step:3961/10000 train_time:268512ms step_avg:67.79ms
+[2025-07-07 06:55:42] [Rank 0] step:3981/10000 train_time:269729ms step_avg:67.75ms
+[2025-07-07 06:55:43] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 06:55:44] [Rank 0] PRINT: step:4000/10000 train_loss:1.2179 val_loss:1.2422 train_time:271724ms step_avg:67.93ms
+[2025-07-07 06:55:44] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 06:55:44] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 06:55:44] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:01:06] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:01:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:01:06] [Rank 0] Total Loss: 5.5298
+[2025-07-07 07:01:06] [Rank 0] Total FTA: 0.1392
+[2025-07-07 07:01:06] [Rank 0] Group 0 Loss: 5.7301
+[2025-07-07 07:01:06] [Rank 0] Group 1 Loss: 5.2172
+[2025-07-07 07:01:06] [Rank 0] Group 2 Loss: 5.2919
+[2025-07-07 07:01:06] [Rank 0] Group 3 Loss: 5.6486
+[2025-07-07 07:01:07] [Rank 0] Group 4 Loss: 5.4362
+[2025-07-07 07:01:07] [Rank 0] Group 5 Loss: 5.4836
+[2025-07-07 07:01:07] [Rank 0] Group 6 Loss: 5.4890
+[2025-07-07 07:01:07] [Rank 0] Group 7 Loss: 5.5991
+[2025-07-07 07:01:07] [Rank 0] Group 8 Loss: 5.5345
+[2025-07-07 07:01:07] [Rank 0] Group 9 Loss: 5.5915
+[2025-07-07 07:01:07] [Rank 0] Group 10 Loss: 5.5614
+[2025-07-07 07:01:07] [Rank 0] Group 11 Loss: 5.5499
+[2025-07-07 07:01:07] [Rank 0] Group 0 FTA: 0.1508
+[2025-07-07 07:01:07] [Rank 0] Group 1 FTA: 0.3385
+[2025-07-07 07:01:07] [Rank 0] Group 2 FTA: 0.1562
+[2025-07-07 07:01:07] [Rank 0] Group 3 FTA: 0.0859
+[2025-07-07 07:01:07] [Rank 0] Group 4 FTA: 0.0599
+[2025-07-07 07:01:07] [Rank 0] Group 5 FTA: 0.1224
+[2025-07-07 07:01:07] [Rank 0] Group 6 FTA: 0.1693
+[2025-07-07 07:01:07] [Rank 0] Group 7 FTA: 0.1432
+[2025-07-07 07:01:07] [Rank 0] Group 8 FTA: 0.1302
+[2025-07-07 07:01:07] [Rank 0] Group 9 FTA: 0.1094
+[2025-07-07 07:01:07] [Rank 0] Group 10 FTA: 0.1152
+[2025-07-07 07:01:07] [Rank 0] Group 11 FTA: 0.1152
+[2025-07-07 07:01:07] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:01:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:01:08] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:01:08] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 07:01:08] [Rank 0] step:4001/10000 train_time:271733ms step_avg:67.92ms
+[2025-07-07 07:01:09] [Rank 0] step:4021/10000 train_time:272493ms step_avg:67.77ms
+[2025-07-07 07:01:11] [Rank 0] step:4041/10000 train_time:273856ms step_avg:67.77ms
+[2025-07-07 07:01:12] [Rank 0] step:4061/10000 train_time:275220ms step_avg:67.77ms
+[2025-07-07 07:01:13] [Rank 0] step:4081/10000 train_time:276582ms step_avg:67.77ms
+[2025-07-07 07:01:15] [Rank 0] step:4101/10000 train_time:277947ms step_avg:67.78ms
+[2025-07-07 07:01:16] [Rank 0] step:4121/10000 train_time:279312ms step_avg:67.78ms
+[2025-07-07 07:01:18] [Rank 0] step:4141/10000 train_time:280676ms step_avg:67.78ms
+[2025-07-07 07:01:19] [Rank 0] step:4161/10000 train_time:282043ms step_avg:67.78ms
+[2025-07-07 07:01:20] [Rank 0] step:4181/10000 train_time:283409ms step_avg:67.78ms
+[2025-07-07 07:01:22] [Rank 0] step:4201/10000 train_time:284776ms step_avg:67.79ms
+[2025-07-07 07:01:23] [Rank 0] step:4221/10000 train_time:286142ms step_avg:67.79ms
+[2025-07-07 07:01:24] [Rank 0] step:4241/10000 train_time:287510ms step_avg:67.79ms
+[2025-07-07 07:01:26] [Rank 0] step:4261/10000 train_time:288878ms step_avg:67.80ms
+[2025-07-07 07:01:27] [Rank 0] step:4281/10000 train_time:290249ms step_avg:67.80ms
+[2025-07-07 07:01:29] [Rank 0] step:4301/10000 train_time:291618ms step_avg:67.80ms
+[2025-07-07 07:01:30] [Rank 0] step:4321/10000 train_time:292987ms step_avg:67.81ms
+[2025-07-07 07:01:31] [Rank 0] step:4341/10000 train_time:294389ms step_avg:67.82ms
+[2025-07-07 07:01:33] [Rank 0] step:4361/10000 train_time:295759ms step_avg:67.82ms
+[2025-07-07 07:01:34] [Rank 0] step:4381/10000 train_time:297131ms step_avg:67.82ms
+[2025-07-07 07:01:35] [Rank 0] step:4401/10000 train_time:298501ms step_avg:67.83ms
+[2025-07-07 07:01:37] [Rank 0] step:4421/10000 train_time:299872ms step_avg:67.83ms
+[2025-07-07 07:01:38] [Rank 0] step:4441/10000 train_time:301243ms step_avg:67.83ms
+[2025-07-07 07:01:40] [Rank 0] step:4461/10000 train_time:302611ms step_avg:67.83ms
+[2025-07-07 07:01:41] [Rank 0] step:4481/10000 train_time:303980ms step_avg:67.84ms
+[2025-07-07 07:01:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:01:43] [Rank 0] PRINT: step:4500/10000 train_loss:1.1775 val_loss:1.2109 train_time:305972ms step_avg:67.99ms
+[2025-07-07 07:01:43] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:01:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:01:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:07:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:07:06] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:07:06] [Rank 0] Total Loss: 5.6541
+[2025-07-07 07:07:06] [Rank 0] Total FTA: 0.1871
+[2025-07-07 07:07:06] [Rank 0] Group 0 Loss: 5.9467
+[2025-07-07 07:07:06] [Rank 0] Group 1 Loss: 5.5077
+[2025-07-07 07:07:06] [Rank 0] Group 2 Loss: 5.3816
+[2025-07-07 07:07:06] [Rank 0] Group 3 Loss: 5.8485
+[2025-07-07 07:07:06] [Rank 0] Group 4 Loss: 5.5393
+[2025-07-07 07:07:06] [Rank 0] Group 5 Loss: 5.4996
+[2025-07-07 07:07:06] [Rank 0] Group 6 Loss: 5.6266
+[2025-07-07 07:07:06] [Rank 0] Group 7 Loss: 5.6603
+[2025-07-07 07:07:06] [Rank 0] Group 8 Loss: 5.6823
+[2025-07-07 07:07:06] [Rank 0] Group 9 Loss: 5.6276
+[2025-07-07 07:07:06] [Rank 0] Group 10 Loss: 5.6301
+[2025-07-07 07:07:06] [Rank 0] Group 11 Loss: 5.6353
+[2025-07-07 07:07:06] [Rank 0] Group 0 FTA: 0.4837
+[2025-07-07 07:07:06] [Rank 0] Group 1 FTA: 0.1562
+[2025-07-07 07:07:06] [Rank 0] Group 2 FTA: 0.0000
+[2025-07-07 07:07:06] [Rank 0] Group 3 FTA: 0.0938
+[2025-07-07 07:07:06] [Rank 0] Group 4 FTA: 0.1094
+[2025-07-07 07:07:06] [Rank 0] Group 5 FTA: 0.1510
+[2025-07-07 07:07:06] [Rank 0] Group 6 FTA: 0.2240
+[2025-07-07 07:07:06] [Rank 0] Group 7 FTA: 0.1406
+[2025-07-07 07:07:06] [Rank 0] Group 8 FTA: 0.1432
+[2025-07-07 07:07:06] [Rank 0] Group 9 FTA: 0.1680
+[2025-07-07 07:07:06] [Rank 0] Group 10 FTA: 0.1602
+[2025-07-07 07:07:06] [Rank 0] Group 11 FTA: 0.1621
+[2025-07-07 07:07:06] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:07:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png +[2025-07-07 07:07:07] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png +[2025-07-07 07:07:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png +[2025-07-07 07:07:07] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png +[2025-07-07 07:07:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png +[2025-07-07 07:07:07] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png +[2025-07-07 07:07:08] [Rank 0] step:4501/10000 train_time:305988ms step_avg:67.98ms +[2025-07-07 07:07:08] [Rank 0] step:4501/10000 train_time:305988ms step_avg:67.98ms +[2025-07-07 07:07:09] [Rank 0] step:4521/10000 train_time:307453ms step_avg:68.01ms +[2025-07-07 07:07:09] [Rank 0] step:4521/10000 train_time:307453ms step_avg:68.01ms +[2025-07-07 07:07:11] [Rank 0] step:4541/10000 train_time:309082ms step_avg:68.06ms +[2025-07-07 07:07:11] [Rank 0] step:4541/10000 train_time:309082ms step_avg:68.06ms +[2025-07-07 07:07:12] [Rank 0] step:4561/10000 train_time:310176ms step_avg:68.01ms +[2025-07-07 07:07:12] [Rank 0] step:4561/10000 train_time:310176ms step_avg:68.01ms +[2025-07-07 07:07:13] [Rank 0] step:4581/10000 train_time:311541ms step_avg:68.01ms +[2025-07-07 07:07:13] [Rank 0] step:4581/10000 train_time:311541ms step_avg:68.01ms +[2025-07-07 07:07:15] [Rank 0] step:4601/10000 train_time:312904ms step_avg:68.01ms +[2025-07-07 07:07:15] [Rank 0] step:4601/10000 train_time:312904ms step_avg:68.01ms +[2025-07-07 07:07:16] [Rank 0] step:4621/10000 train_time:314269ms step_avg:68.01ms +[2025-07-07 07:07:16] [Rank 0] step:4621/10000 train_time:314269ms step_avg:68.01ms +[2025-07-07 07:07:17] [Rank 0] step:4641/10000 train_time:315634ms step_avg:68.01ms +[2025-07-07 07:07:17] [Rank 0] step:4641/10000 train_time:315634ms step_avg:68.01ms +[2025-07-07 07:07:19] [Rank 0] step:4661/10000 train_time:317000ms step_avg:68.01ms +[2025-07-07 07:07:19] [Rank 0] step:4661/10000 train_time:317000ms step_avg:68.01ms +[2025-07-07 07:07:20] [Rank 0] step:4681/10000 train_time:318618ms step_avg:68.07ms +[2025-07-07 07:07:20] [Rank 0] step:4681/10000 train_time:318618ms step_avg:68.07ms +[2025-07-07 07:07:22] [Rank 0] step:4701/10000 train_time:319732ms step_avg:68.01ms +[2025-07-07 07:07:22] [Rank 0] step:4701/10000 train_time:319732ms step_avg:68.01ms +[2025-07-07 07:07:23] [Rank 0] step:4721/10000 train_time:321101ms step_avg:68.02ms +[2025-07-07 07:07:23] [Rank 0] step:4721/10000 train_time:321101ms step_avg:68.02ms +[2025-07-07 07:07:24] [Rank 0] step:4741/10000 train_time:322469ms step_avg:68.02ms +[2025-07-07 07:07:24] [Rank 0] step:4741/10000 train_time:322469ms step_avg:68.02ms +[2025-07-07 07:07:26] [Rank 0] step:4761/10000 train_time:323838ms step_avg:68.02ms +[2025-07-07 07:07:26] [Rank 0] step:4761/10000 train_time:323838ms step_avg:68.02ms +[2025-07-07 07:07:27] [Rank 0] step:4781/10000 train_time:325206ms step_avg:68.02ms +[2025-07-07 07:07:27] [Rank 0] step:4781/10000 train_time:325206ms step_avg:68.02ms +[2025-07-07 07:07:28] [Rank 0] step:4801/10000 train_time:326574ms step_avg:68.02ms +[2025-07-07 07:07:28] [Rank 0] 
step:4801/10000 train_time:326574ms step_avg:68.02ms +[2025-07-07 07:07:30] [Rank 0] step:4821/10000 train_time:327943ms step_avg:68.02ms +[2025-07-07 07:07:30] [Rank 0] step:4821/10000 train_time:327943ms step_avg:68.02ms +[2025-07-07 07:07:31] [Rank 0] step:4841/10000 train_time:329312ms step_avg:68.03ms +[2025-07-07 07:07:31] [Rank 0] step:4841/10000 train_time:329312ms step_avg:68.03ms +[2025-07-07 07:07:33] [Rank 0] step:4861/10000 train_time:330727ms step_avg:68.04ms +[2025-07-07 07:07:33] [Rank 0] step:4861/10000 train_time:330727ms step_avg:68.04ms +[2025-07-07 07:07:34] [Rank 0] step:4881/10000 train_time:332098ms step_avg:68.04ms +[2025-07-07 07:07:34] [Rank 0] step:4881/10000 train_time:332098ms step_avg:68.04ms +[2025-07-07 07:07:35] [Rank 0] step:4901/10000 train_time:333468ms step_avg:68.04ms +[2025-07-07 07:07:35] [Rank 0] step:4901/10000 train_time:333468ms step_avg:68.04ms +[2025-07-07 07:07:37] [Rank 0] step:4921/10000 train_time:334840ms step_avg:68.04ms +[2025-07-07 07:07:37] [Rank 0] step:4921/10000 train_time:334840ms step_avg:68.04ms +[2025-07-07 07:07:38] [Rank 0] step:4941/10000 train_time:336214ms step_avg:68.05ms +[2025-07-07 07:07:38] [Rank 0] step:4941/10000 train_time:336214ms step_avg:68.05ms +[2025-07-07 07:07:39] [Rank 0] step:4961/10000 train_time:337587ms step_avg:68.05ms +[2025-07-07 07:07:39] [Rank 0] step:4961/10000 train_time:337587ms step_avg:68.05ms +[2025-07-07 07:07:41] [Rank 0] step:4981/10000 train_time:338962ms step_avg:68.05ms +[2025-07-07 07:07:41] [Rank 0] step:4981/10000 train_time:338962ms step_avg:68.05ms +[2025-07-07 07:07:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:07:42] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed. +[2025-07-07 07:07:43] [Rank 0] PRINT: step:5000/10000 train_loss:1.1250 val_loss:1.1960 train_time:340959ms step_avg:68.19ms +[2025-07-07 07:07:43] [Rank 0] PRINT: step:5000/10000 train_loss:1.1250 val_loss:1.1960 train_time:340959ms step_avg:68.19ms +[2025-07-07 07:07:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:07:43] [Rank 0] +--- Starting Detailed Evaluation (Loss & FTA) --- +[2025-07-07 07:07:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... +[2025-07-07 07:07:43] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation... 
+[2025-07-07 07:07:43] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:13:05] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:13:05] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:13:05] [Rank 0] Total Loss: 5.5841
+[2025-07-07 07:13:05] [Rank 0] Total FTA: 0.1915
+[2025-07-07 07:13:05] [Rank 0] Group 0 Loss: 5.7974
+[2025-07-07 07:13:05] [Rank 0] Group 1 Loss: 5.4262
+[2025-07-07 07:13:05] [Rank 0] Group 2 Loss: 5.3218
+[2025-07-07 07:13:05] [Rank 0] Group 3 Loss: 5.6879
+[2025-07-07 07:13:05] [Rank 0] Group 4 Loss: 5.6159
+[2025-07-07 07:13:05] [Rank 0] Group 5 Loss: 5.5097
+[2025-07-07 07:13:05] [Rank 0] Group 6 Loss: 5.5278
+[2025-07-07 07:13:05] [Rank 0] Group 7 Loss: 5.5570
+[2025-07-07 07:13:05] [Rank 0] Group 8 Loss: 5.5318
+[2025-07-07 07:13:05] [Rank 0] Group 9 Loss: 5.5961
+[2025-07-07 07:13:05] [Rank 0] Group 10 Loss: 5.6046
+[2025-07-07 07:13:05] [Rank 0] Group 11 Loss: 5.5961
+[2025-07-07 07:13:05] [Rank 0] Group 0 FTA: 0.1717
+[2025-07-07 07:13:05] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 07:13:05] [Rank 0] Group 2 FTA: 0.2943
+[2025-07-07 07:13:05] [Rank 0] Group 3 FTA: 0.1823
+[2025-07-07 07:13:05] [Rank 0] Group 4 FTA: 0.0911
+[2025-07-07 07:13:05] [Rank 0] Group 5 FTA: 0.2526
+[2025-07-07 07:13:05] [Rank 0] Group 6 FTA: 0.2500
+[2025-07-07 07:13:05] [Rank 0] Group 7 FTA: 0.2083
+[2025-07-07 07:13:05] [Rank 0] Group 8 FTA: 0.1901
+[2025-07-07 07:13:05] [Rank 0] Group 9 FTA: 0.2422
+[2025-07-07 07:13:05] [Rank 0] Group 10 FTA: 0.2012
+[2025-07-07 07:13:05] [Rank 0] Group 11 FTA: 0.2129
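The log never expands "FTA"; reading it as first-token accuracy, i.e. whether the model's top-1 prediction for the first answer token matches the gold token, is an assumption, though it fits the 0-1 range and the per-group breakdown above. A hedged PyTorch sketch under that reading:

    import torch

    def first_token_accuracy(logits, ans_pos, targets):
        # Interpretation of "FTA" is assumed, not confirmed by the log.
        # logits: (B, T, V); ans_pos: (B,) index of each first answer token;
        # targets: (B, T) gold ids. A causal LM predicts position i from i-1.
        b = torch.arange(logits.size(0))
        pred = logits[b, ans_pos - 1].argmax(dim=-1)
        return (pred == targets[b, ans_pos]).float().mean().item()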
+[2025-07-07 07:13:05] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:13:06] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:13:06] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:13:06] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 07:13:06] [Rank 0] step:5001/10000 train_time:340969ms step_avg:68.18ms
+[2025-07-07 07:13:08] [Rank 0] step:5021/10000 train_time:341719ms step_avg:68.06ms
+[2025-07-07 07:13:09] [Rank 0] step:5041/10000 train_time:343755ms step_avg:68.19ms
+[2025-07-07 07:13:11] [Rank 0] step:5061/10000 train_time:344490ms step_avg:68.07ms
+[2025-07-07 07:13:12] [Rank 0] step:5081/10000 train_time:345853ms step_avg:68.07ms
+[2025-07-07 07:13:13] [Rank 0] step:5101/10000 train_time:347218ms step_avg:68.07ms
+[2025-07-07 07:13:15] [Rank 0] step:5121/10000 train_time:348583ms step_avg:68.07ms
+[2025-07-07 07:13:16] [Rank 0] step:5141/10000 train_time:349950ms step_avg:68.07ms
+[2025-07-07 07:13:17] [Rank 0] step:5161/10000 train_time:351315ms step_avg:68.07ms
+[2025-07-07 07:13:19] [Rank 0] step:5181/10000 train_time:352681ms step_avg:68.07ms
+[2025-07-07 07:13:20] [Rank 0] step:5201/10000 train_time:354048ms step_avg:68.07ms
+[2025-07-07 07:13:22] [Rank 0] step:5221/10000 train_time:356075ms step_avg:68.20ms
+[2025-07-07 07:13:23] [Rank 0] step:5241/10000 train_time:356814ms step_avg:68.08ms
+[2025-07-07 07:13:24] [Rank 0] step:5261/10000 train_time:358182ms step_avg:68.08ms
+[2025-07-07 07:13:26] [Rank 0] step:5281/10000 train_time:359549ms step_avg:68.08ms
+[2025-07-07 07:13:27] [Rank 0] step:5301/10000 train_time:360917ms step_avg:68.08ms
+[2025-07-07 07:13:28] [Rank 0] step:5321/10000 train_time:362285ms step_avg:68.09ms
+[2025-07-07 07:13:30] [Rank 0] step:5341/10000 train_time:363655ms step_avg:68.09ms
+[2025-07-07 07:13:31] [Rank 0] step:5361/10000 train_time:365031ms step_avg:68.09ms
+[2025-07-07 07:13:32] [Rank 0] step:5381/10000 train_time:366400ms step_avg:68.09ms
+[2025-07-07 07:13:34] [Rank 0] step:5401/10000 train_time:368427ms step_avg:68.21ms
+[2025-07-07 07:13:35] [Rank 0] step:5421/10000 train_time:369167ms step_avg:68.10ms
+[2025-07-07 07:13:37] [Rank 0] step:5441/10000 train_time:370539ms step_avg:68.10ms
+[2025-07-07 07:13:38] [Rank 0] step:5461/10000 train_time:371908ms step_avg:68.10ms
+[2025-07-07 07:13:39] [Rank 0] step:5481/10000 train_time:373278ms step_avg:68.10ms
+[2025-07-07 07:13:41] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:13:42] [Rank 0] PRINT: step:5500/10000 train_loss:1.0672 val_loss:1.1773 train_time:375270ms step_avg:68.23ms
+[2025-07-07 07:13:42] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:13:42] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:13:42] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:19:00] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:19:00] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:19:00] [Rank 0] Total Loss: 5.4761
+[2025-07-07 07:19:00] [Rank 0] Total FTA: 0.2398
+[2025-07-07 07:19:00] [Rank 0] Group 0 Loss: 5.5267
+[2025-07-07 07:19:00] [Rank 0] Group 1 Loss: 5.3507
+[2025-07-07 07:19:00] [Rank 0] Group 2 Loss: 5.2802
+[2025-07-07 07:19:00] [Rank 0] Group 3 Loss: 5.5504
+[2025-07-07 07:19:00] [Rank 0] Group 4 Loss: 5.3862
+[2025-07-07 07:19:00] [Rank 0] Group 5 Loss: 5.3936
+[2025-07-07 07:19:00] [Rank 0] Group 6 Loss: 5.4856
+[2025-07-07 07:19:00] [Rank 0] Group 7 Loss: 5.5311
+[2025-07-07 07:19:00] [Rank 0] Group 8 Loss: 5.5025
+[2025-07-07 07:19:00] [Rank 0] Group 9 Loss: 5.4656
+[2025-07-07 07:19:00] [Rank 0] Group 10 Loss: 5.5266
+[2025-07-07 07:19:00] [Rank 0] Group 11 Loss: 5.5384
+[2025-07-07 07:19:00] [Rank 0] Group 0 FTA: 0.3173
+[2025-07-07 07:19:00] [Rank 0] Group 1 FTA: 0.1771
+[2025-07-07 07:19:00] [Rank 0] Group 2 FTA: 0.2214
+[2025-07-07 07:19:00] [Rank 0] Group 3 FTA: 0.1641
+[2025-07-07 07:19:00] [Rank 0] Group 4 FTA: 0.1849
+[2025-07-07 07:19:00] [Rank 0] Group 5 FTA: 0.2708
+[2025-07-07 07:19:00] [Rank 0] Group 6 FTA: 0.2812
+[2025-07-07 07:19:00] [Rank 0] Group 7 FTA: 0.2370
+[2025-07-07 07:19:00] [Rank 0] Group 8 FTA: 0.2370
+[2025-07-07 07:19:00] [Rank 0] Group 9 FTA: 0.2734
+[2025-07-07 07:19:01] [Rank 0] Group 10 FTA: 0.2441
+[2025-07-07 07:19:01] [Rank 0] Group 11 FTA: 0.2256
+[2025-07-07 07:19:01] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:19:01] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:19:02] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:19:02] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
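The four [✓] lines report the same PNGs being rewritten after every detailed evaluation, extending each curve by one point. A plausible shape for that helper, assuming matplotlib on a headless node (the function name and signature are hypothetical, not taken from the script):

    import matplotlib
    matplotlib.use("Agg")  # no display on the training node
    import matplotlib.pyplot as plt

    def update_curve(history, out_path, ylabel):
        # history: accumulated (step, value) pairs; overwriting the same
        # file each eval yields the repeated "curve updated" messages.
        steps, values = zip(*sorted(history))
        fig, ax = plt.subplots()
        ax.plot(steps, values, marker="o")
        ax.set_xlabel("step")
        ax.set_ylabel(ylabel)
        fig.savefig(out_path)
        plt.close(fig)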
+[2025-07-07 07:19:02] [Rank 0] step:5501/10000 train_time:375279ms step_avg:68.22ms
+[2025-07-07 07:19:03] [Rank 0] step:5521/10000 train_time:376049ms step_avg:68.11ms
+[2025-07-07 07:19:05] [Rank 0] step:5541/10000 train_time:377412ms step_avg:68.11ms
+[2025-07-07 07:19:06] [Rank 0] step:5561/10000 train_time:378775ms step_avg:68.11ms
+[2025-07-07 07:19:07] [Rank 0] step:5581/10000 train_time:380190ms step_avg:68.12ms
+[2025-07-07 07:19:09] [Rank 0] step:5601/10000 train_time:381550ms step_avg:68.12ms
+[2025-07-07 07:19:10] [Rank 0] step:5621/10000 train_time:382916ms step_avg:68.12ms
+[2025-07-07 07:19:12] [Rank 0] step:5641/10000 train_time:384282ms step_avg:68.12ms
+[2025-07-07 07:19:13] [Rank 0] step:5661/10000 train_time:385650ms step_avg:68.12ms
+[2025-07-07 07:19:14] [Rank 0] step:5681/10000 train_time:387017ms step_avg:68.12ms
+[2025-07-07 07:19:16] [Rank 0] step:5701/10000 train_time:388385ms step_avg:68.13ms
+[2025-07-07 07:19:17] [Rank 0] step:5721/10000 train_time:389753ms step_avg:68.13ms
+[2025-07-07 07:19:18] [Rank 0] step:5741/10000 train_time:391121ms step_avg:68.13ms
+[2025-07-07 07:19:20] [Rank 0] step:5761/10000 train_time:392534ms step_avg:68.14ms
+[2025-07-07 07:19:21] [Rank 0] step:5781/10000 train_time:393901ms step_avg:68.14ms
+[2025-07-07 07:19:23] [Rank 0] step:5801/10000 train_time:395270ms step_avg:68.14ms
+[2025-07-07 07:19:24] [Rank 0] step:5821/10000 train_time:396639ms step_avg:68.14ms
+[2025-07-07 07:19:25] [Rank 0] step:5841/10000 train_time:398009ms step_avg:68.14ms
+[2025-07-07 07:19:27] [Rank 0] step:5861/10000 train_time:399381ms step_avg:68.14ms
+[2025-07-07 07:19:28] [Rank 0] step:5881/10000 train_time:400752ms step_avg:68.14ms
+[2025-07-07 07:19:29] [Rank 0] step:5901/10000 train_time:402123ms step_avg:68.14ms
+[2025-07-07 07:19:31] [Rank 0] step:5921/10000 train_time:403496ms step_avg:68.15ms
+[2025-07-07 07:19:32] [Rank 0] step:5941/10000 train_time:404868ms step_avg:68.15ms
+[2025-07-07 07:19:34] [Rank 0] step:5961/10000 train_time:406290ms step_avg:68.16ms
+[2025-07-07 07:19:35] [Rank 0] step:5981/10000 train_time:407664ms step_avg:68.16ms
+[2025-07-07 07:19:36] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:19:37] [Rank 0] PRINT: step:6000/10000 train_loss:1.0106 val_loss:1.1546 train_time:409664ms step_avg:68.28ms
+[2025-07-07 07:19:37] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:19:37] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:19:37] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:24:59] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:24:59] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:24:59] [Rank 0] Total Loss: 5.5032
+[2025-07-07 07:24:59] [Rank 0] Total FTA: 0.2652
+[2025-07-07 07:24:59] [Rank 0] Group 0 Loss: 5.8627
+[2025-07-07 07:24:59] [Rank 0] Group 1 Loss: 5.3126
+[2025-07-07 07:24:59] [Rank 0] Group 2 Loss: 5.5313
+[2025-07-07 07:24:59] [Rank 0] Group 3 Loss: 5.5217
+[2025-07-07 07:24:59] [Rank 0] Group 4 Loss: 5.3336
+[2025-07-07 07:24:59] [Rank 0] Group 5 Loss: 5.4039
+[2025-07-07 07:24:59] [Rank 0] Group 6 Loss: 5.3929
+[2025-07-07 07:24:59] [Rank 0] Group 7 Loss: 5.4287
+[2025-07-07 07:24:59] [Rank 0] Group 8 Loss: 5.4338
+[2025-07-07 07:24:59] [Rank 0] Group 9 Loss: 5.4327
+[2025-07-07 07:24:59] [Rank 0] Group 10 Loss: 5.5178
+[2025-07-07 07:24:59] [Rank 0] Group 11 Loss: 5.4938
+[2025-07-07 07:24:59] [Rank 0] Group 0 FTA: 0.3342
+[2025-07-07 07:24:59] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 07:24:59] [Rank 0] Group 2 FTA: 0.2891
+[2025-07-07 07:24:59] [Rank 0] Group 3 FTA: 0.2318
+[2025-07-07 07:24:59] [Rank 0] Group 4 FTA: 0.2734
+[2025-07-07 07:24:59] [Rank 0] Group 5 FTA: 0.2865
+[2025-07-07 07:24:59] [Rank 0] Group 6 FTA: 0.2656
+[2025-07-07 07:24:59] [Rank 0] Group 7 FTA: 0.2656
+[2025-07-07 07:24:59] [Rank 0] Group 8 FTA: 0.2839
+[2025-07-07 07:24:59] [Rank 0] Group 9 FTA: 0.2422
+[2025-07-07 07:24:59] [Rank 0] Group 10 FTA: 0.3027
+[2025-07-07 07:24:59] [Rank 0] Group 11 FTA: 0.2852
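The twelve "Group k" rows are per-group means over the sampled evaluation set; what the groups index (for example, biography attribute classes) is not stated anywhere in the log. A minimal aggregation sketch under that reading:

    from collections import defaultdict

    def per_group_means(records):
        # records: (group_id, loss, fta) triples, one per evaluated sample
        acc = defaultdict(lambda: [0.0, 0.0, 0])
        for g, loss, fta in records:
            acc[g][0] += loss
            acc[g][1] += fta
            acc[g][2] += 1
        return {g: (s / n, f / n) for g, (s, f, n) in acc.items()}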
+[2025-07-07 07:25:00] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:25:00] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:25:00] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:25:01] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 07:25:01] [Rank 0] step:6001/10000 train_time:409673ms step_avg:68.27ms
+[2025-07-07 07:25:02] [Rank 0] step:6021/10000 train_time:410439ms step_avg:68.17ms
+[2025-07-07 07:25:03] [Rank 0] step:6041/10000 train_time:411803ms step_avg:68.17ms
+[2025-07-07 07:25:05] [Rank 0] step:6061/10000 train_time:413166ms step_avg:68.17ms
+[2025-07-07 07:25:06] [Rank 0] step:6081/10000 train_time:414531ms step_avg:68.17ms
+[2025-07-07 07:25:08] [Rank 0] step:6101/10000 train_time:415895ms step_avg:68.17ms
+[2025-07-07 07:25:09] [Rank 0] step:6121/10000 train_time:417340ms step_avg:68.18ms
+[2025-07-07 07:25:10] [Rank 0] step:6141/10000 train_time:418689ms step_avg:68.18ms
+[2025-07-07 07:25:12] [Rank 0] step:6161/10000 train_time:420057ms step_avg:68.18ms
+[2025-07-07 07:25:13] [Rank 0] step:6181/10000 train_time:421424ms step_avg:68.18ms
+[2025-07-07 07:25:14] [Rank 0] step:6201/10000 train_time:422791ms step_avg:68.18ms
+[2025-07-07 07:25:16] [Rank 0] step:6221/10000 train_time:424159ms step_avg:68.18ms
+[2025-07-07 07:25:17] [Rank 0] step:6241/10000 train_time:425529ms step_avg:68.18ms
+[2025-07-07 07:25:19] [Rank 0] step:6261/10000 train_time:426897ms step_avg:68.18ms
+[2025-07-07 07:25:20] [Rank 0] step:6281/10000 train_time:428265ms step_avg:68.18ms
+[2025-07-07 07:25:21] [Rank 0] step:6301/10000 train_time:429634ms step_avg:68.19ms
+[2025-07-07 07:25:23] [Rank 0] step:6321/10000 train_time:431044ms step_avg:68.19ms
+[2025-07-07 07:25:24] [Rank 0] step:6341/10000 train_time:432412ms step_avg:68.19ms
+[2025-07-07 07:25:25] [Rank 0] step:6361/10000 train_time:433782ms step_avg:68.19ms
+[2025-07-07 07:25:27] [Rank 0] step:6381/10000 train_time:435154ms step_avg:68.20ms
+[2025-07-07 07:25:28] [Rank 0] step:6401/10000 train_time:436525ms step_avg:68.20ms
+[2025-07-07 07:25:30] [Rank 0] step:6421/10000 train_time:437895ms step_avg:68.20ms
+[2025-07-07 07:25:31] [Rank 0] step:6441/10000 train_time:439266ms step_avg:68.20ms
+[2025-07-07 07:25:32] [Rank 0] step:6461/10000 train_time:440635ms step_avg:68.20ms
+[2025-07-07 07:25:34] [Rank 0] step:6481/10000 train_time:442688ms step_avg:68.31ms
+[2025-07-07 07:25:35] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:25:36] [Rank 0] PRINT: step:6500/10000 train_loss:0.9558 val_loss:1.1376 train_time:444050ms step_avg:68.32ms
+[2025-07-07 07:25:36] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:25:36] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:25:36] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:30:57] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:30:57] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:30:57] [Rank 0] Total Loss: 5.4742
+[2025-07-07 07:30:57] [Rank 0] Total FTA: 0.3266
+[2025-07-07 07:30:57] [Rank 0] Group 0 Loss: 5.6359
+[2025-07-07 07:30:57] [Rank 0] Group 1 Loss: 5.2059
+[2025-07-07 07:30:57] [Rank 0] Group 2 Loss: 5.4084
+[2025-07-07 07:30:57] [Rank 0] Group 3 Loss: 5.6830
+[2025-07-07 07:30:57] [Rank 0] Group 4 Loss: 5.3601
+[2025-07-07 07:30:57] [Rank 0] Group 5 Loss: 5.3698
+[2025-07-07 07:30:57] [Rank 0] Group 6 Loss: 5.4214
+[2025-07-07 07:30:57] [Rank 0] Group 7 Loss: 5.4874
+[2025-07-07 07:30:57] [Rank 0] Group 8 Loss: 5.4570
+[2025-07-07 07:30:57] [Rank 0] Group 9 Loss: 5.4820
+[2025-07-07 07:30:57] [Rank 0] Group 10 Loss: 5.5021
+[2025-07-07 07:30:57] [Rank 0] Group 11 Loss: 5.4869
+[2025-07-07 07:30:57] [Rank 0] Group 0 FTA: 0.3160
+[2025-07-07 07:30:57] [Rank 0] Group 1 FTA: 0.3203
+[2025-07-07 07:30:57] [Rank 0] Group 2 FTA: 0.3438
+[2025-07-07 07:30:57] [Rank 0] Group 3 FTA: 0.3047
+[2025-07-07 07:30:57] [Rank 0] Group 4 FTA: 0.3047
+[2025-07-07 07:30:57] [Rank 0] Group 5 FTA: 0.3516
+[2025-07-07 07:30:57] [Rank 0] Group 6 FTA: 0.3151
+[2025-07-07 07:30:57] [Rank 0] Group 7 FTA: 0.3281
+[2025-07-07 07:30:57] [Rank 0] Group 8 FTA: 0.3385
+[2025-07-07 07:30:57] [Rank 0] Group 9 FTA: 0.3359
+[2025-07-07 07:30:57] [Rank 0] Group 10 FTA: 0.3184
+[2025-07-07 07:30:57] [Rank 0] Group 11 FTA: 0.3389
+[2025-07-07 07:30:58] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:30:58] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:30:58] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:30:59] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 07:30:59] [Rank 0] step:6501/10000 train_time:444060ms step_avg:68.31ms
+[2025-07-07 07:31:00] [Rank 0] step:6521/10000 train_time:444809ms step_avg:68.21ms
+[2025-07-07 07:31:01] [Rank 0] step:6541/10000 train_time:446171ms step_avg:68.21ms
+[2025-07-07 07:31:03] [Rank 0] step:6561/10000 train_time:447536ms step_avg:68.21ms
+[2025-07-07 07:31:04] [Rank 0] step:6581/10000 train_time:448903ms step_avg:68.21ms
+[2025-07-07 07:31:06] [Rank 0] step:6601/10000 train_time:450269ms step_avg:68.21ms
+[2025-07-07 07:31:07] [Rank 0] step:6621/10000 train_time:451635ms step_avg:68.21ms
+[2025-07-07 07:31:08] [Rank 0] step:6641/10000 train_time:453001ms step_avg:68.21ms
+[2025-07-07 07:31:10] [Rank 0] step:6661/10000 train_time:455045ms step_avg:68.31ms
+[2025-07-07 07:31:11] [Rank 0] step:6681/10000 train_time:455779ms step_avg:68.22ms
+[2025-07-07 07:31:12] [Rank 0] step:6701/10000 train_time:457146ms step_avg:68.22ms
+[2025-07-07 07:31:14] [Rank 0] step:6721/10000 train_time:458513ms step_avg:68.22ms
+[2025-07-07 07:31:15] [Rank 0] step:6741/10000 train_time:459885ms step_avg:68.22ms
+[2025-07-07 07:31:17] [Rank 0] step:6761/10000 train_time:461253ms step_avg:68.22ms
+[2025-07-07 07:31:18] [Rank 0] step:6781/10000 train_time:462620ms step_avg:68.22ms
+[2025-07-07 07:31:19] [Rank 0] step:6801/10000 train_time:463989ms step_avg:68.22ms
+[2025-07-07 07:31:21] [Rank 0] step:6821/10000 train_time:465358ms step_avg:68.22ms
+[2025-07-07 07:31:22] [Rank 0] step:6841/10000 train_time:467392ms step_avg:68.32ms
+[2025-07-07 07:31:23] [Rank 0] step:6861/10000 train_time:468131ms step_avg:68.23ms
+[2025-07-07 07:31:25] [Rank 0] step:6881/10000 train_time:469501ms step_avg:68.23ms
+[2025-07-07 07:31:26] [Rank 0] step:6901/10000 train_time:470917ms step_avg:68.24ms
+[2025-07-07 07:31:28] [Rank 0] step:6921/10000 train_time:472286ms step_avg:68.24ms
+[2025-07-07 07:31:29] [Rank 0] step:6941/10000 train_time:473657ms step_avg:68.24ms
+[2025-07-07 07:31:30] [Rank 0] step:6961/10000 train_time:475026ms step_avg:68.24ms
+[2025-07-07 07:31:32] [Rank 0] step:6981/10000 train_time:476397ms step_avg:68.24ms
+[2025-07-07 07:31:33] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:31:34] [Rank 0] PRINT: step:7000/10000 train_loss:0.9102 val_loss:1.0863 train_time:478390ms step_avg:68.34ms
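The recurring divisibility warning above is plain integer arithmetic: 1966080 / 262144 = 7.5, so only 7 full validation batches fit and the trailing 131072 tokens are never scored:

    val_tokens, val_batch_size = 1966080, 262144
    full_batches, leftover = divmod(val_tokens, val_batch_size)
    print(full_batches, leftover)  # -> 7 131072 (half a batch of tokens skipped)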
+[2025-07-07 07:31:34] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:31:34] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:31:34] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:36:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:36:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:36:55] [Rank 0] Total Loss: 5.5932
+[2025-07-07 07:36:55] [Rank 0] Total FTA: 0.3506
+[2025-07-07 07:36:55] [Rank 0] Group 0 Loss: 5.8482
+[2025-07-07 07:36:55] [Rank 0] Group 1 Loss: 5.6250
+[2025-07-07 07:36:55] [Rank 0] Group 2 Loss: 5.4212
+[2025-07-07 07:36:55] [Rank 0] Group 3 Loss: 5.6591
+[2025-07-07 07:36:55] [Rank 0] Group 4 Loss: 5.5019
+[2025-07-07 07:36:55] [Rank 0] Group 5 Loss: 5.5170
+[2025-07-07 07:36:55] [Rank 0] Group 6 Loss: 5.5212
+[2025-07-07 07:36:55] [Rank 0] Group 7 Loss: 5.5222
+[2025-07-07 07:36:55] [Rank 0] Group 8 Loss: 5.5684
+[2025-07-07 07:36:55] [Rank 0] Group 9 Loss: 5.5656
+[2025-07-07 07:36:55] [Rank 0] Group 10 Loss: 5.5652
+[2025-07-07 07:36:55] [Rank 0] Group 11 Loss: 5.5763
+[2025-07-07 07:36:55] [Rank 0] Group 0 FTA: 0.3030
+[2025-07-07 07:36:55] [Rank 0] Group 1 FTA: 0.0000
+[2025-07-07 07:36:55] [Rank 0] Group 2 FTA: 0.6042
+[2025-07-07 07:36:55] [Rank 0] Group 3 FTA: 0.3490
+[2025-07-07 07:36:55] [Rank 0] Group 4 FTA: 0.3880
+[2025-07-07 07:36:55] [Rank 0] Group 5 FTA: 0.3203
+[2025-07-07 07:36:55] [Rank 0] Group 6 FTA: 0.3724
+[2025-07-07 07:36:55] [Rank 0] Group 7 FTA: 0.3542
+[2025-07-07 07:36:55] [Rank 0] Group 8 FTA: 0.4271
+[2025-07-07 07:36:55] [Rank 0] Group 9 FTA: 0.3789
+[2025-07-07 07:36:55] [Rank 0] Group 10 FTA: 0.3574
+[2025-07-07 07:36:55] [Rank 0] Group 11 FTA: 0.3721
+[2025-07-07 07:36:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:36:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:36:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:36:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 07:36:56] [Rank 0] step:7001/10000 train_time:478399ms step_avg:68.33ms
+[2025-07-07 07:36:58] [Rank 0] step:7021/10000 train_time:479179ms step_avg:68.25ms
+[2025-07-07 07:36:59] [Rank 0] step:7041/10000 train_time:480571ms step_avg:68.25ms
+[2025-07-07 07:37:00] [Rank 0] step:7061/10000 train_time:481935ms step_avg:68.25ms
+[2025-07-07 07:37:02] [Rank 0] step:7081/10000 train_time:483298ms step_avg:68.25ms
+[2025-07-07 07:37:03] [Rank 0] step:7101/10000 train_time:484665ms step_avg:68.25ms
+[2025-07-07 07:37:04] [Rank 0] step:7121/10000 train_time:486033ms step_avg:68.25ms
+[2025-07-07 07:37:06] [Rank 0] step:7141/10000 train_time:487398ms step_avg:68.25ms
+[2025-07-07 07:37:07] [Rank 0] step:7161/10000 train_time:488765ms step_avg:68.25ms
+[2025-07-07 07:37:08] [Rank 0] step:7181/10000 train_time:490133ms step_avg:68.25ms
+[2025-07-07 07:37:10] [Rank 0] step:7201/10000 train_time:491500ms step_avg:68.25ms
+[2025-07-07 07:37:11] [Rank 0] step:7221/10000 train_time:492903ms step_avg:68.26ms
+[2025-07-07 07:37:13] [Rank 0] step:7241/10000 train_time:494271ms step_avg:68.26ms
+[2025-07-07 07:37:14] [Rank 0] step:7261/10000 train_time:495639ms step_avg:68.26ms
+[2025-07-07 07:37:15] [Rank 0] step:7281/10000 train_time:497008ms step_avg:68.26ms
+[2025-07-07 07:37:17] [Rank 0] step:7301/10000 train_time:498377ms step_avg:68.26ms
+[2025-07-07 07:37:18] [Rank 0] step:7321/10000 train_time:499754ms step_avg:68.26ms
+[2025-07-07 07:37:19] [Rank 0] step:7341/10000 train_time:501125ms step_avg:68.26ms
+[2025-07-07 07:37:21] [Rank 0] step:7361/10000 train_time:502495ms step_avg:68.26ms
+[2025-07-07 07:37:22] [Rank 0] step:7381/10000 train_time:504542ms step_avg:68.36ms
+[2025-07-07 07:37:24] [Rank 0] step:7401/10000 train_time:505281ms step_avg:68.27ms
+[2025-07-07 07:37:25] [Rank 0] step:7421/10000 train_time:506652ms step_avg:68.27ms
+[2025-07-07 07:37:26] [Rank 0] step:7441/10000 train_time:508026ms step_avg:68.27ms
+[2025-07-07 07:37:28] [Rank 0] step:7461/10000 train_time:509397ms step_avg:68.27ms
+[2025-07-07 07:37:29] [Rank 0] step:7481/10000 train_time:510768ms step_avg:68.28ms
+[2025-07-07 07:37:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:37:31] [Rank 0] PRINT: step:7500/10000 train_loss:0.8792 val_loss:1.0632 train_time:512761ms step_avg:68.37ms
+[2025-07-07 07:37:31] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:37:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:37:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:42:53] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:42:53] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:42:53] [Rank 0] Total Loss: 5.4502
+[2025-07-07 07:42:53] [Rank 0] Total FTA: 0.4571
+[2025-07-07 07:42:53] [Rank 0] Group 0 Loss: 5.6646
+[2025-07-07 07:42:53] [Rank 0] Group 1 Loss: 5.1145
+[2025-07-07 07:42:53] [Rank 0] Group 2 Loss: 5.3552
+[2025-07-07 07:42:53] [Rank 0] Group 3 Loss: 5.6094
+[2025-07-07 07:42:53] [Rank 0] Group 4 Loss: 5.3182
+[2025-07-07 07:42:53] [Rank 0] Group 5 Loss: 5.4197
+[2025-07-07 07:42:53] [Rank 0] Group 6 Loss: 5.3484
+[2025-07-07 07:42:53] [Rank 0] Group 7 Loss: 5.4288
+[2025-07-07 07:42:53] [Rank 0] Group 8 Loss: 5.4532
+[2025-07-07 07:42:53] [Rank 0] Group 9 Loss: 5.4602
+[2025-07-07 07:42:53] [Rank 0] Group 10 Loss: 5.4736
+[2025-07-07 07:42:53] [Rank 0] Group 11 Loss: 5.4829
+[2025-07-07 07:42:53] [Rank 0] Group 0 FTA: 0.6528
+[2025-07-07 07:42:53] [Rank 0] Group 1 FTA: 0.6536
+[2025-07-07 07:42:53] [Rank 0] Group 2 FTA: 0.3854
+[2025-07-07 07:42:53] [Rank 0] Group 3 FTA: 0.3073
+[2025-07-07 07:42:53] [Rank 0] Group 4 FTA: 0.3542
+[2025-07-07 07:42:53] [Rank 0] Group 5 FTA: 0.4115
+[2025-07-07 07:42:53] [Rank 0] Group 6 FTA: 0.3776
+[2025-07-07 07:42:53] [Rank 0] Group 7 FTA: 0.4271
+[2025-07-07 07:42:53] [Rank 0] Group 8 FTA: 0.4323
+[2025-07-07 07:42:53] [Rank 0] Group 9 FTA: 0.4297
+[2025-07-07 07:42:53] [Rank 0] Group 10 FTA: 0.4375
+[2025-07-07 07:42:53] [Rank 0] Group 11 FTA: 0.4424
+[2025-07-07 07:42:54] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:42:54] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:42:54] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:42:54] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 07:42:55] [Rank 0] step:7501/10000 train_time:512772ms step_avg:68.36ms
+[2025-07-07 07:42:56] [Rank 0] step:7521/10000 train_time:513532ms step_avg:68.28ms
+[2025-07-07 07:42:57] [Rank 0] step:7541/10000 train_time:514894ms step_avg:68.28ms
+[2025-07-07 07:42:59] [Rank 0] step:7561/10000 train_time:516307ms step_avg:68.29ms
+[2025-07-07 07:43:00] [Rank 0] step:7581/10000 train_time:517677ms step_avg:68.29ms
+[2025-07-07 07:43:01] [Rank 0] step:7601/10000 train_time:519041ms step_avg:68.29ms
+[2025-07-07 07:43:03] [Rank 0] step:7621/10000 train_time:520407ms step_avg:68.29ms
+[2025-07-07 07:43:04] [Rank 0] step:7641/10000 train_time:521780ms step_avg:68.29ms
+[2025-07-07 07:43:06] [Rank 0] step:7661/10000 train_time:523147ms step_avg:68.29ms
+[2025-07-07 07:43:07] [Rank 0] step:7681/10000 train_time:524514ms step_avg:68.29ms
+[2025-07-07 07:43:08] [Rank 0] step:7701/10000 train_time:525880ms step_avg:68.29ms
+[2025-07-07 07:43:10] [Rank 0] step:7721/10000 train_time:527247ms step_avg:68.29ms
+[2025-07-07 07:43:11] [Rank 0] step:7741/10000 train_time:528659ms step_avg:68.29ms
+[2025-07-07 07:43:12] [Rank 0] step:7761/10000 train_time:530030ms step_avg:68.29ms
+[2025-07-07 07:43:14] [Rank 0] step:7781/10000 train_time:531399ms step_avg:68.29ms
+[2025-07-07 07:43:15] [Rank 0] step:7801/10000 train_time:532766ms step_avg:68.29ms
+[2025-07-07 07:43:17] [Rank 0] step:7821/10000 train_time:534135ms step_avg:68.29ms
+[2025-07-07 07:43:18] [Rank 0] step:7841/10000 train_time:535503ms step_avg:68.30ms
+[2025-07-07 07:43:19] [Rank 0] step:7861/10000 train_time:536871ms step_avg:68.30ms
+[2025-07-07 07:43:21] [Rank 0] step:7881/10000 train_time:538239ms step_avg:68.30ms
+[2025-07-07 07:43:22] [Rank 0] step:7901/10000 train_time:539609ms step_avg:68.30ms
+[2025-07-07 07:43:23] [Rank 0] step:7921/10000 train_time:541028ms step_avg:68.30ms
+[2025-07-07 07:43:25] [Rank 0] step:7941/10000 train_time:542383ms step_avg:68.30ms
+[2025-07-07 07:43:26] [Rank 0] step:7961/10000 train_time:543752ms step_avg:68.30ms
+[2025-07-07 07:43:28] [Rank 0] step:7981/10000 train_time:545121ms step_avg:68.30ms
+[2025-07-07 07:43:29] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:43:30] [Rank 0] PRINT: step:8000/10000 train_loss:0.8628 val_loss:1.0538 train_time:547114ms step_avg:68.39ms
+[2025-07-07 07:43:30] [Rank 0] 
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:43:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:43:30] [Rank 0] PRINT: step:8000/10000 train_loss:0.8628 val_loss:1.0538 train_time:547114ms step_avg:68.39ms
+[2025-07-07 07:43:30] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:43:30] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:43:30] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:48:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:48:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:48:55] [Rank 0] Total Loss: 5.5926
+[2025-07-07 07:48:55] [Rank 0] Total FTA: 0.4459
+[2025-07-07 07:48:55] [Rank 0] Group 0 Loss: 6.1318
+[2025-07-07 07:48:55] [Rank 0] Group 1 Loss: 5.5241
+[2025-07-07 07:48:55] [Rank 0] Group 2 Loss: 5.4716
+[2025-07-07 07:48:55] [Rank 0] Group 3 Loss: 5.6760
+[2025-07-07 07:48:55] [Rank 0] Group 4 Loss: 5.4284
+[2025-07-07 07:48:55] [Rank 0] Group 5 Loss: 5.4566
+[2025-07-07 07:48:55] [Rank 0] Group 6 Loss: 5.4301
+[2025-07-07 07:48:55] [Rank 0] Group 7 Loss: 5.4924
+[2025-07-07 07:48:55] [Rank 0] Group 8 Loss: 5.5215
+[2025-07-07 07:48:55] [Rank 0] Group 9 Loss: 5.5495
+[2025-07-07 07:48:55] [Rank 0] Group 10 Loss: 5.4949
+[2025-07-07 07:48:55] [Rank 0] Group 11 Loss: 5.5248
+[2025-07-07 07:48:55] [Rank 0] Group 0 FTA: 0.5111
+[2025-07-07 07:48:55] [Rank 0] Group 1 FTA: 0.1719
+[2025-07-07 07:48:55] [Rank 0] Group 2 FTA: 0.5911
+[2025-07-07 07:48:55] [Rank 0] Group 3 FTA: 0.5365
+[2025-07-07 07:48:55] [Rank 0] Group 4 FTA: 0.3385
+[2025-07-07 07:48:55] [Rank 0] Group 5 FTA: 0.5000
+[2025-07-07 07:48:55] [Rank 0] Group 6 FTA: 0.3464
+[2025-07-07 07:48:55] [Rank 0] Group 7 FTA: 0.4583
+[2025-07-07 07:48:55] [Rank 0] Group 8 FTA: 0.4661
+[2025-07-07 07:48:55] [Rank 0] Group 9 FTA: 0.4805
+[2025-07-07 07:48:55] [Rank 0] Group 10 FTA: 0.4609
+[2025-07-07 07:48:55] [Rank 0] Group 11 FTA: 0.4404
+[2025-07-07 07:48:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:48:56] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:48:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:48:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 07:48:56] [Rank 0] step:8001/10000 train_time:547124ms step_avg:68.38ms
+[2025-07-07 07:48:58] [Rank 0] step:8021/10000 train_time:547896ms step_avg:68.31ms
+[2025-07-07 07:48:59] [Rank 0] step:8041/10000 train_time:549258ms step_avg:68.31ms
+[2025-07-07 07:49:00] [Rank 0] step:8061/10000 train_time:550622ms step_avg:68.31ms
+[2025-07-07 07:49:02] [Rank 0] step:8081/10000 train_time:551986ms step_avg:68.31ms
+[2025-07-07 07:49:03] [Rank 0] step:8101/10000 train_time:554040ms step_avg:68.39ms
+[2025-07-07 07:49:04] [Rank 0] step:8121/10000 train_time:554778ms step_avg:68.31ms
+[2025-07-07 07:49:06] [Rank 0] step:8141/10000 train_time:556145ms step_avg:68.31ms
+[2025-07-07 07:49:07] [Rank 0] step:8161/10000 train_time:557510ms step_avg:68.31ms
+[2025-07-07 07:49:09] [Rank 0] step:8181/10000 train_time:558877ms step_avg:68.31ms
+[2025-07-07 07:49:10] [Rank 0] step:8201/10000 train_time:560243ms step_avg:68.31ms
+[2025-07-07 07:49:11] [Rank 0] step:8221/10000 train_time:561609ms step_avg:68.31ms
+[2025-07-07 07:49:13] [Rank 0] step:8241/10000 train_time:562977ms step_avg:68.31ms
+[2025-07-07 07:49:14] [Rank 0] step:8261/10000 train_time:564345ms step_avg:68.31ms
+[2025-07-07 07:49:15] [Rank 0] step:8281/10000 train_time:565759ms step_avg:68.32ms
+[2025-07-07 07:49:17] [Rank 0] step:8301/10000 train_time:567131ms step_avg:68.32ms
+[2025-07-07 07:49:18] [Rank 0] step:8321/10000 train_time:568500ms step_avg:68.32ms
+[2025-07-07 07:49:20] [Rank 0] step:8341/10000 train_time:569868ms step_avg:68.32ms
+[2025-07-07 07:49:21] [Rank 0] step:8361/10000 train_time:571302ms step_avg:68.33ms
+[2025-07-07 07:49:22] [Rank 0] step:8381/10000 train_time:572672ms step_avg:68.33ms
+[2025-07-07 07:49:24] [Rank 0] step:8401/10000 train_time:574045ms step_avg:68.33ms
+[2025-07-07 07:49:25] [Rank 0] step:8421/10000 train_time:575417ms step_avg:68.33ms
+[2025-07-07 07:49:26] [Rank 0] step:8441/10000 train_time:576789ms step_avg:68.33ms
+[2025-07-07 07:49:28] [Rank 0] step:8461/10000 train_time:578207ms step_avg:68.34ms
+[2025-07-07 07:49:29] [Rank 0] step:8481/10000 train_time:579579ms step_avg:68.34ms
+[2025-07-07 07:49:31] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:49:31] [Rank 0] PRINT: step:8500/10000 train_loss:0.8523 val_loss:1.0177 train_time:581572ms step_avg:68.42ms
+[2025-07-07 07:49:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:49:32] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
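The stratified-sampling step logged above draws from every class so that all twelve groups are represented in the roughly 5000-sample evaluation set. A minimal sketch of one way to do this (the helper and its names are hypothetical, not the script's actual code); rounding each group's quota up is one reason the realized size, 5633 here, can exceed the nominal target:

    import math
    import random
    from collections import defaultdict

    def stratified_sample(examples, group_of, target=5000, seed=0):
        # Bucket examples by group label, then sample each bucket
        # proportionally to its share of the full set.
        rng = random.Random(seed)
        buckets = defaultdict(list)
        for ex in examples:
            buckets[group_of(ex)].append(ex)
        frac = target / len(examples)
        picked = []
        for items in buckets.values():
            quota = max(1, math.ceil(len(items) * frac))  # never drop a group entirely
            picked.extend(rng.sample(items, min(quota, len(items))))
        return picked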
+[2025-07-07 07:49:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 07:54:51] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 07:54:51] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 07:54:51] [Rank 0] Total Loss: 5.5108
+[2025-07-07 07:54:51] [Rank 0] Total FTA: 0.5315
+[2025-07-07 07:54:51] [Rank 0] Group 0 Loss: 5.9184
+[2025-07-07 07:54:51] [Rank 0] Group 1 Loss: 5.3393
+[2025-07-07 07:54:51] [Rank 0] Group 2 Loss: 5.3642
+[2025-07-07 07:54:51] [Rank 0] Group 3 Loss: 5.6015
+[2025-07-07 07:54:51] [Rank 0] Group 4 Loss: 5.3465
+[2025-07-07 07:54:51] [Rank 0] Group 5 Loss: 5.4742
+[2025-07-07 07:54:51] [Rank 0] Group 6 Loss: 5.4003
+[2025-07-07 07:54:51] [Rank 0] Group 7 Loss: 5.4472
+[2025-07-07 07:54:51] [Rank 0] Group 8 Loss: 5.4738
+[2025-07-07 07:54:51] [Rank 0] Group 9 Loss: 5.4793
+[2025-07-07 07:54:51] [Rank 0] Group 10 Loss: 5.5169
+[2025-07-07 07:54:51] [Rank 0] Group 11 Loss: 5.4491
+[2025-07-07 07:54:51] [Rank 0] Group 0 FTA: 0.6801
+[2025-07-07 07:54:52] [Rank 0] Group 1 FTA: 0.4974
+[2025-07-07 07:54:52] [Rank 0] Group 2 FTA: 0.6172
+[2025-07-07 07:54:52] [Rank 0] Group 3 FTA: 0.4635
+[2025-07-07 07:54:52] [Rank 0] Group 4 FTA: 0.5156
+[2025-07-07 07:54:52] [Rank 0] Group 5 FTA: 0.4349
+[2025-07-07 07:54:52] [Rank 0] Group 6 FTA: 0.4896
+[2025-07-07 07:54:52] [Rank 0] Group 7 FTA: 0.5547
+[2025-07-07 07:54:52] [Rank 0] Group 8 FTA: 0.4896
+[2025-07-07 07:54:52] [Rank 0] Group 9 FTA: 0.5195
+[2025-07-07 07:54:52] [Rank 0] Group 10 FTA: 0.5039
+[2025-07-07 07:54:52] [Rank 0] Group 11 FTA: 0.5078
+[2025-07-07 07:54:52] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 07:54:52] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 07:54:53] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 07:54:53] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 07:54:53] [Rank 0] step:8501/10000 train_time:581583ms step_avg:68.41ms
+[2025-07-07 07:54:54] [Rank 0] step:8521/10000 train_time:582355ms step_avg:68.34ms
+[2025-07-07 07:54:56] [Rank 0] step:8541/10000 train_time:583716ms step_avg:68.34ms
+[2025-07-07 07:54:57] [Rank 0] step:8561/10000 train_time:585078ms step_avg:68.34ms
+[2025-07-07 07:54:58] [Rank 0] step:8581/10000 train_time:586443ms step_avg:68.34ms
+[2025-07-07 07:55:00] [Rank 0] step:8601/10000 train_time:587808ms step_avg:68.34ms
+[2025-07-07 07:55:01] [Rank 0] step:8621/10000 train_time:589174ms step_avg:68.34ms
+[2025-07-07 07:55:03] [Rank 0] step:8641/10000 train_time:590590ms step_avg:68.35ms
+[2025-07-07 07:55:04] [Rank 0] step:8661/10000 train_time:591907ms step_avg:68.34ms
+[2025-07-07 07:55:05] [Rank 0] step:8681/10000 train_time:593273ms step_avg:68.34ms
+[2025-07-07 07:55:07] [Rank 0] step:8701/10000 train_time:594639ms step_avg:68.34ms
+[2025-07-07 07:55:08] [Rank 0] step:8721/10000 train_time:596007ms step_avg:68.34ms
+[2025-07-07 07:55:09] [Rank 0] step:8741/10000 train_time:597374ms step_avg:68.34ms
+[2025-07-07 07:55:11] [Rank 0] step:8761/10000 train_time:598742ms step_avg:68.34ms
+[2025-07-07 07:55:12] [Rank 0] step:8781/10000 train_time:600108ms step_avg:68.34ms
+[2025-07-07 07:55:14] [Rank 0] step:8801/10000 train_time:601477ms step_avg:68.34ms
+[2025-07-07 07:55:15] [Rank 0] step:8821/10000 train_time:603510ms step_avg:68.42ms
+[2025-07-07 07:55:16] [Rank 0] step:8841/10000 train_time:604247ms step_avg:68.35ms
+[2025-07-07 07:55:18] [Rank 0] step:8861/10000 train_time:605616ms step_avg:68.35ms
+[2025-07-07 07:55:19] [Rank 0] step:8881/10000 train_time:606982ms step_avg:68.35ms
+[2025-07-07 07:55:20] [Rank 0] step:8901/10000 train_time:608351ms step_avg:68.35ms
+[2025-07-07 07:55:22] [Rank 0] step:8921/10000 train_time:609720ms step_avg:68.35ms
+[2025-07-07 07:55:23] [Rank 0] step:8941/10000 train_time:611088ms step_avg:68.35ms
+[2025-07-07 07:55:25] [Rank 0] step:8961/10000 train_time:612456ms step_avg:68.35ms
+[2025-07-07 07:55:26] [Rank 0] step:8981/10000 train_time:613825ms step_avg:68.35ms
+[2025-07-07 07:55:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 07:55:28] [Rank 0] PRINT: step:9000/10000 train_loss:0.8480 val_loss:0.9922 train_time:615816ms step_avg:68.42ms
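The step_avg field in these lines is simply cumulative wall-clock training time divided by completed steps; for the print above, 615816 ms / 9000 steps ≈ 68.42 ms. Reproduced with illustrative names:

    train_time_ms, step = 615_816, 9_000
    step_avg = train_time_ms / step   # 68.424 ms, logged as 68.42ms
    print(f"step:{step}/10000 train_time:{train_time_ms}ms step_avg:{step_avg:.2f}ms")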
+[2025-07-07 07:55:28] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 07:55:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 07:55:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:00:50] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:00:50] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:00:50] [Rank 0] Total Loss: 5.5114
+[2025-07-07 08:00:50] [Rank 0] Total FTA: 0.5707
+[2025-07-07 08:00:50] [Rank 0] Group 0 Loss: 5.9216
+[2025-07-07 08:00:50] [Rank 0] Group 1 Loss: 5.4378
+[2025-07-07 08:00:50] [Rank 0] Group 2 Loss: 5.5669
+[2025-07-07 08:00:50] [Rank 0] Group 3 Loss: 5.6627
+[2025-07-07 08:00:50] [Rank 0] Group 4 Loss: 5.2990
+[2025-07-07 08:00:50] [Rank 0] Group 5 Loss: 5.4220
+[2025-07-07 08:00:50] [Rank 0] Group 6 Loss: 5.3585
+[2025-07-07 08:00:50] [Rank 0] Group 7 Loss: 5.4212
+[2025-07-07 08:00:50] [Rank 0] Group 8 Loss: 5.4285
+[2025-07-07 08:00:50] [Rank 0] Group 9 Loss: 5.4233
+[2025-07-07 08:00:50] [Rank 0] Group 10 Loss: 5.4337
+[2025-07-07 08:00:50] [Rank 0] Group 11 Loss: 5.4497
+[2025-07-07 08:00:50] [Rank 0] Group 0 FTA: 0.6697
+[2025-07-07 08:00:50] [Rank 0] Group 1 FTA: 0.4844
+[2025-07-07 08:00:50] [Rank 0] Group 2 FTA: 0.8620
+[2025-07-07 08:00:50] [Rank 0] Group 3 FTA: 0.4427
+[2025-07-07 08:00:50] [Rank 0] Group 4 FTA: 0.3698
+[2025-07-07 08:00:50] [Rank 0] Group 5 FTA: 0.6406
+[2025-07-07 08:00:50] [Rank 0] Group 6 FTA: 0.5365
+[2025-07-07 08:00:50] [Rank 0] Group 7 FTA: 0.5286
+[2025-07-07 08:00:50] [Rank 0] Group 8 FTA: 0.5964
+[2025-07-07 08:00:50] [Rank 0] Group 9 FTA: 0.5273
+[2025-07-07 08:00:50] [Rank 0] Group 10 FTA: 0.5566
+[2025-07-07 08:00:50] [Rank 0] Group 11 FTA: 0.5537
+[2025-07-07 08:00:51] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 08:00:51] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 08:00:51] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 08:00:51] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 08:00:52] [Rank 0] step:9001/10000 train_time:615834ms step_avg:68.42ms
+[2025-07-07 08:00:54] [Rank 0] step:9021/10000 train_time:617300ms step_avg:68.43ms
+[2025-07-07 08:00:55] [Rank 0] step:9041/10000 train_time:618663ms step_avg:68.43ms
+[2025-07-07 08:00:56] [Rank 0] step:9061/10000 train_time:620076ms step_avg:68.43ms
+[2025-07-07 08:00:58] [Rank 0] step:9081/10000 train_time:621441ms step_avg:68.43ms
+[2025-07-07 08:00:59] [Rank 0] step:9101/10000 train_time:622805ms step_avg:68.43ms
+[2025-07-07 08:01:01] [Rank 0] step:9121/10000 train_time:624169ms step_avg:68.43ms
+[2025-07-07 08:01:02] [Rank 0] step:9141/10000 train_time:625535ms step_avg:68.43ms
+[2025-07-07 08:01:03] [Rank 0] step:9161/10000 train_time:626901ms step_avg:68.43ms
+[2025-07-07 08:01:05] [Rank 0] step:9181/10000 train_time:628519ms step_avg:68.46ms
+[2025-07-07 08:01:06] [Rank 0] step:9201/10000 train_time:629688ms step_avg:68.44ms
+[2025-07-07 08:01:07] [Rank 0] step:9221/10000 train_time:631058ms step_avg:68.44ms
+[2025-07-07 08:01:09] [Rank 0] step:9241/10000 train_time:632426ms step_avg:68.44ms
+[2025-07-07 08:01:10] [Rank 0] step:9261/10000 train_time:633795ms step_avg:68.44ms
+[2025-07-07 08:01:12] [Rank 0] step:9281/10000 train_time:635164ms step_avg:68.44ms
+[2025-07-07 08:01:13] [Rank 0] step:9301/10000 train_time:636534ms step_avg:68.44ms
+[2025-07-07 08:01:14] [Rank 0] step:9321/10000 train_time:637904ms step_avg:68.44ms
+[2025-07-07 08:01:16] [Rank 0] step:9341/10000 train_time:639275ms step_avg:68.44ms
+[2025-07-07 08:01:17] [Rank 0] step:9361/10000 train_time:640695ms step_avg:68.44ms
+[2025-07-07 08:01:18] [Rank 0] step:9381/10000 train_time:642044ms step_avg:68.44ms
+[2025-07-07 08:01:20] [Rank 0] step:9401/10000 train_time:643415ms step_avg:68.44ms
+[2025-07-07 08:01:21] [Rank 0] step:9421/10000 train_time:644788ms step_avg:68.44ms
+[2025-07-07 08:01:23] [Rank 0] step:9441/10000 train_time:646161ms step_avg:68.44ms
+[2025-07-07 08:01:24] [Rank 0] step:9461/10000 train_time:647534ms step_avg:68.44ms
+[2025-07-07 08:01:25] [Rank 0] step:9481/10000 train_time:648906ms step_avg:68.44ms
+[2025-07-07 08:01:27] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:01:27] [Rank 0] PRINT: step:9500/10000 train_loss:0.8436 val_loss:1.0341 train_time:650903ms step_avg:68.52ms
+[2025-07-07 08:01:27] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:01:28] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
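The log never spells out what FTA stands for; given that these curves are written to *_acc_curves.png files, it plausibly means first-token accuracy, i.e. whether the argmax prediction at the first answer position matches the target token. A sketch under that assumption only (tensor layout and function name are guesses, not the script's API):

    import torch

    def first_token_accuracy(logits, targets, answer_start):
        # logits: [batch, seq, vocab]; targets: [batch, seq]
        # answer_start: [batch] index of each sample's first answer token.
        rows = torch.arange(logits.size(0))
        preds = logits[rows, answer_start - 1].argmax(dim=-1)  # next-token prediction
        return (preds == targets[rows, answer_start]).float().mean().item()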
+[2025-07-07 08:01:28] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:06:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:06:54] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:06:54] [Rank 0] Total Loss: 5.5244
+[2025-07-07 08:06:54] [Rank 0] Total FTA: 0.5754
+[2025-07-07 08:06:54] [Rank 0] Group 0 Loss: 5.9740
+[2025-07-07 08:06:54] [Rank 0] Group 1 Loss: 5.3701
+[2025-07-07 08:06:54] [Rank 0] Group 2 Loss: 5.3971
+[2025-07-07 08:06:54] [Rank 0] Group 3 Loss: 5.7072
+[2025-07-07 08:06:54] [Rank 0] Group 4 Loss: 5.2737
+[2025-07-07 08:06:54] [Rank 0] Group 5 Loss: 5.4084
+[2025-07-07 08:06:54] [Rank 0] Group 6 Loss: 5.3750
+[2025-07-07 08:06:54] [Rank 0] Group 7 Loss: 5.4604
+[2025-07-07 08:06:54] [Rank 0] Group 8 Loss: 5.4964
+[2025-07-07 08:06:54] [Rank 0] Group 9 Loss: 5.4630
+[2025-07-07 08:06:54] [Rank 0] Group 10 Loss: 5.5196
+[2025-07-07 08:06:54] [Rank 0] Group 11 Loss: 5.4693
+[2025-07-07 08:06:54] [Rank 0] Group 0 FTA: 0.6606
+[2025-07-07 08:06:54] [Rank 0] Group 1 FTA: 0.7109
+[2025-07-07 08:06:54] [Rank 0] Group 2 FTA: 0.7578
+[2025-07-07 08:06:54] [Rank 0] Group 3 FTA: 0.4271
+[2025-07-07 08:06:54] [Rank 0] Group 4 FTA: 0.5417
+[2025-07-07 08:06:54] [Rank 0] Group 5 FTA: 0.5130
+[2025-07-07 08:06:54] [Rank 0] Group 6 FTA: 0.5312
+[2025-07-07 08:06:54] [Rank 0] Group 7 FTA: 0.5495
+[2025-07-07 08:06:54] [Rank 0] Group 8 FTA: 0.5365
+[2025-07-07 08:06:54] [Rank 0] Group 9 FTA: 0.5586
+[2025-07-07 08:06:54] [Rank 0] Group 10 FTA: 0.5801
+[2025-07-07 08:06:54] [Rank 0] Group 11 FTA: 0.5264
+[2025-07-07 08:06:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 08:06:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 08:06:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 08:06:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 08:06:56] [Rank 0] step:9501/10000 train_time:650912ms step_avg:68.51ms
+[2025-07-07 08:06:57] [Rank 0] step:9521/10000 train_time:651683ms step_avg:68.45ms
+[2025-07-07 08:06:59] [Rank 0] step:9541/10000 train_time:653722ms step_avg:68.52ms
+[2025-07-07 08:07:00] [Rank 0] step:9561/10000 train_time:654458ms step_avg:68.45ms
+[2025-07-07 08:07:01] [Rank 0] step:9581/10000 train_time:655820ms step_avg:68.45ms
+[2025-07-07 08:07:03] [Rank 0] step:9601/10000 train_time:657187ms step_avg:68.45ms
+[2025-07-07 08:07:04] [Rank 0] step:9621/10000 train_time:658552ms step_avg:68.45ms
+[2025-07-07 08:07:06] [Rank 0] step:9641/10000 train_time:659922ms step_avg:68.45ms
+[2025-07-07 08:07:07] [Rank 0] step:9661/10000 train_time:661290ms step_avg:68.45ms
+[2025-07-07 08:07:08] [Rank 0] step:9681/10000 train_time:662657ms step_avg:68.45ms
+[2025-07-07 08:07:10] [Rank 0] step:9701/10000 train_time:664025ms step_avg:68.45ms
+[2025-07-07 08:07:11] [Rank 0] step:9721/10000 train_time:665646ms step_avg:68.48ms
+[2025-07-07 08:07:12] [Rank 0] step:9741/10000 train_time:666811ms step_avg:68.45ms
+[2025-07-07 08:07:14] [Rank 0] step:9761/10000 train_time:668178ms step_avg:68.45ms
+[2025-07-07 08:07:15] [Rank 0] step:9781/10000 train_time:669696ms step_avg:68.47ms
+[2025-07-07 08:07:17] [Rank 0] step:9801/10000 train_time:670996ms step_avg:68.46ms
+[2025-07-07 08:07:18] [Rank 0] step:9821/10000 train_time:672366ms step_avg:68.46ms
+[2025-07-07 08:07:19] [Rank 0] step:9841/10000 train_time:673735ms step_avg:68.46ms
+[2025-07-07 08:07:21] [Rank 0] step:9861/10000 train_time:675104ms step_avg:68.46ms
+[2025-07-07 08:07:22] [Rank 0] step:9881/10000 train_time:676474ms step_avg:68.46ms
+[2025-07-07 08:07:24] [Rank 0] step:9901/10000 train_time:678096ms step_avg:68.49ms
+[2025-07-07 08:07:25] [Rank 0] step:9921/10000 train_time:679261ms step_avg:68.47ms
+[2025-07-07 08:07:26] [Rank 0] step:9941/10000 train_time:680632ms step_avg:68.47ms
+[2025-07-07 08:07:28] [Rank 0] step:9961/10000 train_time:682006ms step_avg:68.47ms
+[2025-07-07 08:07:29] [Rank 0] step:9981/10000 train_time:683380ms step_avg:68.47ms
+[2025-07-07 08:07:30] [Rank 0] step:10000/10000 train_time:684686ms step_avg:68.47ms
+[2025-07-07 08:07:30] [Rank 0] PRINT: Warning: val_tokens (1966080) not perfectly divisible by val_batch_size (262144). Some tokens might be missed.
+[2025-07-07 08:07:31] [Rank 0] PRINT: step:10000/10000 train_loss:0.8402 val_loss:0.9834 train_time:685386ms step_avg:68.54ms
+[2025-07-07 08:07:31] [Rank 0]
+--- Starting Detailed Evaluation (Loss & FTA) ---
+[2025-07-07 08:07:31] [Rank 0] Using stratified sampling to extract ~5000 samples for detailed evaluation...
+[2025-07-07 08:07:32] [Rank 0] Evaluation set size after sampling: 5633
+[2025-07-07 08:12:54] [Rank 0] --- Detailed Evaluation Complete ---
+[2025-07-07 08:12:55] [Rank 0] --- Detailed Evaluation Results (This Step) ---
+[2025-07-07 08:12:55] [Rank 0] Total Loss: 5.4976
+[2025-07-07 08:12:55] [Rank 0] Total FTA: 0.5617
+[2025-07-07 08:12:55] [Rank 0] Group 0 Loss: 5.9267
+[2025-07-07 08:12:55] [Rank 0] Group 1 Loss: 5.3731
+[2025-07-07 08:12:55] [Rank 0] Group 2 Loss: 5.3007
+[2025-07-07 08:12:55] [Rank 0] Group 3 Loss: 5.7143
+[2025-07-07 08:12:55] [Rank 0] Group 4 Loss: 5.3716
+[2025-07-07 08:12:55] [Rank 0] Group 5 Loss: 5.4768
+[2025-07-07 08:12:55] [Rank 0] Group 6 Loss: 5.2953
+[2025-07-07 08:12:55] [Rank 0] Group 7 Loss: 5.4094
+[2025-07-07 08:12:55] [Rank 0] Group 8 Loss: 5.3897
+[2025-07-07 08:12:55] [Rank 0] Group 9 Loss: 5.4170
+[2025-07-07 08:12:55] [Rank 0] Group 10 Loss: 5.4367
+[2025-07-07 08:12:55] [Rank 0] Group 11 Loss: 5.4694
+[2025-07-07 08:12:55] [Rank 0] Group 0 FTA: 0.4811
+[2025-07-07 08:12:55] [Rank 0] Group 1 FTA: 0.4479
+[2025-07-07 08:12:55] [Rank 0] Group 2 FTA: 0.8438
+[2025-07-07 08:12:55] [Rank 0] Group 3 FTA: 0.5104
+[2025-07-07 08:12:55] [Rank 0] Group 4 FTA: 0.4609
+[2025-07-07 08:12:55] [Rank 0] Group 5 FTA: 0.5990
+[2025-07-07 08:12:55] [Rank 0] Group 6 FTA: 0.5703
+[2025-07-07 08:12:55] [Rank 0] Group 7 FTA: 0.5703
+[2025-07-07 08:12:55] [Rank 0] Group 8 FTA: 0.5469
+[2025-07-07 08:12:55] [Rank 0] Group 9 FTA: 0.6523
+[2025-07-07 08:12:55] [Rank 0] Group 10 FTA: 0.5430
+[2025-07-07 08:12:55] [Rank 0] Group 11 FTA: 0.5879
+[2025-07-07 08:12:55] [Rank 0] [✓] Per-Class Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_loss_curves.png
+[2025-07-07 08:12:55] [Rank 0] [✓] Per-Class FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/per_class_acc_curves.png
+[2025-07-07 08:12:56] [Rank 0] [✓] Total Detailed Loss curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_loss_curve.png
+[2025-07-07 08:12:56] [Rank 0] [✓] Total Detailed FTA curve updated and saved to: logs_bios/qa_0704/mode_5_param_qkvo_lr_0.005_seed_48/total_acc_curve.png
+[2025-07-07 08:12:56] [Rank 0] step:10001/10000 train_time:685396ms step_avg:68.53ms
+[2025-07-07 08:12:56] [Rank 0] PRINT: --- Training Finished: Mon Jul 7 08:12:56 2025 ---
+[2025-07-07 08:12:56] [Rank 0] PRINT: Peak memory allocated: 8720 MiB reserved: 10716 MiB
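The closing memory line corresponds to PyTorch's CUDA peak-memory counters, which track the high-water mark of tensor allocations versus what the caching allocator reserved from the driver. A minimal sketch of how such a summary is typically produced (the script's exact call site is not shown in this log):

    import torch

    # Peak bytes ever allocated by tensors vs. reserved by the caching allocator.
    alloc_mib = torch.cuda.max_memory_allocated() // (1024 * 1024)
    reserved_mib = torch.cuda.max_memory_reserved() // (1024 * 1024)
    print(f"Peak memory allocated: {alloc_mib} MiB reserved: {reserved_mib} MiB")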